diff --git a/.editorconfig b/.editorconfig
index 07f00de06ee4..560067027c52 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -1,11 +1,11 @@
root = true
-[*.{py,c,cpp,h,rst,md,yml,json,test}]
+[*.{py,pyi,c,cpp,h,rst,md,yml,json,test}]
trim_trailing_whitespace = true
insert_final_newline = true
indent_style = space
-[*.{py,c,h,json,test}]
+[*.{py,pyi,c,h,json,test}]
indent_size = 4
[*.yml]
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
new file mode 100644
index 000000000000..043c3ac878ab
--- /dev/null
+++ b/.git-blame-ignore-revs
@@ -0,0 +1,2 @@
+# Adopt black and isort
+97c5ee99bc98dc475512e549b252b23a6e7e0997
\ No newline at end of file
diff --git a/.github/ISSUE_TEMPLATE/bug.md b/.github/ISSUE_TEMPLATE/bug.md
index 1b3a16eebd2c..b5cf5bb4dc80 100644
--- a/.github/ISSUE_TEMPLATE/bug.md
+++ b/.github/ISSUE_TEMPLATE/bug.md
@@ -5,51 +5,41 @@ labels: "bug"
---
**Bug Report**
(A clear and concise description of what the bug is.)
**To Reproduce**
-(Write your steps here:)
-
-1. Step 1...
-2. Step 2...
-3. Step 3...
+```python
+# Ideally, a small sample program that demonstrates the problem.
+# Or, even better, a reproducible playground link: https://mypy-play.net/ (use the "Gist" button)
+```
**Expected Behavior**
-(Write what you thought would happen.)
-
**Actual Behavior**
-
-
-(Write what happened.)
+
**Your Environment**
@@ -59,9 +49,5 @@ for this report: https://github.com/python/typeshed/issues
- Mypy command-line flags:
- Mypy configuration options from `mypy.ini` (and other config files):
- Python version used:
-- Operating system and version:
-
+
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 4794ec05c906..696eb8aee125 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,22 +1,12 @@
-### Have you read the [Contributing Guidelines](https://github.com/python/mypy/blob/master/CONTRIBUTING.md)?
-
-(Once you have, delete this section. If you leave it in, your PR may be closed without action.)
-
-### Description
-
-
+
(Explain how this PR changes mypy.)
-## Test Plan
-
-
-(Write your test plan here. If you changed any code, please provide us with clear instructions on how you verified your changes work.)
diff --git a/.github/workflows/build_wheels.yml b/.github/workflows/build_wheels.yml
index 1d56057854c3..7edfa03584c1 100644
--- a/.github/workflows/build_wheels.yml
+++ b/.github/workflows/build_wheels.yml
@@ -10,8 +10,8 @@ jobs:
if: github.repository == 'python/mypy'
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
- - uses: actions/setup-python@v2
+ - uses: actions/checkout@v3
+ - uses: actions/setup-python@v4
with:
python-version: '3.7'
- name: Trigger script
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index 79560db2d09b..a3294c08a79c 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -1,6 +1,7 @@
name: Check documentation build
on:
+ workflow_dispatch:
push:
branches: [master, 'release*']
tags: ['*']
@@ -18,14 +19,15 @@ jobs:
runs-on: ubuntu-latest
env:
TOXENV: docs
+ TOX_SKIP_MISSING_INTERPRETERS: False
steps:
- - uses: actions/checkout@v2
- - uses: actions/setup-python@v2
+ - uses: actions/checkout@v3
+ - uses: actions/setup-python@v4
with:
- python-version: '3.10'
+ python-version: '3.7'
- name: Install tox
- run: pip install --upgrade 'setuptools!=50' 'virtualenv>=20.6.0' tox==3.24.5
+ run: pip install --upgrade 'setuptools!=50' tox==4.4.4
- name: Setup tox environment
- run: tox -e ${{ env.TOXENV }} --notest
+ run: tox run -e ${{ env.TOXENV }} --notest
- name: Test
- run: tox -e ${{ env.TOXENV }} --skip-pkg-install
+ run: tox run -e ${{ env.TOXENV }} --skip-pkg-install
diff --git a/.github/workflows/mypy_primer.yml b/.github/workflows/mypy_primer.yml
index e65d918228b4..9eef1c1c7466 100644
--- a/.github/workflows/mypy_primer.yml
+++ b/.github/workflows/mypy_primer.yml
@@ -7,11 +7,17 @@ on:
- 'docs/**'
- '**/*.rst'
- '**/*.md'
+ - 'misc/**'
- 'mypyc/**'
- 'mypy/stubtest.py'
- 'mypy/stubgen.py'
- 'mypy/stubgenc.py'
- 'mypy/test/**'
+ - 'test-data/**'
+
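+# Cancel any in-progress run on the same branch when new commits are pushed;
+# for non-PR events head_ref is empty, so run_id keeps each run in its own group.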
+concurrency:
+ group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+ cancel-in-progress: true
jobs:
mypy_primer:
@@ -21,14 +27,14 @@ jobs:
contents: read
strategy:
matrix:
- shard-index: [0, 1, 2]
+ shard-index: [0, 1, 2, 3, 4]
fail-fast: false
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
with:
path: mypy_to_test
fetch-depth: 0
- - uses: actions/setup-python@v2
+ - uses: actions/setup-python@v4
with:
python-version: "3.10"
- name: Install dependencies
@@ -54,13 +60,14 @@ jobs:
mypy_primer \
--repo mypy_to_test \
--new $GITHUB_SHA --old base_commit \
- --num-shards 3 --shard-index ${{ matrix.shard-index }} \
+ --num-shards 5 --shard-index ${{ matrix.shard-index }} \
--debug \
+ --additional-flags="--debug-serialize" \
--output concise \
| tee diff_${{ matrix.shard-index }}.txt
) || [ $? -eq 1 ]
- name: Upload mypy_primer diff
- uses: actions/upload-artifact@v2
+ uses: actions/upload-artifact@v3
with:
name: mypy_primer_diffs
path: diff_${{ matrix.shard-index }}.txt
@@ -70,7 +77,7 @@ jobs:
echo ${{ github.event.pull_request.number }} | tee pr_number.txt
- if: ${{ matrix.shard-index }} == 0
name: Upload PR number
- uses: actions/upload-artifact@v2
+ uses: actions/upload-artifact@v3
with:
name: mypy_primer_diffs
path: pr_number.txt
diff --git a/.github/workflows/mypy_primer_comment.yml b/.github/workflows/mypy_primer_comment.yml
index 36977862eebe..12ce91c12910 100644
--- a/.github/workflows/mypy_primer_comment.yml
+++ b/.github/workflows/mypy_primer_comment.yml
@@ -15,13 +15,14 @@ jobs:
comment:
name: Comment PR from mypy_primer
runs-on: ubuntu-latest
+ if: ${{ github.event.workflow_run.conclusion == 'success' }}
steps:
- name: Download diffs
- uses: actions/github-script@v3
+ uses: actions/github-script@v6
with:
script: |
const fs = require('fs');
- const artifacts = await github.actions.listWorkflowRunArtifacts({
+ const artifacts = await github.rest.actions.listWorkflowRunArtifacts({
owner: context.repo.owner,
repo: context.repo.repo,
run_id: ${{ github.event.workflow_run.id }},
@@ -29,7 +30,7 @@ jobs:
const [matchArtifact] = artifacts.data.artifacts.filter((artifact) =>
artifact.name == "mypy_primer_diffs");
- const download = await github.actions.downloadArtifact({
+ const download = await github.rest.actions.downloadArtifact({
owner: context.repo.owner,
repo: context.repo.repo,
artifact_id: matchArtifact.id,
@@ -43,17 +44,33 @@ jobs:
- name: Post comment
id: post-comment
- uses: actions/github-script@v3
+ uses: actions/github-script@v6
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
+ const MAX_CHARACTERS = 30000
+ const MAX_CHARACTERS_PER_PROJECT = MAX_CHARACTERS / 3
+
const fs = require('fs')
let data = fs.readFileSync('fulldiff.txt', { encoding: 'utf8' })
- // posting comment fails if too long, so truncate
- if (data.length > 30000) {
- data = data.substring(0, 30000) + `\n\n... (truncated ${data.length - 30000} chars) ...\n`
+
+ function truncateIfNeeded(original, maxLength) {
+ if (original.length <= maxLength) {
+ return original
+ }
+ let truncated = original.substring(0, maxLength)
+ // further, remove last line that might be truncated
+ truncated = truncated.substring(0, truncated.lastIndexOf('\n'))
+ let lines_truncated = original.split('\n').length - truncated.split('\n').length
+ return `${truncated}\n\n... (truncated ${lines_truncated} lines) ...`
}
+ const projects = data.split('\n\n')
+ // don't let one project dominate
+ data = projects.map(project => truncateIfNeeded(project, MAX_CHARACTERS_PER_PROJECT)).join('\n\n')
+ // posting comment fails if too long, so truncate
+ data = truncateIfNeeded(data, MAX_CHARACTERS)
+
console.log("Diff from mypy_primer:")
console.log(data)
@@ -64,7 +81,7 @@ jobs:
body = 'According to [mypy_primer](https://github.com/hauntsaninja/mypy_primer), this change has no effect on the checked open source code. 🤖🎉'
}
const prNumber = parseInt(fs.readFileSync("pr_number.txt", { encoding: "utf8" }))
- await github.issues.createComment({
+ await github.rest.issues.createComment({
issue_number: prNumber,
owner: context.repo.owner,
repo: context.repo.repo,
@@ -73,8 +90,8 @@ jobs:
return prNumber
- name: Hide old comments
- # v0.3.0
- uses: kanga333/comment-hider@bbdf5b562fbec24e6f60572d8f712017428b92e0
+ # v0.4.0
+ uses: kanga333/comment-hider@c12bb20b48aeb8fc098e35967de8d4f8018fffdf
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
leave_visible: 1
diff --git a/.github/workflows/sync_typeshed.yml b/.github/workflows/sync_typeshed.yml
new file mode 100644
index 000000000000..1db2e846f099
--- /dev/null
+++ b/.github/workflows/sync_typeshed.yml
@@ -0,0 +1,33 @@
+name: Sync typeshed
+
+on:
+ workflow_dispatch:
+ schedule:
+ - cron: "0 0 1,15 * *"
+
+permissions:
+ contents: write
+ pull-requests: write
+
+jobs:
+ sync_typeshed:
+ name: Sync typeshed
+ if: github.repository == 'python/mypy'
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+ # TODO: use whatever solution ends up working for
+ # https://github.com/python/typeshed/issues/8434
+ - uses: actions/setup-python@v4
+ with:
+ python-version: "3.10"
+ - name: git config
+ run: |
+ git config --global user.name mypybot
+ git config --global user.email '<>'
+ - name: Sync typeshed
+ run: |
+ python -m pip install requests==2.28.1
+ GITHUB_TOKEN=${{ secrets.GITHUB_TOKEN }} python misc/sync-typeshed.py --make-pr
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index f62dba8cb9c7..e7072f5369c2 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -1,6 +1,7 @@
name: Tests
on:
+ workflow_dispatch:
push:
branches: [master, 'release*']
tags: ['*']
@@ -14,6 +15,10 @@ on:
- CREDITS
- LICENSE
+concurrency:
+ group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+ cancel-in-progress: true
+
jobs:
main:
runs-on: ${{ matrix.os }}
@@ -21,37 +26,32 @@ jobs:
fail-fast: false
matrix:
include:
- - name: Test suite with py37-windows-32
- python: '3.7'
- arch: x86
- os: windows-latest
- toxenv: py37
- name: Test suite with py37-windows-64
python: '3.7'
arch: x64
os: windows-latest
toxenv: py37
- - name: Test suite with py37-ubuntu
- python: '3.7'
+ - name: Test suite with py38-ubuntu
+ python: '3.8'
arch: x64
os: ubuntu-latest
toxenv: py
tox_extra_args: "-n 2"
- - name: Test suite with py38-ubuntu
- python: '3.8'
+ - name: Test suite with py39-ubuntu
+ python: '3.9'
arch: x64
os: ubuntu-latest
toxenv: py
tox_extra_args: "-n 2"
- - name: Test suite with py36-ubuntu, mypyc-compiled
- python: '3.6'
+ - name: Test suite with py37-ubuntu, mypyc-compiled
+ python: '3.7'
arch: x64
os: ubuntu-latest
toxenv: py
tox_extra_args: "-n 2"
test_mypyc: true
- - name: Test suite with py39-ubuntu, mypyc-compiled
- python: '3.9'
+ - name: Test suite with py310-ubuntu, mypyc-compiled
+ python: '3.10'
arch: x64
os: ubuntu-latest
toxenv: py
@@ -63,17 +63,24 @@ jobs:
os: ubuntu-latest
toxenv: py
tox_extra_args: "-n 2"
- - name: mypyc runtime tests with py36-macos
- python: '3.6'
+ - name: Test suite with py311-ubuntu, mypyc-compiled
+ python: '3.11'
+ arch: x64
+ os: ubuntu-latest
+ toxenv: py
+ tox_extra_args: "-n 2"
+ test_mypyc: true
+ - name: mypyc runtime tests with py37-macos
+ python: '3.7'
arch: x64
os: macos-latest
toxenv: py
tox_extra_args: "-n 2 mypyc/test/test_run.py mypyc/test/test_external.py"
- - name: mypyc runtime tests with py36-debug-build-ubuntu
- python: '3.6.8'
+ - name: mypyc runtime tests with py37-debug-build-ubuntu
+ python: '3.7.13'
arch: x64
os: ubuntu-latest
- toxenv: py36
+ toxenv: py
tox_extra_args: "-n 2 mypyc/test/test_run.py mypyc/test/test_external.py"
debug_build: true
- name: Type check our own code (py37-ubuntu)
@@ -86,16 +93,28 @@ jobs:
arch: x64
os: windows-latest
toxenv: type
- - name: Code style with flake8
+ - name: Formatting with Black + isort and code style with flake8
python: '3.7'
arch: x64
os: ubuntu-latest
toxenv: lint
name: ${{ matrix.name }}
+ env:
+ TOX_SKIP_MISSING_INTERPRETERS: False
+ # Rich (pip)
+ FORCE_COLOR: 1
+ # Tox
+ PY_COLORS: 1
+ # Mypy (see https://github.com/python/mypy/issues/7771)
+ TERM: xterm-color
+ MYPY_FORCE_COLOR: 1
+ MYPY_FORCE_TERMINAL_WIDTH: 200
+ # Pytest
+ PYTEST_ADDOPTS: --color=yes
steps:
- - uses: actions/checkout@v2
- - uses: actions/setup-python@v2
+ - uses: actions/checkout@v3
+ - uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python }}
architecture: ${{ matrix.arch }}
@@ -108,34 +127,31 @@ jobs:
./misc/build-debug-python.sh $PYTHONVERSION $PYTHONDIR $VENV
source $VENV/bin/activate
- name: Install tox
- run: pip install --upgrade 'setuptools!=50' 'virtualenv>=20.6.0' tox==3.24.5
+ run: pip install --upgrade 'setuptools!=50' tox==4.4.4
- name: Compiled with mypyc
if: ${{ matrix.test_mypyc }}
run: |
pip install -r test-requirements.txt
- CC=clang MYPYC_OPT_LEVEL=0 python3 setup.py --use-mypyc build_ext --inplace
+ CC=clang MYPYC_OPT_LEVEL=0 MYPY_USE_MYPYC=1 pip install -e .
- name: Setup tox environment
- run: tox -e ${{ matrix.toxenv }} --notest
+ run: tox run -e ${{ matrix.toxenv }} --notest
- name: Test
- run: tox -e ${{ matrix.toxenv }} --skip-pkg-install -- ${{ matrix.tox_extra_args }}
+ run: tox run -e ${{ matrix.toxenv }} --skip-pkg-install -- ${{ matrix.tox_extra_args }}
-# TODO: re-enable when `typed-ast` will be fixed for `python==3.11`
-# python-nightly:
-# runs-on: ubuntu-latest
-# name: Test suite with Python nightly
-# steps:
-# - uses: actions/checkout@v2
-# - uses: actions/setup-python@v2
-# with:
-# python-version: '3.11-dev'
-# - name: Install tox
-# run: |
-# pip install -U pip==21.2.3 setuptools
-# pip install --upgrade 'setuptools!=50' virtualenv==20.4.7 tox==3.20.1
-# - name: Setup tox environment
-# run: tox -e py --notest
-# - name: Test
-# run: tox -e py --skip-pkg-install -- "-n 2"
-# continue-on-error: true
-# - name: Mark as a success
-# run: exit 0
+ python-nightly:
+ runs-on: ubuntu-latest
+ name: Test suite with Python nightly
+ steps:
+ - uses: actions/checkout@v3
+ - uses: actions/setup-python@v4
+ with:
+ python-version: '3.12-dev'
+ - name: Install tox
+ run: pip install --upgrade 'setuptools!=50' tox==4.4.4
+ - name: Setup tox environment
+ run: tox run -e py --notest
+ - name: Test
+ run: tox run -e py --skip-pkg-install -- "-n 2"
+ continue-on-error: true
+ - name: Mark as a success
+ run: exit 0
diff --git a/.github/workflows/test_stubgenc.yml b/.github/workflows/test_stubgenc.yml
index 6408f21ccffe..b48031e5c18f 100644
--- a/.github/workflows/test_stubgenc.yml
+++ b/.github/workflows/test_stubgenc.yml
@@ -1,6 +1,7 @@
name: Test stubgenc on pybind11-mypy-demo
on:
+ workflow_dispatch:
push:
branches: [master, 'release*']
tags: ['*']
@@ -18,10 +19,10 @@ jobs:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Setup 🐍 3.8
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
with:
python-version: 3.8
diff --git a/.gitignore b/.gitignore
index 3c0f60cfae4f..c6761f0ed736 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,11 +9,12 @@ docs/source/_build
mypyc/doc/_build
*.iml
/out/
-.venv
+.venv*
venv/
.mypy_cache/
.incremental_checker_cache.json
.cache
+test-data/packages/.pip_lock
dmypy.json
.dmypy.json
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 000000000000..0de686b7eb01
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,16 @@
+repos:
+ - repo: https://github.com/psf/black
+ rev: 22.12.0 # must match test-requirements.txt
+ hooks:
+ - id: black
+ - repo: https://github.com/pycqa/isort
+ rev: 5.11.4 # must match test-requirements.txt
+ hooks:
+ - id: isort
+ - repo: https://github.com/pycqa/flake8
+ rev: 5.0.4 # must match test-requirements.txt
+ hooks:
+ - id: flake8
+ additional_dependencies:
+ - flake8-bugbear==22.12.6 # must match test-requirements.txt
+ - flake8-noqa==1.3.0 # must match test-requirements.txt
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index eafefe346d01..2b2e6cdb9734 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -17,48 +17,73 @@ articulated in the [Python Community Code of Conduct](https://www.python.org/psf
### Setup
-Run the following:
+#### (1) Clone the mypy repository and enter it
```
-# Clone the mypy repository
git clone https://github.com/python/mypy.git
-
-# Enter the repository
cd mypy
+```
-# Create then activate a virtual environment
+#### (2) Create then activate a virtual environment
+```
+# On Windows, the commands may be slightly different. For more details, see
+# https://docs.python.org/3/library/venv.html#creating-virtual-environments
python3 -m venv venv
source venv/bin/activate
+```
-# Install the test requirements and the project
+#### (3) Install the test requirements and the project
+```
python3 -m pip install -r test-requirements.txt
python3 -m pip install -e .
-hash -r
+hash -r  # This resets the shell's command cache; not necessary on Windows
```
### Running tests
-Once setup, you should be able to run tests:
+Running the full test suite can take a while, and usually isn't necessary when
+preparing a PR. Once you file a PR, the full test suite will run on GitHub.
+You'll then be able to see any test failures, and make any necessary changes to
+your PR.
+
+However, if you wish to do so, you can run the full test suite
+like this:
```
python3 runtests.py
```
-To use mypy to check mypy's own code, run:
+You can also use `tox` to run tests (`tox` handles setting up the test environment for you):
```
+tox run -e py
+
+# Or some specific Python version:
+tox run -e py39
+
+# Or some specific command:
+tox run -e lint
+```
+
+Some useful commands for running specific tests include:
+```bash
+# Use mypy to check mypy's own code
python3 runtests.py self
# or equivalently:
python3 -m mypy --config-file mypy_self_check.ini -p mypy
-```
-You can also use `tox` to run tests, for instance:
-```
-tox -e py
-```
-
-The easiest way to run a single test is:
-```
+# Run a single test from the test suite
pytest -n0 -k 'test_name'
+
+# Run all test cases in the "test-data/unit/check-dataclasses.test" file
+pytest mypy/test/testcheck.py::TypeCheckSuite::check-dataclasses.test
+
+# Run the linter
+flake8
+
+# Run formatters
+black . && isort .
```
-There's more useful information on writing and running tests [here](test-data/unit/README.md)
+
+For an in-depth guide on running and writing tests,
+see [the README in the test-data directory](test-data/unit/README.md).
## First time contributors
@@ -115,10 +140,9 @@ advice about good pull requests for open-source projects applies; we
have [our own writeup](https://github.com/python/mypy/wiki/Good-Pull-Request)
of this advice.
-See also our [coding conventions](https://github.com/python/mypy/wiki/Code-Conventions) --
-which consist mainly of a reference to
-[PEP 8](https://www.python.org/dev/peps/pep-0008/) -- for the code you
-put in the pull request.
+We use `black` and `isort` to enforce a consistent coding style.
+Run `black . && isort .` before committing your changes; otherwise
+CI will fail.
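+
+If you would rather have these checks run automatically, a sketch using
+[pre-commit](https://pre-commit.com/) (the repository ships a
+`.pre-commit-config.yaml` that pins the same tool versions):
+```
+python3 -m pip install pre-commit
+pre-commit install  # black, isort and flake8 now run on every commit
+```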
Also, do not squash your commits after you have submitted a pull request, as this
erases context during review. We will squash commits when the pull request is merged.
diff --git a/MANIFEST.in b/MANIFEST.in
index fe10e22265a6..1c26ae16fc78 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -42,6 +42,7 @@ include pytest.ini
include LICENSE mypyc/README.md
exclude .gitmodules CONTRIBUTING.md CREDITS ROADMAP.md tox.ini action.yml .editorconfig
+exclude .git-blame-ignore-revs .pre-commit-config.yaml
global-exclude *.py[cod]
global-exclude .DS_Store
diff --git a/README.md b/README.md
index 98f6d48cd982..9d9618e6bc12 100644
--- a/README.md
+++ b/README.md
@@ -1,15 +1,16 @@
-
+
Mypy: Static Typing for Python
=======================================
[](https://pypi.org/project/mypy/)
[](https://pypistats.org/packages/mypy)
-[](https://travis-ci.com/python/mypy)
+[](https://github.com/python/mypy/actions)
[](https://mypy.readthedocs.io/en/latest/?badge=latest)
[](https://gitter.im/python/typing?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
-[](http://mypy-lang.org/)
-
+[](https://mypy-lang.org/)
+[](https://github.com/psf/black)
+[](https://pycqa.github.io/isort/)
Got a question?
---------------
@@ -20,7 +21,7 @@ We are always happy to answer questions! Here are some good places to ask them:
- for general questions about Python typing, try [typing discussions](https://github.com/python/typing/discussions)
If you're just getting started,
-[the documentation](https://mypy.readthedocs.io/en/stable/introduction.html)
+[the documentation](https://mypy.readthedocs.io/en/stable/index.html)
and [type hints cheat sheet](https://mypy.readthedocs.io/en/stable/cheat_sheet_py3.html)
can also help answer questions.
@@ -57,10 +58,6 @@ Python is a dynamic language, so usually you'll only see errors in your code
when you attempt to run it. Mypy is a *static* checker, so it finds bugs
in your programs without even running them!
-Mypy is designed with gradual typing in mind. This means you can add type
-hints to your code base slowly and that you can always fall back to dynamic
-typing when static typing is not convenient.
-
Here is a small example to whet your appetite:
```python
@@ -68,12 +65,26 @@ number = input("What is your favourite number?")
print("It is", number + 1) # error: Unsupported operand types for + ("str" and "int")
```
-See [the documentation](https://mypy.readthedocs.io/en/stable/introduction.html) for more examples.
+Adding type hints for mypy does not interfere with the way your program would
+otherwise run. Think of type hints as similar to comments! You can always use
+the Python interpreter to run your code, even if mypy reports errors.
+
+Mypy is designed with gradual typing in mind. This means you can add type
+hints to your code base slowly and that you can always fall back to dynamic
+typing when static typing is not convenient.
+
+Mypy has a powerful and easy-to-use type system, supporting features such as
+type inference, generics, callable types, tuple types, union types,
+structural subtyping and more. Using mypy will make your programs easier to
+understand, debug, and maintain.
+
+See [the documentation](https://mypy.readthedocs.io/en/stable/index.html) for
+more examples and information.
In particular, see:
- [type hints cheat sheet](https://mypy.readthedocs.io/en/stable/cheat_sheet_py3.html)
- [getting started](https://mypy.readthedocs.io/en/stable/getting_started.html)
-
+- [list of error codes](https://mypy.readthedocs.io/en/stable/error_code_list.html)
Quick start
-----------
@@ -82,10 +93,12 @@ Mypy can be installed using pip:
python3 -m pip install -U mypy
-If you want to run the latest version of the code, you can install from git:
-
- python3 -m pip install -U git+git://github.com/python/mypy.git
+If you want to run the latest version of the code, you can install from the
+repo directly:
+ python3 -m pip install -U git+https://github.com/python/mypy.git
+ # or if you don't have 'git' installed
+ python3 -m pip install -U https://github.com/python/mypy/zipball/master
Now you can type-check the [statically typed parts] of a program like this:
@@ -97,9 +110,13 @@ programs, even if mypy reports type errors:
python3 PROGRAM
You can also try mypy in an [online playground](https://mypy-play.net/) (developed by
-Yusuke Miyazaki).
+Yusuke Miyazaki). If you are working with large code bases, you can run mypy in
+[daemon mode], which gives much faster (often sub-second) incremental updates:
+
+ dmypy run -- PROGRAM
[statically typed parts]: https://mypy.readthedocs.io/en/latest/getting_started.html#function-signatures-and-dynamic-vs-static-typing
+[daemon mode]: https://mypy.readthedocs.io/en/stable/mypy_daemon.html
Integrations
@@ -112,7 +129,7 @@ Mypy can be integrated into popular IDEs:
`let g:syntastic_python_checkers=['mypy']`
* Using [ALE](https://github.com/dense-analysis/ale): should be enabled by default when `mypy` is installed,
or can be explicitly enabled by adding `let b:ale_linters = ['mypy']` in `~/vim/ftplugin/python.vim`
-* Emacs: using [Flycheck](https://github.com/flycheck/) and [Flycheck-mypy](https://github.com/lbolla/emacs-flycheck-mypy)
+* Emacs: using [Flycheck](https://github.com/flycheck/)
* Sublime Text: [SublimeLinter-contrib-mypy](https://github.com/fredcallaway/SublimeLinter-contrib-mypy)
* Atom: [linter-mypy](https://atom.io/packages/linter-mypy)
* PyCharm: [mypy plugin](https://github.com/dropbox/mypy-PyCharm-plugin) (PyCharm integrates
@@ -148,14 +165,7 @@ To get started with developing mypy, see [CONTRIBUTING.md](CONTRIBUTING.md).
If you need help getting started, don't hesitate to ask on [gitter](https://gitter.im/python/typing).
-Development status
-------------------
-
-Mypy is beta software, but it has already been used in production
-for several years at Dropbox and in many other organizations, and
-it has an extensive test suite.
-
-mypyc and compiled version of mypy
+Mypyc and compiled version of mypy
----------------------------------
[Mypyc](https://github.com/mypyc/mypyc) uses Python type hints to compile Python
diff --git a/build-requirements.txt b/build-requirements.txt
index b4d024ee7f38..52c518d53bc2 100644
--- a/build-requirements.txt
+++ b/build-requirements.txt
@@ -1,2 +1,5 @@
+# NOTE: this needs to be kept in sync with the "requires" list in pyproject.toml
-r mypy-requirements.txt
-types-typed-ast>=1.4.0,<1.5.0
+types-psutil
+types-setuptools
+types-typed-ast>=1.5.8,<1.6.0
diff --git a/conftest.py b/conftest.py
index 83a6689f6373..0bd7b6a38031 100644
--- a/conftest.py
+++ b/conftest.py
@@ -1,8 +1,8 @@
+from __future__ import annotations
+
import os.path
-pytest_plugins = [
- 'mypy.test.data',
-]
+pytest_plugins = ["mypy.test.data"]
def pytest_configure(config):
@@ -14,5 +14,6 @@ def pytest_configure(config):
# This function name is special to pytest. See
# http://doc.pytest.org/en/latest/writing_plugins.html#initialization-command-line-and-configuration-hooks
def pytest_addoption(parser) -> None:
- parser.addoption('--bench', action='store_true', default=False,
- help='Enable the benchmark test runs')
+ parser.addoption(
+ "--bench", action="store_true", default=False, help="Enable the benchmark test runs"
+ )
diff --git a/docs/README.md b/docs/README.md
index 2122eefc4b4a..0d574c9213a5 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -6,7 +6,7 @@ What's this?
This directory contains the source code for Mypy documentation (under `source/`)
and build scripts. The documentation uses Sphinx and reStructuredText. We use
-`sphinx-rtd-theme` as the documentation theme.
+`furo` as the documentation theme.
Building the documentation
--------------------------
diff --git a/docs/source/additional_features.rst b/docs/source/additional_features.rst
index 19e0d4dcce01..ef5bf9e8936d 100644
--- a/docs/source/additional_features.rst
+++ b/docs/source/additional_features.rst
@@ -177,7 +177,7 @@ Caveats/Known Issues
will complain about not understanding the argument and the type annotation in
:py:meth:`__init__ ` will be replaced by ``Any``.
-* :ref:`Validator decorators `
+* :ref:`Validator decorators `
and `default decorators `_
are not type-checked against the attribute they are setting/validating.
diff --git a/docs/source/builtin_types.rst b/docs/source/builtin_types.rst
index 7ff9bd3c38e9..37b56169d879 100644
--- a/docs/source/builtin_types.rst
+++ b/docs/source/builtin_types.rst
@@ -15,8 +15,8 @@ Type Description
``int`` integer
``float`` floating point number
``bool`` boolean value (subclass of ``int``)
-``str`` string (unicode in Python 3)
-``bytes`` 8-bit string
+``str`` text, sequence of unicode codepoints
+``bytes`` 8-bit string, sequence of byte values
``object`` an arbitrary object (``object`` is the common base class)
====================== ===============================
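+
+A minimal sketch of the ``str`` versus ``bytes`` distinction (the variable
+names are illustrative):
+
+.. code-block:: python
+
+   s: str = "café"        # text: a sequence of unicode code points
+   b: bytes = s.encode()  # the same text as raw byte values: b'caf\xc3\xa9'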
diff --git a/docs/source/cheat_sheet.rst b/docs/source/cheat_sheet.rst
deleted file mode 100644
index 64a2d524faf8..000000000000
--- a/docs/source/cheat_sheet.rst
+++ /dev/null
@@ -1,282 +0,0 @@
-.. _cheat-sheet-py2:
-
-Type hints cheat sheet (Python 2)
-=================================
-
-This document is a quick cheat sheet showing how the :pep:`484` type
-language represents various common types in Python 2.
-
-.. note::
-
- Technically many of the type annotations shown below are redundant,
- because mypy can derive them from the type of the expression. So
- many of the examples have a dual purpose: show how to write the
- annotation, and show the inferred types.
-
-.. note::
-
- To check Python 2 code with mypy, you'll need to install mypy with
- ``pip install 'mypy[python2]'``.
-
-
-
-Built-in types
-**************
-
-.. code-block:: python
-
- from typing import List, Set, Dict, Tuple, Text, Optional
-
- # For simple built-in types, just use the name of the type
- x = 1 # type: int
- x = 1.0 # type: float
- x = True # type: bool
- x = "test" # type: str
- x = u"test" # type: unicode
-
- # For collections, the name of the type is capitalized, and the
- # name of the type inside the collection is in brackets
- x = [1] # type: List[int]
- x = {6, 7} # type: Set[int]
-
- # For mappings, we need the types of both keys and values
- x = {'field': 2.0} # type: Dict[str, float]
-
- # For tuples, we specify the types of all the elements
- x = (3, "yes", 7.5) # type: Tuple[int, str, float]
-
- # For textual data, use Text
- # ("Text" means "unicode" in Python 2 and "str" in Python 3)
- x = [u"one", u"two"] # type: List[Text]
-
- # Use Optional[] for values that could be None
- x = some_function() # type: Optional[str]
- # Mypy understands a value can't be None in an if-statement
- if x is not None:
- print x.upper()
- # If a value can never be None due to some invariants, use an assert
- assert x is not None
- print x.upper()
-
-Functions
-*********
-
-.. code-block:: python
-
- from typing import Callable, Iterator, Union, Optional, List
-
- # This is how you annotate a function definition
- def stringify(num):
- # type: (int) -> str
- """Your function docstring goes here after the type definition."""
- return str(num)
-
- # This function has no parameters and also returns nothing. Annotations
- # can also be placed on the same line as their function headers.
- def greet_world(): # type: () -> None
- print "Hello, world!"
-
- # And here's how you specify multiple arguments
- def plus(num1, num2):
- # type: (int, int) -> int
- return num1 + num2
-
- # Add type annotations for arguments with default values as though they
- # had no defaults
- def f(num1, my_float=3.5):
- # type: (int, float) -> float
- return num1 + my_float
-
- # An argument can be declared positional-only by giving it a name
- # starting with two underscores
- def quux(__x):
- # type: (int) -> None
- pass
-
- quux(3) # Fine
- quux(__x=3) # Error
-
- # This is how you annotate a callable (function) value
- x = f # type: Callable[[int, float], float]
-
- # A generator function that yields ints is secretly just a function that
- # returns an iterator of ints, so that's how we annotate it
- def g(n):
- # type: (int) -> Iterator[int]
- i = 0
- while i < n:
- yield i
- i += 1
-
- # There's an alternative syntax for functions with many arguments
- def send_email(address, # type: Union[str, List[str]]
- sender, # type: str
- cc, # type: Optional[List[str]]
- bcc, # type: Optional[List[str]]
- subject='',
- body=None # type: List[str]
- ):
- # type: (...) -> bool
- ...
-
-When you're puzzled or when things are complicated
-**************************************************
-
-.. code-block:: python
-
- from typing import Union, Any, List, Optional, cast
-
- # To find out what type mypy infers for an expression anywhere in
- # your program, wrap it in reveal_type(). Mypy will print an error
- # message with the type; remove it again before running the code.
- reveal_type(1) # -> Revealed type is "builtins.int"
-
- # Use Union when something could be one of a few types
- x = [3, 5, "test", "fun"] # type: List[Union[int, str]]
-
- # Use Any if you don't know the type of something or it's too
- # dynamic to write a type for
- x = mystery_function() # type: Any
-
- # If you initialize a variable with an empty container or "None"
- # you may have to help mypy a bit by providing a type annotation
- x = [] # type: List[str]
- x = None # type: Optional[str]
-
- # This makes each positional arg and each keyword arg a "str"
- def call(self, *args, **kwargs):
- # type: (*str, **str) -> str
- request = make_request(*args, **kwargs)
- return self.do_api_query(request)
-
- # Use a "type: ignore" comment to suppress errors on a given line,
- # when your code confuses mypy or runs into an outright bug in mypy.
- # Good practice is to comment every "ignore" with a bug link
- # (in mypy, typeshed, or your own code) or an explanation of the issue.
- x = confusing_function() # type: ignore # https://github.com/python/mypy/issues/1167
-
- # "cast" is a helper function that lets you override the inferred
- # type of an expression. It's only for mypy -- there's no runtime check.
- a = [4]
- b = cast(List[int], a) # Passes fine
- c = cast(List[str], a) # Passes fine (no runtime check)
- reveal_type(c) # -> Revealed type is "builtins.list[builtins.str]"
- print c # -> [4]; the object is not cast
-
- # If you want dynamic attributes on your class, have it override "__setattr__"
- # or "__getattr__" in a stub or in your source code.
- #
- # "__setattr__" allows for dynamic assignment to names
- # "__getattr__" allows for dynamic access to names
- class A:
- # This will allow assignment to any A.x, if x is the same type as "value"
- # (use "value: Any" to allow arbitrary types)
- def __setattr__(self, name, value):
- # type: (str, int) -> None
- ...
-
- a.foo = 42 # Works
- a.bar = 'Ex-parrot' # Fails type checking
-
-
-Standard "duck types"
-*********************
-
-In typical Python code, many functions that can take a list or a dict
-as an argument only need their argument to be somehow "list-like" or
-"dict-like". A specific meaning of "list-like" or "dict-like" (or
-something-else-like) is called a "duck type", and several duck types
-that are common in idiomatic Python are standardized.
-
-.. code-block:: python
-
- from typing import Mapping, MutableMapping, Sequence, Iterable
-
- # Use Iterable for generic iterables (anything usable in "for"),
- # and Sequence where a sequence (supporting "len" and "__getitem__") is
- # required
- def f(iterable_of_ints):
- # type: (Iterable[int]) -> List[str]
- return [str(x) for x in iterator_of_ints]
-
- f(range(1, 3))
-
- # Mapping describes a dict-like object (with "__getitem__") that we won't
- # mutate, and MutableMapping one (with "__setitem__") that we might
- def f(my_dict):
- # type: (Mapping[int, str]) -> List[int]
- return list(my_dict.keys())
-
- f({3: 'yes', 4: 'no'})
-
- def f(my_mapping):
- # type: (MutableMapping[int, str]) -> Set[str]
- my_mapping[5] = 'maybe'
- return set(my_mapping.values())
-
- f({3: 'yes', 4: 'no'})
-
-
-Classes
-*******
-
-.. code-block:: python
-
- class MyClass(object):
- # For instance methods, omit type for "self"
- def my_method(self, num, str1):
- # type: (int, str) -> str
- return num * str1
-
- # The "__init__" method doesn't return anything, so it gets return
- # type "None" just like any other method that doesn't return anything
- def __init__(self):
- # type: () -> None
- pass
-
- # User-defined classes are valid as types in annotations
- x = MyClass() # type: MyClass
-
-
-Miscellaneous
-*************
-
-.. code-block:: python
-
- import sys
- import re
- from typing import Match, AnyStr, IO
-
- # "typing.Match" describes regex matches from the re module
- x = re.match(r'[0-9]+', "15") # type: Match[str]
-
- # Use IO[] for functions that should accept or return any
- # object that comes from an open() call (IO[] does not
- # distinguish between reading, writing or other modes)
- def get_sys_IO(mode='w'):
- # type: (str) -> IO[str]
- if mode == 'w':
- return sys.stdout
- elif mode == 'r':
- return sys.stdin
- else:
- return sys.stdout
-
-
-Decorators
-**********
-
-Decorator functions can be expressed via generics. See
-:ref:`declaring-decorators` for the more details.
-
-.. code-block:: python
-
- from typing import Any, Callable, TypeVar
-
- F = TypeVar('F', bound=Callable[..., Any])
-
- def bare_decorator(func): # type: (F) -> F
- ...
-
- def decorator_args(url): # type: (str) -> Callable[[F], F]
- ...
diff --git a/docs/source/cheat_sheet_py3.rst b/docs/source/cheat_sheet_py3.rst
index e14cde7d50df..5aa1770512b8 100644
--- a/docs/source/cheat_sheet_py3.rst
+++ b/docs/source/cheat_sheet_py3.rst
@@ -1,38 +1,27 @@
.. _cheat-sheet-py3:
-Type hints cheat sheet (Python 3)
-=================================
-
-This document is a quick cheat sheet showing how the :pep:`484` type
-annotation notation represents various common types in Python 3.
-
-.. note::
-
- Technically many of the type annotations shown below are redundant,
- because mypy can derive them from the type of the expression. So
- many of the examples have a dual purpose: show how to write the
- annotation, and show the inferred types.
+Type hints cheat sheet
+======================
+This document is a quick cheat sheet showing how to use type
+annotations for various common types in Python.
Variables
*********
-Python 3.6 introduced a syntax for annotating variables in :pep:`526`
-and we use it in most examples.
+Technically many of the type annotations shown below are redundant,
+since mypy can usually infer the type of a variable from its value.
+See :ref:`type-inference-and-annotations` for more details.
.. code-block:: python
- # This is how you declare the type of a variable type in Python 3.6
+ # This is how you declare the type of a variable
age: int = 1
- # In Python 3.5 and earlier you can use a type comment instead
- # (equivalent to the previous definition)
- age = 1 # type: int
-
# You don't need to initialize a variable to annotate it
a: int # Ok (no value at runtime until assigned)
- # The latter is useful in conditional branches
+ # Doing so is useful in conditional branches
child: bool
if age < 18:
child = True
@@ -40,48 +29,52 @@ and we use it in most examples.
child = False
-Built-in types
-**************
+Useful built-in types
+*********************
.. code-block:: python
-
- from typing import List, Set, Dict, Tuple, Optional
-
- # For simple built-in types, just use the name of the type
+ # For most types, just use the name of the type.
+ # Note that mypy can usually infer the type of a variable from its value,
+ # so technically these annotations are redundant
x: int = 1
x: float = 1.0
x: bool = True
x: str = "test"
x: bytes = b"test"
- # For collections, the type of the collection item is in brackets
- # (Python 3.9+)
+ # For collections on Python 3.9+, the type of the collection item is in brackets
x: list[int] = [1]
x: set[int] = {6, 7}
- # In Python 3.8 and earlier, the name of the collection type is
- # capitalized, and the type is imported from the 'typing' module
- x: List[int] = [1]
- x: Set[int] = {6, 7}
-
- # Same as above, but with type comment syntax (Python 3.5 and earlier)
- x = [1] # type: List[int]
-
# For mappings, we need the types of both keys and values
x: dict[str, float] = {"field": 2.0} # Python 3.9+
- x: Dict[str, float] = {"field": 2.0}
# For tuples of fixed size, we specify the types of all the elements
x: tuple[int, str, float] = (3, "yes", 7.5) # Python 3.9+
- x: Tuple[int, str, float] = (3, "yes", 7.5)
# For tuples of variable size, we use one type and ellipsis
x: tuple[int, ...] = (1, 2, 3) # Python 3.9+
+
+ # On Python 3.8 and earlier, the name of the collection type is
+ # capitalized, and the type is imported from the 'typing' module
+ from typing import List, Set, Dict, Tuple
+ x: List[int] = [1]
+ x: Set[int] = {6, 7}
+ x: Dict[str, float] = {"field": 2.0}
+ x: Tuple[int, str, float] = (3, "yes", 7.5)
x: Tuple[int, ...] = (1, 2, 3)
- # Use Optional[] for values that could be None
- x: Optional[str] = some_function()
+ from typing import Union, Optional
+
+ # On Python 3.10+, use the | operator when something could be one of a few types
+ x: list[int | str] = [3, 5, "test", "fun"] # Python 3.10+
+ # On earlier versions, use Union
+ x: list[Union[int, str]] = [3, 5, "test", "fun"]
+
+ # Use Optional[X] for a value that could be None
+ # Optional[X] is the same as X | None or Union[X, None]
+ x: Optional[str] = "something" if some_condition() else None
# Mypy understands a value can't be None in an if-statement
if x is not None:
print(x.upper())
@@ -92,8 +85,6 @@ Built-in types
Functions
*********
-Python 3 supports an annotation syntax for function declarations.
-
.. code-block:: python
from typing import Callable, Iterator, Union, Optional
@@ -106,16 +97,23 @@ Python 3 supports an annotation syntax for function declarations.
def plus(num1: int, num2: int) -> int:
return num1 + num2
- # Add default value for an argument after the type annotation
- def f(num1: int, my_float: float = 3.5) -> float:
- return num1 + my_float
+ # If a function does not return a value, use None as the return type
+ # Default value for an argument goes after the type annotation
+ def show(value: str, excitement: int = 10) -> None:
+ print(value + "!" * excitement)
+
+    # Note that arguments without a type are dynamically typed (treated as Any)
+    # and that functions without any annotations are not checked
+ def untyped(x):
+ x.anything() + 1 + "string" # no errors
# This is how you annotate a callable (function) value
x: Callable[[int, float], float] = f
+ def register(callback: Callable[[str], int]) -> None: ...
# A generator function that yields ints is secretly just a function that
# returns an iterator of ints, so that's how we annotate it
- def g(n: int) -> Iterator[int]:
+ def gen(n: int) -> Iterator[int]:
i = 0
while i < n:
yield i
@@ -126,78 +124,140 @@ Python 3 supports an annotation syntax for function declarations.
sender: str,
cc: Optional[list[str]],
bcc: Optional[list[str]],
- subject='',
+ subject: str = '',
body: Optional[list[str]] = None
) -> bool:
...
- # An argument can be declared positional-only by giving it a name
- # starting with two underscores:
- def quux(__x: int) -> None:
+ # Mypy understands positional-only and keyword-only arguments
+ # Positional-only arguments can also be marked by using a name starting with
+ # two underscores
+    def quux(x: int, /, *, y: int) -> None:
pass
- quux(3) # Fine
- quux(__x=3) # Error
+ quux(3, y=5) # Ok
+ quux(3, 5) # error: Too many positional arguments for "quux"
+ quux(x=3, y=5) # error: Unexpected keyword argument "x" for "quux"
+
+ # This says each positional arg and each keyword arg is a "str"
+ def call(self, *args: str, **kwargs: str) -> str:
+ reveal_type(args) # Revealed type is "tuple[str, ...]"
+ reveal_type(kwargs) # Revealed type is "dict[str, str]"
+ request = make_request(*args, **kwargs)
+ return self.do_api_query(request)
+
+Classes
+*******
+
+.. code-block:: python
+
+ class BankAccount:
+ # The "__init__" method doesn't return anything, so it gets return
+ # type "None" just like any other method that doesn't return anything
+ def __init__(self, account_name: str, initial_balance: int = 0) -> None:
+ # mypy will infer the correct types for these instance variables
+ # based on the types of the parameters.
+ self.account_name = account_name
+ self.balance = initial_balance
+
+ # For instance methods, omit type for "self"
+ def deposit(self, amount: int) -> None:
+ self.balance += amount
+
+ def withdraw(self, amount: int) -> None:
+ self.balance -= amount
+
+ # User-defined classes are valid as types in annotations
+ account: BankAccount = BankAccount("Alice", 400)
+ def transfer(src: BankAccount, dst: BankAccount, amount: int) -> None:
+ src.withdraw(amount)
+ dst.deposit(amount)
+
+ # Functions that accept BankAccount also accept any subclass of BankAccount!
+ class AuditedBankAccount(BankAccount):
+ # You can optionally declare instance variables in the class body
+ audit_log: list[str]
+ # This is an instance variable with a default value
+ auditor_name: str = "The Spanish Inquisition"
+
+ def __init__(self, account_name: str, initial_balance: int = 0) -> None:
+ super().__init__(account_name, initial_balance)
+ self.audit_log: list[str] = []
+
+ def deposit(self, amount: int) -> None:
+ self.audit_log.append(f"Deposited {amount}")
+ self.balance += amount
+
+ def withdraw(self, amount: int) -> None:
+ self.audit_log.append(f"Withdrew {amount}")
+ self.balance -= amount
+
+ audited = AuditedBankAccount("Bob", 300)
+ transfer(audited, account, 100) # type checks!
+
+    # You can use the ClassVar annotation to declare a class variable
+    from typing import ClassVar
+    class Car:
+ seats: ClassVar[int] = 4
+ passengers: ClassVar[list[str]]
+
+ # If you want dynamic attributes on your class, have it
+ # override "__setattr__" or "__getattr__"
+ class A:
+ # This will allow assignment to any A.x, if x is the same type as "value"
+ # (use "value: Any" to allow arbitrary types)
+ def __setattr__(self, name: str, value: int) -> None: ...
+
+ # This will allow access to any A.x, if x is compatible with the return type
+ def __getattr__(self, name: str) -> int: ...
+
+    a = A()
+    a.foo = 42  # Works
+ a.bar = 'Ex-parrot' # Fails type checking
When you're puzzled or when things are complicated
**************************************************
.. code-block:: python
- from typing import Union, Any, Optional, cast
+ from typing import Union, Any, Optional, TYPE_CHECKING, cast
# To find out what type mypy infers for an expression anywhere in
# your program, wrap it in reveal_type(). Mypy will print an error
# message with the type; remove it again before running the code.
- reveal_type(1) # -> Revealed type is "builtins.int"
-
- # Use Union when something could be one of a few types
- x: list[Union[int, str]] = [3, 5, "test", "fun"]
-
- # Use Any if you don't know the type of something or it's too
- # dynamic to write a type for
- x: Any = mystery_function()
+ reveal_type(1) # Revealed type is "builtins.int"
# If you initialize a variable with an empty container or "None"
- # you may have to help mypy a bit by providing a type annotation
+ # you may have to help mypy a bit by providing an explicit type annotation
x: list[str] = []
x: Optional[str] = None
- # This makes each positional arg and each keyword arg a "str"
- def call(self, *args: str, **kwargs: str) -> str:
- request = make_request(*args, **kwargs)
- return self.do_api_query(request)
+ # Use Any if you don't know the type of something or it's too
+ # dynamic to write a type for
+ x: Any = mystery_function()
+ # Mypy will let you do anything with x!
+ x.whatever() * x["you"] + x("want") - any(x) and all(x) is super # no errors
# Use a "type: ignore" comment to suppress errors on a given line,
# when your code confuses mypy or runs into an outright bug in mypy.
- # Good practice is to comment every "ignore" with a bug link
- # (in mypy, typeshed, or your own code) or an explanation of the issue.
- x = confusing_function() # type: ignore # https://github.com/python/mypy/issues/1167
+ # Good practice is to add a comment explaining the issue.
+ x = confusing_function() # type: ignore # confusing_function won't return None here because ...
# "cast" is a helper function that lets you override the inferred
# type of an expression. It's only for mypy -- there's no runtime check.
a = [4]
b = cast(list[int], a) # Passes fine
- c = cast(list[str], a) # Passes fine (no runtime check)
- reveal_type(c) # -> Revealed type is "builtins.list[builtins.str]"
- print(c) # -> [4]; the object is not cast
-
- # If you want dynamic attributes on your class, have it override "__setattr__"
- # or "__getattr__" in a stub or in your source code.
- #
- # "__setattr__" allows for dynamic assignment to names
- # "__getattr__" allows for dynamic access to names
- class A:
- # This will allow assignment to any A.x, if x is the same type as "value"
- # (use "value: Any" to allow arbitrary types)
- def __setattr__(self, name: str, value: int) -> None: ...
-
- # This will allow access to any A.x, if x is compatible with the return type
- def __getattr__(self, name: str) -> int: ...
-
- a.foo = 42 # Works
- a.bar = 'Ex-parrot' # Fails type checking
+ c = cast(list[str], a) # Passes fine despite being a lie (no runtime check)
+ reveal_type(c) # Revealed type is "builtins.list[builtins.str]"
+    print(c)  # Still prints [4] ... the object is not changed or cast at runtime
+
+ # Use "TYPE_CHECKING" if you want to have code that mypy can see but will not
+ # be executed at runtime (or to have code that mypy can't see)
+ if TYPE_CHECKING:
+ import json
+ else:
+ import orjson as json # mypy is unaware of this
+In some cases type annotations can cause issues at runtime; see
+:ref:`runtime_troubles` for ways of dealing with this.
Standard "duck types"
*********************
@@ -223,7 +283,7 @@ that are common in idiomatic Python are standardized.
# Mapping describes a dict-like object (with "__getitem__") that we won't
# mutate, and MutableMapping one (with "__setitem__") that we might
def f(my_mapping: Mapping[int, str]) -> list[int]:
- my_mapping[5] = 'maybe' # if we try this, mypy will throw an error...
+ my_mapping[5] = 'maybe' # mypy will complain about this line...
return list(my_mapping.keys())
f({3: 'yes', 4: 'no'})
@@ -237,40 +297,6 @@ that are common in idiomatic Python are standardized.
You can even make your own duck types using :ref:`protocol-types`.
-Classes
-*******
-
-.. code-block:: python
-
- class MyClass:
- # You can optionally declare instance variables in the class body
- attr: int
- # This is an instance variable with a default value
- charge_percent: int = 100
-
- # The "__init__" method doesn't return anything, so it gets return
- # type "None" just like any other method that doesn't return anything
- def __init__(self) -> None:
- ...
-
- # For instance methods, omit type for "self"
- def my_method(self, num: int, str1: str) -> str:
- return num * str1
-
- # User-defined classes are valid as types in annotations
- x: MyClass = MyClass()
-
- # You can use the ClassVar annotation to declare a class variable
- class Car:
- seats: ClassVar[int] = 4
- passengers: ClassVar[list[str]]
-
- # You can also declare the type of an attribute in "__init__"
- class Box:
- def __init__(self) -> None:
- self.items: list[str] = []
-
-
Coroutines and asyncio
**********************
@@ -283,7 +309,7 @@ See :ref:`async-and-await` for the full detail on typing coroutines and asynchro
# A coroutine is typed like a normal function
async def countdown35(tag: str, count: int) -> str:
while count > 0:
- print('T-minus {} ({})'.format(count, tag))
+ print(f'T-minus {count} ({tag})')
await asyncio.sleep(0.1)
count -= 1
return "Blastoff!"
@@ -295,11 +321,7 @@ Miscellaneous
.. code-block:: python
import sys
- import re
- from typing import Match, IO
-
- # "typing.Match" describes regex matches from the re module
- x: Match[str] = re.match(r'[0-9]+', "15")
+ from typing import IO
# Use IO[] for functions that should accept or return any
# object that comes from an open() call (IO[] does not
@@ -314,7 +336,7 @@ Miscellaneous
# Forward references are useful if you want to reference a class before
# it is defined
- def f(foo: A) -> int: # This will fail
+    def f(foo: A) -> int:  # This will fail at runtime with NameError: name 'A' is not defined
...
class A:
diff --git a/docs/source/class_basics.rst b/docs/source/class_basics.rst
index 3c12b4b06d9b..1d4164192318 100644
--- a/docs/source/class_basics.rst
+++ b/docs/source/class_basics.rst
@@ -1,3 +1,5 @@
+.. _class-basics:
+
Class basics
============
@@ -42,19 +44,6 @@ As in Python generally, a variable defined in the class body can be used
as a class or an instance variable. (As discussed in the next section, you
can override this with a :py:data:`~typing.ClassVar` annotation.)
-Type comments work as well, if you need to support Python versions earlier
-than 3.6:
-
-.. code-block:: python
-
- class A:
- x = None # type: list[int] # Declare attribute 'x' of type list[int]
-
-Note that attribute definitions in the class body that use a type comment
-are special: a ``None`` value is valid as the initializer, even though
-the declared type is not optional. This should be used sparingly, as this can
-result in ``None``-related runtime errors that mypy can't detect.
-
Similarly, you can give explicit types to instance variables defined
in a method:
@@ -158,6 +147,22 @@ a :py:data:`~typing.ClassVar` annotation, but this might not do what you'd expec
In this case the type of the attribute will be implicitly ``Any``.
This behavior will change in the future, since it's surprising.
+An explicit :py:data:`~typing.ClassVar` may be particularly handy to distinguish
+between class and instance variables with callable types. For example:
+
+.. code-block:: python
+
+ from typing import Callable, ClassVar
+
+ class A:
+ foo: Callable[[int], None]
+ bar: ClassVar[Callable[[A, int], None]]
+ bad: Callable[[A], None]
+
+ A().foo(42) # OK
+ A().bar(42) # OK
+ A().bad() # Error: Too few arguments
+
.. note::
A :py:data:`~typing.ClassVar` type parameter cannot include type variables:
``ClassVar[T]`` and ``ClassVar[list[T]]``
@@ -255,11 +260,6 @@ function decorator. Example:
x = Animal() # Error: 'Animal' is abstract due to 'eat' and 'can_walk'
y = Cat() # OK
-.. note::
-
- In Python 2.7 you have to use :py:func:`@abc.abstractproperty ` to define
- an abstract property.
-
Note that mypy performs checking for unimplemented abstract methods
even if you omit the :py:class:`~abc.ABCMeta` metaclass. This can be useful if the
metaclass would cause runtime metaclass conflicts.
@@ -308,6 +308,26 @@ however:
in this case, but any attempt to construct an instance will be
flagged as an error.
+Mypy allows you to omit the body for an abstract method, but if you do so,
+it is unsafe to call such method via ``super()``. For example:
+
+.. code-block:: python
+
+ from abc import abstractmethod
+ class Base:
+ @abstractmethod
+ def foo(self) -> int: pass
+ @abstractmethod
+ def bar(self) -> int:
+ return 0
+ class Sub(Base):
+ def foo(self) -> int:
+ return super().foo() + 1 # error: Call to abstract method "foo" of "Base"
+ # with trivial body via super() is unsafe
+ @abstractmethod
+ def bar(self) -> int:
+ return super().bar() + 1 # This is OK however.
+
A class can inherit any number of classes, both abstract and
concrete. As with normal overrides, a dynamically typed method can
override or implement a statically typed method defined in any base
diff --git a/docs/source/command_line.rst b/docs/source/command_line.rst
index 36c13910c21a..31d23db204eb 100644
--- a/docs/source/command_line.rst
+++ b/docs/source/command_line.rst
@@ -129,30 +129,12 @@ Import discovery
The following flags customize how exactly mypy discovers and follows
imports.
-.. option:: --namespace-packages
-
- This flag enables import discovery to use namespace packages (see
- :pep:`420`). In particular, this allows discovery of imported
- packages that don't have an ``__init__.py`` (or ``__init__.pyi``)
- file.
-
- Namespace packages are found (using the PEP 420 rules, which
- prefers "classic" packages over namespace packages) along the
- module search path -- this is primarily set from the source files
- passed on the command line, the ``MYPYPATH`` environment variable,
- and the :confval:`mypy_path` config option.
-
- This flag affects how mypy finds modules and packages explicitly passed on
- the command line. It also affects how mypy determines fully qualified module
- names for files passed on the command line. See :ref:`Mapping file paths to
- modules ` for details.
-
.. option:: --explicit-package-bases
This flag tells mypy that top-level packages will be based in either the
current directory, or a member of the ``MYPYPATH`` environment variable or
:confval:`mypy_path` config option. This option is only useful in
- conjunction with :option:`--namespace-packages`. See :ref:`Mapping file
+    the absence of ``__init__.py`` files. See :ref:`Mapping file
paths to modules ` for details.
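+
+    For example (the ``src`` layout is illustrative)::
+
+        MYPYPATH=src mypy --explicit-package-bases src
+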
.. option:: --ignore-missing-imports
@@ -212,6 +194,41 @@ imports.
By default, mypy will suppress any error messages generated within :pep:`561`
compliant packages. Adding this flag will disable this behavior.
+.. option:: --fast-module-lookup
+
+ The default logic used to scan through search paths to resolve imports has a
+    quadratic worst-case behavior in some cases, which is triggered, for instance,
+ by a large number of folders sharing a top-level namespace as in::
+
+ foo/
+ company/
+ foo/
+ a.py
+ bar/
+ company/
+ bar/
+ b.py
+ baz/
+ company/
+ baz/
+ c.py
+ ...
+
+ If you are in this situation, you can enable an experimental fast path by
+ setting the :option:`--fast-module-lookup` option.
+
+
+.. option:: --no-namespace-packages
+
+ This flag disables import discovery of namespace packages (see :pep:`420`).
+ In particular, this prevents discovery of packages that don't have an
+ ``__init__.py`` (or ``__init__.pyi``) file.
+
+ This flag affects how mypy finds modules and packages explicitly passed on
+ the command line. It also affects how mypy determines fully qualified module
+ names for files passed on the command line. See :ref:`Mapping file paths to
+  modules <mapping-paths-to-modules>` for details.
+
.. _platform-configuration:
@@ -228,23 +245,13 @@ For more information on how to use these flags, see :ref:`version_and_platform_c
This flag will make mypy type check your code as if it were
run under Python version X.Y. Without this option, mypy will default to using
- whatever version of Python is running mypy. Note that the :option:`-2` and
- :option:`--py2` flags are aliases for :option:`--python-version 2.7 <--python-version>`.
+ whatever version of Python is running mypy.
This flag will attempt to find a Python executable of the corresponding
version to search for :pep:`561` compliant packages. If you'd like to
disable this, use the :option:`--no-site-packages` flag (see
:ref:`import-discovery` for more details).
-.. option:: -2, --py2
-
- Equivalent to running :option:`--python-version 2.7 <--python-version>`.
-
- .. note::
-
- To check Python 2 code with mypy, you'll need to install mypy with
- ``pip install 'mypy[python2]'``.
-
.. option:: --platform PLATFORM
This flag will make mypy type check your code as if it were
@@ -377,29 +384,23 @@ None and Optional handling
The following flags adjust how mypy handles values of type ``None``.
For more details, see :ref:`no_strict_optional`.
-.. _no-implicit-optional:
+.. _implicit-optional:
-.. option:: --no-implicit-optional
+.. option:: --implicit-optional
- This flag causes mypy to stop treating arguments with a ``None``
+ This flag causes mypy to treat arguments with a ``None``
default value as having an implicit :py:data:`~typing.Optional` type.
- For example, by default mypy will assume that the ``x`` parameter
- is of type ``Optional[int]`` in the code snippet below since
- the default parameter is ``None``:
+  For example, if this flag is set, mypy would assume that the ``x``
+  parameter is actually of type ``Optional[int]`` in the code snippet below,
+  since its default value is ``None``:
.. code-block:: python
def foo(x: int = None) -> None:
print(x)
- If this flag is set, the above snippet will no longer type check:
- we must now explicitly indicate that the type is ``Optional[int]``:
-
- .. code-block:: python
-
- def foo(x: Optional[int] = None) -> None:
- print(x)
+ **Note:** This was disabled by default starting in mypy 0.980.
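+
+   With implicit ``Optional`` disabled (the default), the annotation in the
+   snippet above must spell the type out explicitly:
+
+   .. code-block:: python
+
+      from typing import Optional
+
+      def foo(x: Optional[int] = None) -> None:
+          print(x)
+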
.. option:: --no-strict-optional
@@ -447,9 +448,10 @@ potentially problematic or redundant in some way.
are when:
- The function has a ``None`` or ``Any`` return type
- - The function has an empty body or a body that is just
- ellipsis (``...``). Empty functions are often used for
- abstract methods.
+ - The function has an empty body and is marked as an abstract method,
+ is in a protocol class, or is in a stub file
+ - The execution path can never return; for example, if an exception
+ is always raised
Passing in :option:`--no-warn-no-return` will disable these error
messages in all cases.
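+
+   For instance, a minimal sketch of when this warning fires and when the
+   exceptions above apply:
+
+   .. code-block:: python
+
+      from typing import NoReturn
+
+      def implicit(x: int) -> int:  # Error: Missing return statement
+          if x > 0:
+              return x
+
+      def none_return(x: int) -> None:  # No error: "None" return type
+          if x > 0:
+              print(x)
+
+      def never_returns() -> NoReturn:  # No error: can never return
+          raise RuntimeError("always raised")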
@@ -548,11 +550,11 @@ of the above sections.
from typing import Optional
a = None # Need type annotation here if using --local-partial-types
- b = None # type: Optional[int]
+ b: Optional[int] = None
class Foo:
bar = None # Need type annotation here if using --local-partial-types
- baz = None # type: Optional[int]
+ baz: Optional[int] = None
def __init__(self) -> None:
self.bar = 1
@@ -616,6 +618,7 @@ of the above sections.
.. option:: --disable-error-code
This flag allows disabling one or multiple error codes globally.
+ See :ref:`error-codes` for more information.
.. code-block:: python
@@ -623,20 +626,21 @@ of the above sections.
x = 'a string'
x.trim() # error: "str" has no attribute "trim" [attr-defined]
- # --disable-error-code attr-defined
+ # When using --disable-error-code attr-defined
x = 'a string'
x.trim()
.. option:: --enable-error-code
This flag allows enabling one or multiple error codes globally.
+ See :ref:`error-codes` for more information.
- Note: This flag will override disabled error codes from the --disable-error-code
- flag
+ Note: This flag will override disabled error codes from the
+  :option:`--disable-error-code <mypy --disable-error-code>` flag.
.. code-block:: python
- # --disable-error-code attr-defined
+ # When using --disable-error-code attr-defined
x = 'a string'
x.trim()
@@ -680,9 +684,17 @@ in error messages.
main.py:12:9: error: Unsupported operand types for / ("int" and "str")
-.. option:: --show-error-codes
+.. option:: --show-error-end
+
+  This flag will make mypy show not just the start position where
+ an error was detected, but also the end position of the relevant expression.
+ This way various tools can easily highlight the whole error span. The format is
+ ``file:line:column:end_line:end_column``. This option implies
+ ``--show-column-numbers``.
+
+.. option:: --hide-error-codes
- This flag will add an error code ``[]`` to error messages. The error
+  This flag will hide the error code ``[<code>]`` from error messages. By default, the error
code is shown after each error message::
prog.py:1: error: "str" has no attribute "trim" [attr-defined]
@@ -807,7 +819,8 @@ in developing or debugging mypy internals.
submitting them upstream, but also allows you to use a forked version of
typeshed.
- Note that this doesn't affect third-party library stubs.
+ Note that this doesn't affect third-party library stubs. To test third-party stubs,
+ for example try ``MYPYPATH=stubs/six mypy ...``.
.. _warn-incomplete-stub:
diff --git a/docs/source/common_issues.rst b/docs/source/common_issues.rst
index 9d0961894a7e..afb8e7d3ffe1 100644
--- a/docs/source/common_issues.rst
+++ b/docs/source/common_issues.rst
@@ -9,15 +9,6 @@ doesn't work as expected. Statically typed code is often identical to
normal Python code (except for type annotations), but sometimes you need
to do things slightly differently.
-Can't install mypy using pip
-----------------------------
-
-If installation fails, you've probably hit one of these issues:
-
-* Mypy needs Python 3.6 or later to run.
-* You may have to run pip like this:
- ``python3 -m pip install mypy``.
-
.. _annotations_needed:
No errors reported for obviously wrong code
@@ -26,7 +17,9 @@ No errors reported for obviously wrong code
There are several common reasons why obviously wrong code is not
flagged as an error.
-**The function containing the error is not annotated.** Functions that
+**The function containing the error is not annotated.**
+
+Functions that
do not have any annotations (neither for any argument nor for the
return type) are not type-checked, and even the most blatant type
errors (e.g. ``2 + 'a'``) pass silently. The solution is to add
@@ -52,7 +45,9 @@ once you add annotations:
If you don't know what types to add, you can use ``Any``, but beware:
-**One of the values involved has type 'Any'.** Extending the above
+**One of the values involved has type 'Any'.**
+
+Extending the above
example, if we were to leave out the annotation for ``a``, we'd get
no error:
@@ -68,49 +63,52 @@ The reason is that if the type of ``a`` is unknown, the type of
If you're having trouble debugging such situations,
:ref:`reveal_type() <reveal-type>` might come in handy.
-Note that sometimes library stubs have imprecise type information,
-e.g. the :py:func:`pow` builtin returns ``Any`` (see `typeshed issue 285
-`_ for the reason).
+Note that sometimes library stubs with imprecise type information
+can be a source of ``Any`` values.
:py:meth:`__init__ <object.__init__>` **method has no annotated
-arguments or return type annotation.** :py:meth:`__init__ `
-is considered fully-annotated **if at least one argument is annotated**,
-while mypy will infer the return type as ``None``.
-The implication is that, for a :py:meth:`__init__ ` method
-that has no argument, you'll have to explicitly annotate the return type
-as ``None`` to type-check this :py:meth:`__init__ ` method:
+arguments and no return type annotation.**
+
+This is basically a combination of the two cases above, in that ``__init__``
+without annotations can cause ``Any`` types to leak into instance variables:
.. code-block:: python
- def foo(s: str) -> str:
- return s
+ class Bad:
+ def __init__(self):
+ self.value = "asdf"
+ 1 + "asdf" # No error!
- class A():
- def __init__(self, value: str): # Return type inferred as None, considered as typed method
+ bad = Bad()
+ bad.value + 1 # No error!
+ reveal_type(bad) # Revealed type is "__main__.Bad"
+ reveal_type(bad.value) # Revealed type is "Any"
+
+ class Good:
+ def __init__(self) -> None: # Explicitly return None
-        self.value = value
+        self.value = "asdf"
- foo(1) # error: Argument 1 to "foo" has incompatible type "int"; expected "str"
-
- class B():
- def __init__(self): # No argument is annotated, considered as untyped method
- foo(1) # No error!
-
- class C():
- def __init__(self) -> None: # Must specify return type to type-check
- foo(1) # error: Argument 1 to "foo" has incompatible type "int"; expected "str"
-
-**Some imports may be silently ignored**. Another source of
-unexpected ``Any`` values are the :option:`--ignore-missing-imports
-` and :option:`--follow-imports=skip
-` flags. When you use :option:`--ignore-missing-imports `,
-any imported module that cannot be found is silently replaced with
-``Any``. When using :option:`--follow-imports=skip ` the same is true for
-modules for which a ``.py`` file is found but that are not specified
-on the command line. (If a ``.pyi`` stub is found it is always
-processed normally, regardless of the value of
-:option:`--follow-imports `.) To help debug the former situation (no
-module found at all) leave out :option:`--ignore-missing-imports `; to get
-clarity about the latter use :option:`--follow-imports=error `. You can
-read up about these and other useful flags in :ref:`command-line`.
+
+
+**Some imports may be silently ignored**.
+
+A common source of unexpected ``Any`` values is the
+:option:`--ignore-missing-imports <mypy --ignore-missing-imports>` flag.
+
+When you use :option:`--ignore-missing-imports <mypy --ignore-missing-imports>`,
+any imported module that cannot be found is silently replaced with ``Any``.
+
+To help debug this, simply leave out
+:option:`--ignore-missing-imports <mypy --ignore-missing-imports>`.
+As mentioned in :ref:`fix-missing-imports`, setting ``ignore_missing_imports=True``
+on a per-module basis will make bad surprises less likely and is highly encouraged.
+
+Use of the :option:`--follow-imports=skip <mypy --follow-imports>` flag can also
+cause problems. Use of this flag is strongly discouraged and only required in
+relatively niche situations. See :ref:`follow-imports` for more information.
+
+**mypy considers some of your code unreachable**.
+
+See :ref:`unreachable` for more information.
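+
+For example, a small sketch of one way this happens (assuming you are not
+type checking on Windows):
+
+.. code-block:: python
+
+    import sys
+
+    def f() -> None:
+        if sys.platform == "win32":
+            # When checking on another platform, mypy treats this block
+            # as unreachable and reports no error for the line below.
+            1 + "a"
+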
**A function annotated as returning a non-optional type returns 'None'
and mypy doesn't complain**.
@@ -186,29 +184,16 @@ over ``.py`` files.
Ignoring a whole file
---------------------
-A ``# type: ignore`` comment at the top of a module (before any statements,
-including imports or docstrings) has the effect of ignoring the *entire* module.
-
-.. code-block:: python
-
- # type: ignore
+* To only ignore errors, use a top-level ``# mypy: ignore-errors`` comment instead.
+* To only ignore errors with a specific error code, use a top-level
+ ``# mypy: disable-error-code=...`` comment.
+* To replace the contents of a module with ``Any``, use a per-module ``follow_imports = skip``.
+  See :ref:`Following imports <follow-imports>` for details.
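+
+For example, a minimal sketch of the first approach:
+
+.. code-block:: python
+
+    # mypy: ignore-errors
+
+    # This module is still analyzed, so names imported from it keep their
+    # types, but errors reported within it are suppressed.
+
+    def f() -> int:
+        return "not an int"  # No error reported here
+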
- import foo
-
- foo.bar()
-
-Unexpected errors about 'None' and/or 'Optional' types
-------------------------------------------------------
-
-Starting from mypy 0.600, mypy uses
-:ref:`strict optional checking ` by default,
-and the ``None`` value is not compatible with non-optional types.
-It's easy to switch back to the older behavior where ``None`` was
-compatible with arbitrary types (see :ref:`no_strict_optional`).
-You can also fall back to this behavior if strict optional
-checking would require a large number of ``assert foo is not None``
-checks to be inserted, and you want to minimize the number
-of code changes required to get a clean mypy run.
+Note that a ``# type: ignore`` comment at the top of a module (before any statements,
+including imports or docstrings) has the effect of ignoring the entire contents of the module.
+This behaviour can be surprising and result in
+"Module ... has no attribute ... [attr-defined]" errors.
Issues with code at runtime
---------------------------
@@ -267,20 +252,20 @@ Redefinitions with incompatible types
Each name within a function only has a single 'declared' type. You can
reuse for loop indices etc., but if you want to use a variable with
-multiple types within a single function, you may need to declare it
-with the ``Any`` type.
+multiple types within a single function, you may need to instead use
+multiple variables (or maybe declare the variable with an ``Any`` type).
.. code-block:: python
def f() -> None:
n = 1
...
- n = 'x' # Type error: n has type int
+ n = 'x' # error: Incompatible types in assignment (expression has type "str", variable has type "int")
.. note::
- This limitation could be lifted in a future mypy
- release.
+   Using the :option:`--allow-redefinition <mypy --allow-redefinition>`
+   flag can suppress this error in several cases.
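+
+For instance, a sketch of a redefinition that
+:option:`--allow-redefinition <mypy --allow-redefinition>` permits:
+
+.. code-block:: python
+
+    # Use "mypy --allow-redefinition ..."
+
+    def process(data: bytes) -> str:
+        # An unconditional redefinition in the same block may change the type
+        data = data.decode("utf-8")
+        return data.strip()
+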
Note that you can redefine a variable with a more *precise* or a more
concrete type. For example, you can redefine a sequence (which does
@@ -294,6 +279,8 @@ not support ``sort()``) as a list and sort it in-place:
# Type of x is List[int] here.
x.sort() # Okay!
+See :ref:`type-narrowing` for more information.
+
.. _variance:
Invariance vs covariance
@@ -345,24 +332,24 @@ Declaring a supertype as variable type
Sometimes the inferred type is a subtype (subclass) of the desired
type. The type inference uses the first assignment to infer the type
-of a name (assume here that ``Shape`` is the base class of both
-``Circle`` and ``Triangle``):
+of a name:
.. code-block:: python
- shape = Circle() # Infer shape to be Circle
- ...
- shape = Triangle() # Type error: Triangle is not a Circle
+ class Shape: ...
+ class Circle(Shape): ...
+ class Triangle(Shape): ...
+
+ shape = Circle() # mypy infers the type of shape to be Circle
+ shape = Triangle() # error: Incompatible types in assignment (expression has type "Triangle", variable has type "Circle")
You can just give an explicit type for the variable in cases such as the
above example:
.. code-block:: python
- shape = Circle() # type: Shape # The variable s can be any Shape,
- # not just Circle
- ...
- shape = Triangle() # OK
+   shape: Shape = Circle()  # The variable shape can be any Shape, not just Circle
+ shape = Triangle() # OK
Complex type tests
------------------
@@ -441,10 +428,8 @@ More specifically, mypy will understand the use of :py:data:`sys.version_info` a
# Distinguishing between different versions of Python:
if sys.version_info >= (3, 8):
# Python 3.8+ specific definitions and imports
- elif sys.version_info[0] >= 3:
- # Python 3 specific definitions and imports
else:
- # Python 2 specific definitions and imports
+ # Other definitions and imports
# Distinguishing between different operating systems:
if sys.platform.startswith("linux"):
@@ -484,9 +469,9 @@ operating system as default values for :py:data:`sys.version_info` and
:py:data:`sys.platform`.
To target a different Python version, use the :option:`--python-version X.Y <mypy --python-version>` flag.
-For example, to verify your code typechecks if were run using Python 2, pass
-in :option:`--python-version 2.7 ` from the command line. Note that you do not need
-to have Python 2.7 installed to perform this check.
+For example, to verify your code typechecks if it were run using Python 3.8, pass
+in :option:`--python-version 3.8 <mypy --python-version>` from the command line.
+Note that you do not need to have Python 3.8 installed to perform this check.
To target a different operating system, use the :option:`--platform PLATFORM <mypy --platform>` flag.
For example, to verify your code typechecks if it were run in Windows, pass
@@ -629,7 +614,10 @@ You can install the latest development version of mypy from source. Clone the
git clone https://github.com/python/mypy.git
cd mypy
- sudo python3 -m pip install --upgrade .
+ python3 -m pip install --upgrade .
+
+To install a development version of mypy that is mypyc-compiled, see the
+instructions at the `mypyc wheels repo <https://github.com/mypyc/mypy_mypyc-wheels>`_.
Variables vs type aliases
-------------------------
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 5c3bf94c2f8c..5faefdc92ed1 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -12,8 +12,10 @@
# All configuration values have a default; values that are commented out
# serve to show the default.
-import sys
+from __future__ import annotations
+
import os
+import sys
from sphinx.application import Sphinx
from sphinx.util.docfields import Field
@@ -21,54 +23,54 @@
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
-sys.path.insert(0, os.path.abspath('../..'))
+sys.path.insert(0, os.path.abspath("../.."))
from mypy.version import __version__ as mypy_version
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
+# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
-extensions = ['sphinx.ext.intersphinx']
+extensions = ["sphinx.ext.intersphinx"]
# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
# The suffix of source filenames.
-source_suffix = '.rst'
+source_suffix = ".rst"
# The encoding of source files.
-#source_encoding = 'utf-8-sig'
+# source_encoding = 'utf-8-sig'
# The master toctree document.
-master_doc = 'index'
+master_doc = "index"
# General information about the project.
-project = u'mypy'
-copyright = u'2012-2022 Jukka Lehtosalo and mypy contributors'
+project = "mypy"
+copyright = "2012-2022 Jukka Lehtosalo and mypy contributors"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
-version = mypy_version.split('-')[0]
+version = mypy_version.split("-")[0]
# The full version, including alpha/beta/rc tags.
release = mypy_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
-#language = None
+# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
-#today = ''
+# today = ''
# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
+# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
@@ -76,27 +78,27 @@
# The reST default role (used for this markup: `text`) to use for all
# documents.
-#default_role = None
+# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
-#show_authors = False
+# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
-#keep_warnings = False
+# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
@@ -108,135 +110,127 @@
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
-#html_theme_options = {}
+# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
+# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# " v documentation".
-#html_title = None
+# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
-#html_short_title = None
+# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
-html_logo = "http://mypy-lang.org/static/mypy_light.svg"
+html_logo = "mypy_light.svg"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
-#html_favicon = None
+# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
-#html_static_path = ['_static']
+# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
-#html_extra_path = []
+# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
+# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
-#html_use_smartypants = True
+# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
+# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
-#html_additional_pages = {}
+# html_additional_pages = {}
# If false, no module index is generated.
-#html_domain_indices = True
+# html_domain_indices = True
# If false, no index is generated.
-#html_use_index = True
+# html_use_index = True
# If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False
# If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
+# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
+# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
+# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
+# html_file_suffix = None
# Output file base name for HTML help builder.
-htmlhelp_basename = 'mypydoc'
+htmlhelp_basename = "mypydoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
-
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
-
-# Additional stuff for the LaTeX preamble.
-#'preamble': '',
+ # The paper size ('letterpaper' or 'a4paper').
+ #'papersize': 'letterpaper',
+ # The font size ('10pt', '11pt' or '12pt').
+ #'pointsize': '10pt',
+ # Additional stuff for the LaTeX preamble.
+ #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
-latex_documents = [
- ('index', 'Mypy.tex', u'Mypy Documentation',
- u'Jukka', 'manual'),
-]
+latex_documents = [("index", "Mypy.tex", "Mypy Documentation", "Jukka", "manual")]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
-#latex_logo = None
+# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
-#latex_use_parts = False
+# latex_use_parts = False
# If true, show page references after internal links.
-#latex_show_pagerefs = False
+# latex_show_pagerefs = False
# If true, show URL addresses after external links.
-#latex_show_urls = False
+# latex_show_urls = False
# Documents to append as an appendix to all manuals.
-#latex_appendices = []
+# latex_appendices = []
# If false, no module index is generated.
-#latex_domain_indices = True
+# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
-man_pages = [
- ('index', 'mypy', u'Mypy Documentation',
- [u'Jukka Lehtosalo'], 1)
-]
+man_pages = [("index", "mypy", "Mypy Documentation", ["Jukka Lehtosalo"], 1)]
# If true, show URL addresses after external links.
-#man_show_urls = False
+# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
@@ -245,43 +239,49 @@
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
- ('index', 'Mypy', u'Mypy Documentation',
- u'Jukka', 'Mypy', 'One line description of project.',
- 'Miscellaneous'),
+ (
+ "index",
+ "Mypy",
+ "Mypy Documentation",
+ "Jukka",
+ "Mypy",
+ "One line description of project.",
+ "Miscellaneous",
+ )
]
# Documents to append as an appendix to all manuals.
-#texinfo_appendices = []
+# texinfo_appendices = []
# If false, no module index is generated.
-#texinfo_domain_indices = True
+# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
-#texinfo_show_urls = 'footnote'
+# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
-#texinfo_no_detailmenu = False
+# texinfo_no_detailmenu = False
-rst_prolog = '.. |...| unicode:: U+2026 .. ellipsis\n'
+rst_prolog = ".. |...| unicode:: U+2026 .. ellipsis\n"
intersphinx_mapping = {
- 'python': ('https://docs.python.org/3', None),
- 'six': ('https://six.readthedocs.io', None),
- 'attrs': ('http://www.attrs.org/en/stable', None),
- 'cython': ('http://docs.cython.org/en/latest', None),
- 'monkeytype': ('https://monkeytype.readthedocs.io/en/latest', None),
- 'setuptools': ('https://setuptools.readthedocs.io/en/latest', None),
+ "python": ("https://docs.python.org/3", None),
+ "six": ("https://six.readthedocs.io", None),
+ "attrs": ("http://www.attrs.org/en/stable", None),
+ "cython": ("http://docs.cython.org/en/latest", None),
+ "monkeytype": ("https://monkeytype.readthedocs.io/en/latest", None),
+ "setuptools": ("https://setuptools.readthedocs.io/en/latest", None),
}
def setup(app: Sphinx) -> None:
app.add_object_type(
- 'confval',
- 'confval',
- objname='configuration value',
- indextemplate='pair: %s; configuration value',
+ "confval",
+ "confval",
+ objname="configuration value",
+ indextemplate="pair: %s; configuration value",
doc_field_types=[
- Field('type', label='Type', has_arg=False, names=('type',)),
- Field('default', label='Default', has_arg=False, names=('default',)),
- ]
+ Field("type", label="Type", has_arg=False, names=("type",)),
+ Field("default", label="Default", has_arg=False, names=("default",)),
+ ],
)
diff --git a/docs/source/config_file.rst b/docs/source/config_file.rst
index 22893ff069d5..3b96e6bd7a5a 100644
--- a/docs/source/config_file.rst
+++ b/docs/source/config_file.rst
@@ -114,7 +114,6 @@ of your repo and run mypy.
# Global options:
[mypy]
- python_version = 2.7
warn_return_any = True
warn_unused_configs = True
@@ -129,16 +128,13 @@ of your repo and run mypy.
[mypy-somelibrary]
ignore_missing_imports = True
-This config file specifies three global options in the ``[mypy]`` section. These three
+This config file specifies two global options in the ``[mypy]`` section. These two
options will:
-1. Type-check your entire project assuming it will be run using Python 2.7.
- (This is equivalent to using the :option:`--python-version 2.7 ` or :option:`-2 ` flag).
-
-2. Report an error whenever a function returns a value that is inferred
+1. Report an error whenever a function returns a value that is inferred
to have type ``Any``.
-3. Report any config options that are unused by mypy. (This will help us catch typos
+2. Report any config options that are unused by mypy. (This will help us catch typos
when making changes to our config file).
Next, this module specifies three per-module options. The first two options change how mypy
@@ -195,6 +191,28 @@ section of the command line docs.
This option may only be set in the global section (``[mypy]``).
+.. confval:: modules
+
+ :type: comma-separated list of strings
+
+    A comma-separated list of modules which should be checked by mypy if none are given on the command
+    line. Mypy *will not* recursively type check any submodules of the provided
+    module.
+
+ This option may only be set in the global section (``[mypy]``).
+
+
+.. confval:: packages
+
+ :type: comma-separated list of strings
+
+ A comma-separated list of packages which should be checked by mypy if none are given on the command
+ line. Mypy *will* recursively type check any submodules of the provided
+ package. This flag is identical to :confval:`modules` apart from this
+ behavior.
+
+ This option may only be set in the global section (``[mypy]``).
+
.. confval:: exclude
:type: regular expression
@@ -258,10 +276,11 @@ section of the command line docs.
.. confval:: namespace_packages
:type: boolean
- :default: False
+ :default: True
Enables :pep:`420` style namespace packages. See the
- corresponding flag :option:`--namespace-packages ` for more information.
+      corresponding flag :option:`--no-namespace-packages <mypy --no-namespace-packages>`
+      for more information.
This option may only be set in the global section (``[mypy]``).
@@ -273,7 +292,7 @@ section of the command line docs.
This flag tells mypy that top-level packages will be based in either the
current directory, or a member of the ``MYPYPATH`` environment variable or
:confval:`mypy_path` config option. This option is only useful in
- conjunction with :confval:`namespace_packages`. See :ref:`Mapping file
+      the absence of ``__init__.py``. See :ref:`Mapping file
      paths to modules <mapping-paths-to-modules>` for details.
This option may only be set in the global section (``[mypy]``).
@@ -507,13 +526,15 @@ None and Optional handling
For more information, see the :ref:`None and Optional handling `
section of the command line docs.
-.. confval:: no_implicit_optional
+.. confval:: implicit_optional
:type: boolean
:default: False
- Changes the treatment of arguments with a default value of ``None`` by not implicitly
- making their type :py:data:`~typing.Optional`.
+ Causes mypy to treat arguments with a ``None``
+ default value as having an implicit :py:data:`~typing.Optional` type.
+
+ **Note:** This was True by default in mypy versions 0.980 and earlier.
.. confval:: strict_optional
@@ -578,14 +599,6 @@ Suppressing errors
Note: these configuration options are available in the config file only. There is
no analog available via the command line options.
-.. confval:: show_none_errors
-
- :type: boolean
- :default: True
-
- Shows errors related to strict ``None`` checking, if the global :confval:`strict_optional`
- flag is enabled.
-
.. confval:: ignore_errors
:type: boolean
@@ -726,12 +739,12 @@ These options may only be set in the global section (``[mypy]``).
Shows column numbers in error messages.
-.. confval:: show_error_codes
+.. confval:: hide_error_codes
:type: boolean
:default: False
- Shows error codes in error messages. See :ref:`error-codes` for more information.
+ Hides error codes in error messages. See :ref:`error-codes` for more information.
.. confval:: pretty
@@ -862,9 +875,16 @@ These options may only be set in the global section (``[mypy]``).
:type: string
- Specifies an alternative directory to look for stubs instead of the
- default ``typeshed`` directory. User home directory and environment
- variables will be expanded.
+ This specifies the directory where mypy looks for standard library typeshed
+ stubs, instead of the typeshed that ships with mypy. This is
+ primarily intended to make it easier to test typeshed changes before
+ submitting them upstream, but also allows you to use a forked version of
+ typeshed.
+
+ User home directory and environment variables will be expanded.
+
+ Note that this doesn't affect third-party library stubs. To test third-party stubs,
+ for example try ``MYPYPATH=stubs/six mypy ...``.
.. confval:: warn_incomplete_stub
@@ -881,6 +901,12 @@ Report generation
If these options are set, mypy will generate a report in the specified
format into the specified directory.
+.. warning::
+
+ Generating reports disables incremental mode and can significantly slow down
+ your workflow. It is recommended to enable reporting only for specific runs
+ (e.g. in CI).
+
.. confval:: any_exprs_report
:type: string
diff --git a/docs/source/duck_type_compatibility.rst b/docs/source/duck_type_compatibility.rst
index 45dcfc40688f..e801f9251db5 100644
--- a/docs/source/duck_type_compatibility.rst
+++ b/docs/source/duck_type_compatibility.rst
@@ -9,7 +9,6 @@ supported for a small set of built-in types:
* ``int`` is duck type compatible with ``float`` and ``complex``.
* ``float`` is duck type compatible with ``complex``.
* ``bytearray`` and ``memoryview`` are duck type compatible with ``bytes``.
-* In Python 2, ``str`` is duck type compatible with ``unicode``.
For example, mypy considers an ``int`` object to be valid whenever a
``float`` object is expected. Thus code like this is nice and clean
@@ -30,16 +29,3 @@ a more principled and extensible fashion. Protocols don't apply to
cases like ``int`` being compatible with ``float``, since ``float`` is not
a protocol class but a regular, concrete class, and many standard library
functions expect concrete instances of ``float`` (or ``int``).
-
-.. note::
-
- Note that in Python 2 a ``str`` object with non-ASCII characters is
- often *not valid* when a unicode string is expected. The mypy type
- system does not consider a string with non-ASCII values as a
- separate type so some programs with this kind of error will
- silently pass type checking. In Python 3 ``str`` and ``bytes`` are
- separate, unrelated types and this kind of error is easy to
- detect. This a good reason for preferring Python 3 over Python 2!
-
- See :ref:`text-and-anystr` for details on how to enforce that a
- value must be a unicode string in a cross-compatible way.
diff --git a/docs/source/dynamic_typing.rst b/docs/source/dynamic_typing.rst
index add445009666..d3476de2ca64 100644
--- a/docs/source/dynamic_typing.rst
+++ b/docs/source/dynamic_typing.rst
@@ -4,27 +4,39 @@
Dynamically typed code
======================
-As mentioned earlier, bodies of functions that don't have any explicit
-types in their function annotation are dynamically typed (operations
-are checked at runtime). Code outside functions is statically typed by
-default, and types of variables are inferred. This does usually the
-right thing, but you can also make any variable dynamically typed by
-defining it explicitly with the type ``Any``:
+In :ref:`getting-started-dynamic-vs-static`, we discussed how bodies of functions
+that don't have any explicit type annotations are "dynamically typed"
+and that mypy will not check them. In this section, we'll talk a little bit more
+about what that means and how you can enable dynamic typing on a more fine-grained basis.
+
+In cases where your code is too magical for mypy to understand, you can make a
+variable or parameter dynamically typed by explicitly giving it the type
+``Any``. Mypy will let you do basically anything with a value of type ``Any``,
+including assigning a value of type ``Any`` to a variable of any type (or vice
+versa).
.. code-block:: python
from typing import Any
- s = 1 # Statically typed (type int)
- d: Any = 1 # Dynamically typed (type Any)
- s = 'x' # Type check error
- d = 'x' # OK
+ num = 1 # Statically typed (inferred to be int)
+ num = 'x' # error: Incompatible types in assignment (expression has type "str", variable has type "int")
+
+ dyn: Any = 1 # Dynamically typed (type Any)
+ dyn = 'x' # OK
+
+ num = dyn # No error, mypy will let you assign a value of type Any to any variable
+ num += 1 # Oops, mypy still thinks num is an int
+
+You can think of ``Any`` as a way to locally disable type checking.
+See :ref:`silencing-type-errors` for other ways you can silence
+the type checker.
Operations on Any values
------------------------
-You can do anything using a value with type ``Any``, and type checker
-does not complain:
+You can do anything using a value with type ``Any``, and the type checker
+will not complain:
.. code-block:: python
@@ -37,7 +49,7 @@ does not complain:
open(x).read()
return x
-Values derived from an ``Any`` value also often have the type ``Any``
+Values derived from an ``Any`` value also usually have the type ``Any``
implicitly, as mypy can't infer a more precise result type. For
example, if you get the attribute of an ``Any`` value or call a
``Any`` value the result is ``Any``:
@@ -45,12 +57,45 @@ example, if you get the attribute of an ``Any`` value or call a
.. code-block:: python
def f(x: Any) -> None:
- y = x.foo() # y has type Any
- y.bar() # Okay as well!
+ y = x.foo()
+ reveal_type(y) # Revealed type is "Any"
+ z = y.bar("mypy will let you do anything to y")
+ reveal_type(z) # Revealed type is "Any"
``Any`` types may propagate through your program, making type checking
less effective, unless you are careful.
+Function parameters without annotations are also implicitly ``Any``:
+
+.. code-block:: python
+
+ def f(x) -> None:
+ reveal_type(x) # Revealed type is "Any"
+ x.can.do["anything", x]("wants", 2)
+
+You can make mypy warn you about untyped function parameters using the
+:option:`--disallow-untyped-defs ` flag.
+
+Generic types missing type parameters will have those parameters implicitly
+treated as ``Any``:
+
+.. code-block:: python
+
+ from typing import List
+
+ def f(x: List) -> None:
+ reveal_type(x) # Revealed type is "builtins.list[Any]"
+ reveal_type(x[0]) # Revealed type is "Any"
+ x[0].anything_goes() # OK
+
+You can make mypy warn you about these missing type parameters using the
+:option:`--disallow-any-generics <mypy --disallow-any-generics>` flag.
+
+Finally, another major source of ``Any`` types leaking into your program is from
+third party libraries that mypy does not know about. This is particularly the case
+when using the :option:`--ignore-missing-imports <mypy --ignore-missing-imports>`
+flag. See :ref:`fix-missing-imports` for more information about this.
+
Any vs. object
--------------
@@ -77,9 +122,14 @@ operations:
o.foo() # Error!
o + 2 # Error!
open(o) # Error!
- n = 1 # type: int
+ n: int = 1
n = o # Error!
+
+If you're not sure whether you need to use :py:class:`object` or ``Any``, use
+:py:class:`object` -- only switch to using ``Any`` if you get a type checker
+complaint.
+
You can use different :ref:`type narrowing `
techniques to narrow :py:class:`object` to a more specific
type (subtype) such as ``int``. Type narrowing is not needed with
diff --git a/docs/source/error_code_list.rst b/docs/source/error_code_list.rst
index e655cae3c45d..efafb4d01f96 100644
--- a/docs/source/error_code_list.rst
+++ b/docs/source/error_code_list.rst
@@ -89,6 +89,23 @@ This example accidentally calls ``sort()`` instead of :py:func:`sorted`:
x = sort([3, 2, 4]) # Error: Name "sort" is not defined [name-defined]
+
+Check that a variable is not used before it's defined [used-before-def]
+-----------------------------------------------------------------------
+
+Mypy will generate an error if a name is used before it's defined.
+While the ``name-defined`` check will catch issues with names that are undefined,
+it will not flag a variable that is used and then defined later in the scope.
+The ``used-before-def`` check will catch such cases.
+
+Example:
+
+.. code-block:: python
+
+ print(x) # Error: Name "x" is used before definition [used-before-def]
+ x = 123
+
+
Check arguments in calls [call-arg]
-----------------------------------
@@ -564,6 +581,54 @@ Example:
# Error: Cannot instantiate abstract class "Thing" with abstract attribute "save" [abstract]
t = Thing()
+Safe handling of abstract type object types [type-abstract]
+-----------------------------------------------------------
+
+Mypy always allows instantiating (calling) type objects typed as ``Type[t]``,
+even if it is not known that ``t`` is non-abstract, since it is a common
+pattern to create functions that act as object factories (custom constructors).
+Therefore, to prevent issues described in the above section, when an abstract
+type object is passed where ``Type[t]`` is expected, mypy will give an error.
+Example:
+
+.. code-block:: python
+
+ from abc import ABCMeta, abstractmethod
+ from typing import List, Type, TypeVar
+
+ class Config(metaclass=ABCMeta):
+ @abstractmethod
+ def get_value(self, attr: str) -> str: ...
+
+ T = TypeVar("T")
+ def make_many(typ: Type[T], n: int) -> List[T]:
+ return [typ() for _ in range(n)] # This will raise if typ is abstract
+
+ # Error: Only concrete class can be given where "Type[Config]" is expected [type-abstract]
+ make_many(Config, 5)
+
+Check that call to an abstract method via super is valid [safe-super]
+---------------------------------------------------------------------
+
+Abstract methods often don't have any default implementation, i.e. their
+bodies are just empty. Calling such methods in subclasses via ``super()``
+will cause runtime errors, so mypy prevents you from doing so:
+
+.. code-block:: python
+
+ from abc import abstractmethod
+ class Base:
+ @abstractmethod
+ def foo(self) -> int: ...
+ class Sub(Base):
+ def foo(self) -> int:
+ return super().foo() + 1 # error: Call to abstract method "foo" of "Base" with
+ # trivial body via super() is unsafe [safe-super]
+ Sub().foo() # This will crash at runtime.
+
+Mypy considers the following as trivial bodies: a ``pass`` statement, a literal
+ellipsis ``...``, a docstring, and a ``raise NotImplementedError`` statement.
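+
+For example, each of the following bodies counts as trivial, so calling any
+of these methods via ``super()`` in a subclass is flagged:
+
+.. code-block:: python
+
+    from abc import abstractmethod
+
+    class Base:
+        @abstractmethod
+        def a(self) -> int:
+            pass  # trivial: pass statement
+
+        @abstractmethod
+        def b(self) -> int:
+            ...  # trivial: literal ellipsis
+
+        @abstractmethod
+        def c(self) -> int:
+            """A docstring alone is also trivial."""
+
+        @abstractmethod
+        def d(self) -> int:
+            raise NotImplementedError  # trivial: raise NotImplementedError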
+
Check the target of NewType [valid-newtype]
-------------------------------------------
@@ -679,6 +744,56 @@ implementation.
def func(value):
pass # actual implementation
+Check that coroutine return value is used [unused-coroutine]
+------------------------------------------------------------
+
+Mypy ensures that return values of ``async def`` functions are not
+ignored, as this is usually a programming error: the coroutine
+won't be executed at the call site.
+
+.. code-block:: python
+
+ async def f() -> None:
+ ...
+
+ async def g() -> None:
+ f() # Error: missing await
+ await f() # OK
+
+You can work around this error by assigning the result to a temporary,
+otherwise unused variable:
+
+.. code-block:: python
+
+ _ = f() # No error
+
+Check types in assert_type [assert-type]
+----------------------------------------
+
+The inferred type for an expression passed to ``assert_type`` must match
+the provided type.
+
+.. code-block:: python
+
+ from typing_extensions import assert_type
+
+ assert_type([1], list[int]) # OK
+
+ assert_type([1], list[str]) # Error
+
+Check that function isn't used in boolean context [truthy-function]
+-------------------------------------------------------------------
+
+Functions will always evaluate to true in boolean contexts.
+
+.. code-block:: python
+
+ def f():
+ ...
+
+ if f: # Error: Function "Callable[[], Any]" could always be true in boolean context [truthy-function]
+ pass
+
Report syntax errors [syntax]
-----------------------------
diff --git a/docs/source/error_code_list2.rst b/docs/source/error_code_list2.rst
index c55643ad6181..f160515f0a9e 100644
--- a/docs/source/error_code_list2.rst
+++ b/docs/source/error_code_list2.rst
@@ -82,6 +82,28 @@ Example:
# Error: Redundant cast to "int" [redundant-cast]
return cast(int, x)
+Check that methods do not have redundant Self annotations [redundant-self]
+--------------------------------------------------------------------------
+
+If a method uses the ``Self`` type in the return type or the type of a
+non-self argument, there is no need to annotate the ``self`` argument
+explicitly. Such annotations are allowed by :pep:`673` but are
+redundant. If you enable this error code, mypy will generate an error if
+there is a redundant ``Self`` type.
+
+Example:
+
+.. code-block:: python
+
+ # mypy: enable-error-code="redundant-self"
+
+ from typing import Self
+
+ class C:
+ # Error: Redundant "Self" annotation for the first method argument
+ def copy(self: Self) -> Self:
+ return type(self)()
+
Check that comparisons are overlapping [comparison-overlap]
-----------------------------------------------------------
@@ -200,7 +222,7 @@ mypy generates an error if it thinks that an expression is redundant.
.. code-block:: python
- # mypy: enable-error-code redundant-expr
+ # Use "mypy --enable-error-code redundant-expr ..."
def example(x: int) -> None:
# Error: Left operand of "and" is always true [redundant-expr]
@@ -217,44 +239,52 @@ mypy generates an error if it thinks that an expression is redundant.
Check that expression is not implicitly true in boolean context [truthy-bool]
-----------------------------------------------------------------------------
-Warn when an expression whose type does not implement ``__bool__`` or ``__len__`` is used in boolean context,
-since unless implemented by a sub-type, the expression will always evaluate to true.
+Warn when the type of an expression in a boolean context does not
+implement ``__bool__`` or ``__len__``. Unless one of these is
+implemented by a subtype, the expression will always be considered
+true, and there may be a bug in the condition.
+
+As an exception, the ``object`` type is allowed in a boolean context.
+Using an iterable value in a boolean context has a separate error code
+(see below).
.. code-block:: python
- # mypy: enable-error-code truthy-bool
+ # Use "mypy --enable-error-code truthy-bool ..."
class Foo:
- pass
+ pass
foo = Foo()
# Error: "foo" has type "Foo" which does not implement __bool__ or __len__ so it could always be true in boolean context
if foo:
- ...
+ ...
+
+
+Check that iterable is not implicitly true in boolean context [truthy-iterable]
+-------------------------------------------------------------------------------
+
+Generate an error if a value of type ``Iterable`` is used as a boolean
+condition, since ``Iterable`` does not implement ``__len__`` or ``__bool__``.
-This check might falsely imply an error. For example, ``Iterable`` does not implement
-``__len__`` and so this code will be flagged:
+Example:
.. code-block:: python
- # mypy: enable-error-code truthy-bool
from typing import Iterable
- def transform(items: Iterable[int]) -> Iterable[int]:
- # Error: "items" has type "Iterable[int]" which does not implement __bool__ or __len__ so it could always be true in boolean context [truthy-bool]
+ def transform(items: Iterable[int]) -> list[int]:
+ # Error: "items" has type "Iterable[int]" which can always be true in boolean context. Consider using "Collection[int]" instead. [truthy-iterable]
if not items:
return [42]
return [x + 1 for x in items]
-
-
-If called as ``transform((int(s) for s in []))``, this function would not return ``[42]`` unlike what the author
-might have intended. Of course it's possible that ``transform`` is only passed ``list`` objects, and so there is
-no error in practice. In such case, it might be prudent to annotate ``items: Sequence[int]``.
-
-This is similar in concept to ensuring that an expression's type implements an expected interface (e.g. ``Sized``),
-except that attempting to invoke an undefined method (e.g. ``__len__``) results in an error,
-while attempting to evaluate an object in boolean context without a concrete implementation results in a truthy value.
+If ``transform`` is called with a ``Generator`` argument, such as
+``int(x) for x in []``, this function would not return ``[42]`` unlike
+what might be intended. Of course, it's possible that ``transform`` is
+only called with ``list`` or other container objects, and the ``if not
+items`` check is actually valid. If that is the case, it is
+recommended to annotate ``items`` as ``Collection[int]`` instead of
+``Iterable[int]``.
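+
+For example, annotating ``items`` as suggested makes the check above safe:
+
+.. code-block:: python
+
+    from typing import Collection
+
+    def transform(items: Collection[int]) -> list[int]:
+        if not items:  # OK: "Collection" implements __len__
+            return [42]
+        return [x + 1 for x in items]
+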
.. _ignore-without-code:
@@ -270,7 +300,7 @@ Example:
.. code-block:: python
- # mypy: enable-error-code ignore-without-code
+ # Use "mypy --enable-error-code ignore-without-code ..."
class Foo:
def __init__(self, name: str) -> None:
@@ -288,3 +318,32 @@ Example:
# This line warns correctly about the typo in the attribute name
# Error: "Foo" has no attribute "nme"; maybe "name"?
f.nme = 42 # type: ignore[assignment]
+
+Check that awaitable return value is used [unused-awaitable]
+------------------------------------------------------------
+
+If you use :option:`--enable-error-code unused-awaitable <mypy --enable-error-code>`,
+mypy generates an error if you don't use a returned value that defines ``__await__``.
+
+Example:
+
+.. code-block:: python
+
+ # Use "mypy --enable-error-code unused-awaitable ..."
+
+ import asyncio
+
+ async def f() -> int: ...
+
+ async def g() -> None:
+ # Error: Value of type "Task[int]" must be used
+ # Are you missing an await?
+ asyncio.create_task(f())
+
+You can assign the value to a temporary, otherwise unused variable to
+silence the error:
+
+.. code-block:: python
+
+ async def g() -> None:
+ _ = asyncio.create_task(f()) # No error
diff --git a/docs/source/error_codes.rst b/docs/source/error_codes.rst
index 08d56c59fba2..aabedf87f73a 100644
--- a/docs/source/error_codes.rst
+++ b/docs/source/error_codes.rst
@@ -23,12 +23,12 @@ Error codes may change in future mypy releases.
Displaying error codes
----------------------
-Error codes are not displayed by default. Use :option:`--show-error-codes `
-or config `show_error_codes = True` to display error codes. Error codes are shown inside square brackets:
+Error codes are displayed by default. Use :option:`--hide-error-codes <mypy --hide-error-codes>`
+or config ``hide_error_codes = True`` to hide error codes. Error codes are shown inside square brackets:
.. code-block:: text
- $ mypy --show-error-codes prog.py
+ $ mypy prog.py
prog.py:1: error: "str" has no attribute "trim" [attr-defined]
It's also possible to require error codes for ``type: ignore`` comments.
@@ -46,11 +46,8 @@ line. This can be used even if you have not configured mypy to show
error codes. Currently it's only possible to disable arbitrary error
codes on individual lines using this comment.
-.. note::
-
- There are command-line flags and config file settings for enabling
- certain optional error codes, such as :option:`--disallow-untyped-defs `,
- which enables the ``no-untyped-def`` error code.
+You can also use :option:`--disable-error-code <mypy --disable-error-code>`
+to disable specific error codes globally.
This example shows how to ignore an error about an imported name mypy
thinks is undefined:
@@ -60,3 +57,59 @@ thinks is undefined:
# 'foo' is defined in 'foolib', even though mypy can't see the
# definition.
from foolib import foo # type: ignore[attr-defined]
+
+
+Enabling specific error codes
+-----------------------------
+
+There are command-line flags and config file settings for enabling
+certain optional error codes, such as :option:`--disallow-untyped-defs <mypy --disallow-untyped-defs>`,
+which enables the ``no-untyped-def`` error code.
+
+You can use :option:`--enable-error-code <mypy --enable-error-code>` to
+enable specific error codes that don't have a dedicated command-line
+flag or config file setting.
+
+Per-module enabling/disabling error codes
+-----------------------------------------
+
+You can use :ref:`configuration file <config-file>` sections to enable or
+disable specific error codes only in some modules. For example, this ``mypy.ini``
+config will allow non-annotated empty containers in tests, while keeping
+other parts of the code checked in strict mode:
+
+.. code-block:: ini
+
+ [mypy]
+ strict = True
+
+ [mypy-tests.*]
+ allow_untyped_defs = True
+ allow_untyped_calls = True
+ disable_error_code = var-annotated, has-type
+
+Note that per-module enabling/disabling acts as an override over the global
+options, so you don't need to repeat the error code lists for each
+module if you have them in the global config section. For example:
+
+.. code-block:: ini
+
+ [mypy]
+ enable_error_code = truthy-bool, ignore-without-code, unused-awaitable
+
+ [mypy-extensions.*]
+ disable_error_code = unused-awaitable
+
+The above config will allow unused awaitables in extension modules, but will
+still keep the other two error codes enabled. The overall logic is as follows:
+
+* Command line and/or config main section set global error codes
+
+* Individual config sections *adjust* them per glob/module
+
+* Inline ``# mypy: ...`` comments can further *adjust* them for a specific
+ module
+
+So one can e.g. enable an error code globally, disable it for all tests in
+the corresponding config section, and then re-enable it with an inline
+comment in some specific test.
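+
+For example, a sketch of such an inline comment in one specific test module
+(the error code here is only illustrative):
+
+.. code-block:: python
+
+    # mypy: enable-error-code="truthy-bool"
+
+    # The rest of this module is checked with truthy-bool enabled, even if
+    # the config file disables it for tests.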
diff --git a/docs/source/existing_code.rst b/docs/source/existing_code.rst
index 66259e5e94c7..410d7af0c350 100644
--- a/docs/source/existing_code.rst
+++ b/docs/source/existing_code.rst
@@ -7,38 +7,78 @@ This section explains how to get started using mypy with an existing,
significant codebase that has little or no type annotations. If you are
a beginner, you can skip this section.
-These steps will get you started with mypy on an existing codebase:
+Start small
+-----------
-1. Start small -- get a clean mypy build for some files, with few
- annotations
+If your codebase is large, pick a subset of your codebase (say, 5,000 to 50,000
+lines) and get mypy to run successfully only on this subset at first, *before
+adding annotations*. This should be doable in a day or two. The sooner you get
+some form of mypy passing on your codebase, the sooner you benefit.
-2. Write a mypy runner script to ensure consistent results
+You'll likely need to fix some mypy errors, either by inserting
+annotations requested by mypy or by adding ``# type: ignore``
+comments to silence errors you don't want to fix now.
-3. Run mypy in Continuous Integration to prevent type errors
+We'll mention some tips for getting mypy passing on your codebase in various
+sections below.
-4. Gradually annotate commonly imported modules
+Run mypy consistently and prevent regressions
+---------------------------------------------
-5. Write annotations as you modify existing code and write new code
+Make sure all developers on your codebase run mypy the same way.
+One way to ensure this is adding a small script with your mypy
+invocation to your codebase, or adding your mypy invocation to
+existing tools you use to run tests, like ``tox``.
-6. Use :doc:`monkeytype:index` or `PyAnnotate`_ to automatically annotate legacy code
+* Make sure everyone runs mypy with the same options. Checking a mypy
+ :ref:`configuration file ` into your codebase can help
+ with this.
-We discuss all of these points in some detail below, and a few optional
-follow-up steps.
+* Make sure everyone type checks the same set of files. See
+ :ref:`specifying-code-to-be-checked` for details.
-Start small
------------
+* Make sure everyone runs mypy with the same version of mypy, for instance
+ by pinning mypy with the rest of your dev requirements.
-If your codebase is large, pick a subset of your codebase (say, 5,000
-to 50,000 lines) and run mypy only on this subset at first,
-*without any annotations*. This shouldn't take more than a day or two
-to implement, so you start enjoying benefits soon.
+In particular, you'll want to make sure to run mypy as part of your
+Continuous Integration (CI) system as soon as possible. This will
+prevent new type errors from being introduced into your codebase.
-You'll likely need to fix some mypy errors, either by inserting
-annotations requested by mypy or by adding ``# type: ignore``
-comments to silence errors you don't want to fix now.
+A simple CI script could look something like this:
+
+.. code-block:: text
+
+ python3 -m pip install mypy==0.971
+ # Run your standardised mypy invocation, e.g.
+ mypy my_project
+ # This could also look like `scripts/run_mypy.sh`, `tox run -e mypy`, `make mypy`, etc
+
+Ignoring errors from certain modules
+------------------------------------
-In particular, mypy often generates errors about modules that it can't
-find or that don't have stub files:
+By default mypy will follow imports in your code and try to check everything.
+This means even if you only pass in a few files to mypy, it may still process a
+large number of imported files. This could potentially result in lots of errors
+you don't want to deal with at the moment.
+
+One way to deal with this is to ignore errors in modules you aren't yet ready to
+type check. The :confval:`ignore_errors` option is useful for this, for instance,
+if you aren't yet ready to deal with errors from ``package_to_fix_later``:
+
+.. code-block:: text
+
+ [mypy-package_to_fix_later.*]
+ ignore_errors = True
+
+You could even invert this by setting ``ignore_errors = True`` in your global
+config section and only enabling error reporting with ``ignore_errors = False``
+for the set of modules you are ready to type check.
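+
+A sketch of that inverted setup (the module name is a placeholder):
+
+.. code-block:: text
+
+   [mypy]
+   ignore_errors = True
+
+   [mypy-package_ready_now.*]
+   ignore_errors = False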
+
+Fixing errors related to imports
+--------------------------------
+
+A common class of error you will encounter is errors from mypy about
+modules that it can't find, that don't have types, or that don't have
+stub files:
.. code-block:: text
@@ -46,7 +86,15 @@ find or that don't have stub files:
core/model.py:9: error: Cannot find implementation or library stub for module named 'acme'
...
-This is normal, and you can easily ignore these errors. For example,
+Sometimes these can be fixed by installing the relevant packages or
+stub libraries in the environment you're running ``mypy`` in.
+
+See :ref:`ignore-missing-imports` for a complete reference on these errors
+and the ways in which you can fix them.
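+
+For instance, many popular third-party packages have stub packages on
+PyPI that you can install (``types-requests`` is one example):
+
+.. code-block:: text
+
+   python3 -m pip install types-requests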
+
+You'll likely find that you want to suppress all errors from importing
+a given module that doesn't have types. If you only import that module
+in one or two places, you can use ``# type: ignore`` comments. For example,
here we ignore an error about a third-party module ``frobnicate`` that
doesn't have stubs using ``# type: ignore``:
@@ -56,9 +104,9 @@ doesn't have stubs using ``# type: ignore``:
...
frobnicate.initialize() # OK (but not checked)
-You can also use a mypy configuration file, which is convenient if
-there are a large number of errors to ignore. For example, to disable
-errors about importing ``frobnicate`` and ``acme`` everywhere in your
+But if you import the module in many places, this becomes unwieldy. In this
+case, we recommend using a :ref:`configuration file `. For example,
+to disable errors about importing ``frobnicate`` and ``acme`` everywhere in your
codebase, use a config like this:
.. code-block:: text
@@ -69,69 +117,33 @@ codebase, use a config like this:
[mypy-acme.*]
ignore_missing_imports = True
-You can add multiple sections for different modules that should be
-ignored.
-
-If your config file is named ``mypy.ini``, this is how you run mypy:
-
-.. code-block:: text
-
- mypy --config-file mypy.ini mycode/
-
If you get a large number of errors, you may want to ignore all errors
-about missing imports. This can easily cause problems later on and
-hide real errors, and it's only recommended as a last resort.
-For more details, look :ref:`here `.
-
-Mypy follows imports by default. This can result in a few files passed
-on the command line causing mypy to process a large number of imported
-files, resulting in lots of errors you don't want to deal with at the
-moment. There is a config file option to disable this behavior, but
-since this can hide errors, it's not recommended for most users.
-
-Mypy runner script
-------------------
-
-Introduce a mypy runner script that runs mypy, so that every developer
-will use mypy consistently. Here are some things you may want to do in
-the script:
-
-* Ensure that the correct version of mypy is installed.
-
-* Specify mypy config file or command-line options.
-
-* Provide set of files to type check. You may want to implement
- inclusion and exclusion filters for full control of the file
- list.
-
-Continuous Integration
-----------------------
-
-Once you have a clean mypy run and a runner script for a part
-of your codebase, set up your Continuous Integration (CI) system to
-run mypy to ensure that developers won't introduce bad annotations.
-A simple CI script could look something like this:
+about missing imports, for instance by setting :confval:`ignore_missing_imports`
+to true globally. This can hide errors later on, so we recommend avoiding this
+if possible.
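+
+If you do go this route, the global section of your config would look
+something like this:
+
+.. code-block:: text
+
+   [mypy]
+   ignore_missing_imports = True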
-.. code-block:: text
+Finally, mypy allows fine-grained control over how it follows specific
+imports. It's very easy to silently shoot yourself in the foot when
+playing around with these options, so this is mostly recommended as a
+last resort. For more details, look :ref:`here `.
- python3 -m pip install mypy==0.790 # Pinned version avoids surprises
- scripts/mypy # Run the mypy runner script you set up
-
-Annotate widely imported modules
---------------------------------
+Prioritise annotating widely imported modules
+---------------------------------------------
Most projects have some widely imported modules, such as utilities or
model classes. It's a good idea to annotate these pretty early on,
since this allows code using these modules to be type checked more
-effectively. Since mypy supports gradual typing, it's okay to leave
-some of these modules unannotated. The more you annotate, the more
-useful mypy will be, but even a little annotation coverage is useful.
+effectively.
+
+Mypy is designed to support gradual typing, i.e. letting you add annotations at
+your own pace, so it's okay to leave some of these modules unannotated. The more
+you annotate, the more useful mypy will be, but even a little annotation
+coverage is useful.
Write annotations as you go
---------------------------
-Now you are ready to include type annotations in your development
-workflows. Consider adding something like these in your code style
+Consider adding something like the following to your code style
conventions:
1. Developers should add annotations for any new code.
@@ -143,9 +155,9 @@ codebase without much effort.
Automate annotation of legacy code
----------------------------------
-There are tools for automatically adding draft annotations
-based on type profiles collected at runtime. Tools include
-:doc:`monkeytype:index` (Python 3) and `PyAnnotate`_.
+There are tools for automatically adding draft annotations based on simple
+static analysis or on type profiles collected at runtime. Tools include
+:doc:`monkeytype:index`, `autotyping`_ and `PyAnnotate`_.
A simple approach is to collect types from test runs. This may work
well if your test coverage is good (and if your tests aren't very
@@ -156,14 +168,7 @@ fraction of production network requests. This clearly requires more
care, as type collection could impact the reliability or the
performance of your service.
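+
+As a rough sketch, a MonkeyType workflow for collecting types from a
+test run could look like this (the script and module names are made up):
+
+.. code-block:: text
+
+   python3 -m pip install monkeytype
+   monkeytype run runtests.py             # record types during the run
+   monkeytype apply my_project.my_module  # insert draft annotations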
-Speed up mypy runs
-------------------
-
-You can use :ref:`mypy daemon ` to get much faster
-incremental mypy runs. The larger your project is, the more useful
-this will be. If your project has at least 100,000 lines of code or
-so, you may also want to set up :ref:`remote caching `
-for further speedups.
+.. _getting-to-strict:
Introduce stricter options
--------------------------
@@ -172,7 +177,69 @@ Mypy is very configurable. Once you get started with static typing, you may want
to explore the various strictness options mypy provides to catch more bugs. For
example, you can ask mypy to require annotations for all functions in certain
modules to avoid accidentally introducing code that won't be type checked using
-:confval:`disallow_untyped_defs`, or type check code without annotations as well
-with :confval:`check_untyped_defs`. Refer to :ref:`config-file` for the details.
+:confval:`disallow_untyped_defs`. Refer to :ref:`config-file` for the details.
+
+An excellent goal to aim for is to have your codebase pass when run against ``mypy --strict``.
+This basically ensures that you will never have a type-related error without an explicit
+circumvention somewhere (such as a ``# type: ignore`` comment).
+
+The following config is equivalent to ``--strict`` (as of mypy 0.990):
+
+.. code-block:: text
+
+ # Start off with these
+ warn_unused_configs = True
+ warn_redundant_casts = True
+ warn_unused_ignores = True
+ no_implicit_optional = True
+
+ # Getting these passing should be easy
+ strict_equality = True
+ strict_concatenate = True
+
+ # Strongly recommend enabling this one as soon as you can
+ check_untyped_defs = True
+
+ # These shouldn't be too much additional work, but may be tricky to
+ # get passing if you use a lot of untyped libraries
+ disallow_subclassing_any = True
+ disallow_untyped_decorators = True
+ disallow_any_generics = True
+
+ # These next few are various gradations of forcing use of type annotations
+ disallow_untyped_calls = True
+ disallow_incomplete_defs = True
+ disallow_untyped_defs = True
+
+ # This one isn't too hard to get passing, but return on investment is lower
+ no_implicit_reexport = True
+
+ # This one can be tricky to get passing if you use a lot of untyped libraries
+ warn_return_any = True
+
+Note that you can also start with ``--strict`` and subtract, for instance:
+
+.. code-block:: text
+
+ strict = True
+ warn_return_any = False
+
+Remember that many of these options can be enabled on a per-module basis. For instance,
+you may want to enable ``disallow_untyped_defs`` for modules which you've completed
+annotations for, in order to prevent new code from being added without annotations.
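+
+For example (the module name is a placeholder):
+
+.. code-block:: text
+
+   [mypy-fully_annotated_package.*]
+   disallow_untyped_defs = True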
+
+And if you want, it doesn't stop at ``--strict``. Mypy has additional
+checks beyond ``--strict`` that can be useful. See the complete
+:ref:`command-line` reference and :ref:`error-codes-optional`.
+
+Speed up mypy runs
+------------------
+
+You can use :ref:`mypy daemon ` to get much faster
+incremental mypy runs. The larger your project is, the more useful
+this will be. If your project has at least 100,000 lines of code or
+so, you may also want to set up :ref:`remote caching `
+for further speedups.
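+
+Getting started with the daemon can be as simple as this sketch (the
+project path is a placeholder):
+
+.. code-block:: text
+
+   dmypy run -- my_project   # first run starts the daemon and fills the cache
+   dmypy run -- my_project   # later runs only recheck what changed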
.. _PyAnnotate: https://github.com/dropbox/pyannotate
+.. _autotyping: https://github.com/JelleZijlstra/autotyping
diff --git a/docs/source/extending_mypy.rst b/docs/source/extending_mypy.rst
index 5c59bef506cc..daf863616334 100644
--- a/docs/source/extending_mypy.rst
+++ b/docs/source/extending_mypy.rst
@@ -155,23 +155,9 @@ When analyzing this code, mypy will call ``get_type_analyze_hook("lib.Vector")``
so the plugin can return some valid type for each variable.
**get_function_hook()** is used to adjust the return type of a function call.
-This is a good choice if the return type of some function depends on *values*
-of some arguments that can't be expressed using literal types (for example
-a function may return an ``int`` for positive arguments and a ``float`` for
-negative arguments). This hook will be also called for instantiation of classes.
-For example:
-
-.. code-block:: python
-
- from contextlib import contextmanager
- from typing import TypeVar, Callable
-
- T = TypeVar('T')
-
- @contextmanager # built-in plugin can infer a precise type here
- def stopwatch(timer: Callable[[], T]) -> Iterator[T]:
- ...
- yield timer()
+This hook is also called for the instantiation of classes.
+It is a good choice if the return type is too complex
+to be expressed by regular Python typing.
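+
+As a rough sketch, a plugin using this hook could look like the
+following (``lib.magic`` is a hypothetical function to special-case):
+
+.. code-block:: python
+
+    from typing import Callable, Optional
+
+    from mypy.plugin import FunctionContext, Plugin
+    from mypy.types import Type
+
+    class MyPlugin(Plugin):
+        def get_function_hook(
+            self, fullname: str
+        ) -> Optional[Callable[[FunctionContext], Type]]:
+            if fullname == "lib.magic":
+                return magic_hook
+            return None
+
+    def magic_hook(ctx: FunctionContext) -> Type:
+        # Inspect ctx.args / ctx.arg_types here and compute a more
+        # precise type; otherwise keep the normally inferred one.
+        return ctx.default_return_type
+
+    # Entry point so mypy can load the plugin
+    def plugin(version: str):
+        return MyPlugin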
**get_function_signature_hook** is used to adjust the signature of a function.
@@ -198,6 +184,10 @@ fields which already exist on the class. *Exception:* if :py:meth:`__getattr__ <
:py:meth:`__getattribute__ ` is a method on the class, the hook is called for all
fields which do not refer to methods.
+**get_class_attribute_hook()** is similar to the above, but for attributes on classes rather than instances.
+Unlike the above, this does not have special casing for :py:meth:`__getattr__ ` or
+:py:meth:`__getattribute__ `.
+
**get_class_decorator_hook()** can be used to update class definition for
given class decorators. For example, you can add some attributes to the class
to match runtime behaviour:
@@ -247,31 +237,3 @@ mypy's cache for that module so that it can be rechecked. This hook
should be used to report to mypy any relevant configuration data,
so that mypy knows to recheck the module if the configuration changes.
The hooks should return data encodable as JSON.
-
-Notes about the semantic analyzer
-*********************************
-
-Mypy 0.710 introduced a new semantic analyzer, and the old semantic
-analyzer was removed in mypy 0.730. Support for the new semantic analyzer
-required some changes to existing plugins. Here is a short summary of the
-most important changes:
-
-* The order of processing AST nodes is different. Code outside
- functions is processed first, and functions and methods are
- processed afterwards.
-
-* Each AST node can be processed multiple times to resolve forward
- references. The same plugin hook may be called multiple times, so
- they need to be idempotent.
-
-* The ``anal_type()`` API method returns ``None`` if some part of
- the type is not available yet due to forward references, for example.
-
-* When looking up symbols, you may encounter *placeholder nodes* that
- are used for names that haven't been fully processed yet. You'll
- generally want to request another semantic analysis iteration by
- *deferring* in that case.
-
-See the docstring at the top of
-`mypy/plugin.py `_
-for more details.
diff --git a/docs/source/faq.rst b/docs/source/faq.rst
index 2a79498dd792..d97929c2cfa6 100644
--- a/docs/source/faq.rst
+++ b/docs/source/faq.rst
@@ -85,14 +85,6 @@ could be other tools that can compile statically typed mypy code to C
modules or to efficient JVM bytecode, for example, but this is outside
the scope of the mypy project.
-How do I type check my Python 2 code?
-*************************************
-
-You can use a :pep:`comment-based function annotation syntax
-<484#suggested-syntax-for-python-2-7-and-straddling-code>`
-and use the :option:`--py2 ` command-line option to type check your Python 2 code.
-You'll also need to install ``typing`` for Python 2 via ``pip install typing``.
-
Is mypy free?
*************
@@ -197,12 +189,12 @@ the following aspects, among others:
defined in terms of translating them to C or C++. Mypy just uses
Python semantics, and mypy does not deal with accessing C library
functionality.
-
+
Does it run on PyPy?
*********************
Somewhat. With PyPy 3.8, mypy is at least able to type check itself.
-With older versions of PyPy, mypy relies on `typed-ast
+With older versions of PyPy, mypy relies on `typed-ast
`_, which uses several APIs that
PyPy does not support (including some internal CPython APIs).
diff --git a/docs/source/final_attrs.rst b/docs/source/final_attrs.rst
index e5d209644fce..297b97eca787 100644
--- a/docs/source/final_attrs.rst
+++ b/docs/source/final_attrs.rst
@@ -17,7 +17,7 @@ There is no runtime enforcement by the Python runtime.
The examples in this page import ``Final`` and ``final`` from the
``typing`` module. These types were added to ``typing`` in Python 3.8,
- but are also available for use in Python 2.7 and 3.4 - 3.7 via the
+ but are also available for use in Python 3.4 - 3.7 via the
``typing_extensions`` package.
Final names
@@ -33,7 +33,7 @@ further assignments to final names in type-checked code:
from typing import Final
- RATE: Final = 3000
+ RATE: Final = 3_000
class Base:
DEFAULT_ID: Final = 0
diff --git a/docs/source/generics.rst b/docs/source/generics.rst
index 7730bd0e5c10..9ac79f90121d 100644
--- a/docs/source/generics.rst
+++ b/docs/source/generics.rst
@@ -50,17 +50,9 @@ Using ``Stack`` is similar to built-in container types:
stack = Stack[int]()
stack.push(2)
stack.pop()
- stack.push('x') # Type error
+ stack.push('x') # error: Argument 1 to "push" of "Stack" has incompatible type "str"; expected "int"
-Type inference works for user-defined generic types as well:
-
-.. code-block:: python
-
- def process(stack: Stack[int]) -> None: ...
-
- process(Stack()) # Argument has inferred type Stack[int]
-
-Construction of instances of generic types is also type checked:
+Construction of instances of generic types is type checked:
.. code-block:: python
@@ -68,77 +60,17 @@ Construction of instances of generic types is also type checked:
def __init__(self, content: T) -> None:
self.content = content
- Box(1) # OK, inferred type is Box[int]
+ Box(1) # OK, inferred type is Box[int]
Box[int](1) # Also OK
- s = 'some string'
- Box[int](s) # Type error
-
-Generic class internals
-***********************
-
-You may wonder what happens at runtime when you index
-``Stack``. Indexing ``Stack`` returns a *generic alias*
-to ``Stack`` that returns instances of the original class on
-instantiation:
-
-.. code-block:: python
-
- >>> print(Stack)
- __main__.Stack
- >>> print(Stack[int])
- __main__.Stack[int]
- >>> print(Stack[int]().__class__)
- __main__.Stack
-
-Generic aliases can be instantiated or subclassed, similar to real
-classes, but the above examples illustrate that type variables are
-erased at runtime. Generic ``Stack`` instances are just ordinary
-Python objects, and they have no extra runtime overhead or magic due
-to being generic, other than a metaclass that overloads the indexing
-operator.
-
-Note that in Python 3.8 and lower, the built-in types
-:py:class:`list`, :py:class:`dict` and others do not support indexing.
-This is why we have the aliases :py:class:`~typing.List`,
-:py:class:`~typing.Dict` and so on in the :py:mod:`typing`
-module. Indexing these aliases gives you a generic alias that
-resembles generic aliases constructed by directly indexing the target
-class in more recent versions of Python:
-
-.. code-block:: python
-
- >>> # Only relevant for Python 3.8 and below
- >>> # For Python 3.9 onwards, prefer `list[int]` syntax
- >>> from typing import List
- >>> List[int]
- typing.List[int]
-
-Note that the generic aliases in ``typing`` don't support constructing
-instances:
-
-.. code-block:: python
-
- >>> from typing import List
- >>> List[int]()
- Traceback (most recent call last):
- ...
- TypeError: Type List cannot be instantiated; use list() instead
-
-.. note::
-
- In Python 3.6 indexing generic types or type aliases results in actual
- type objects. This means that generic types in type annotations can
- have a significant runtime cost. This was changed in Python 3.7, and
- indexing generic types became a cheap operation.
+ Box[int]('some string') # error: Argument 1 to "Box" has incompatible type "str"; expected "int"
.. _generic-subclasses:
-Defining sub-classes of generic classes
-***************************************
+Defining subclasses of generic classes
+**************************************
User-defined generic classes and generic classes defined in :py:mod:`typing`
-can be used as base classes for another classes, both generic and
-non-generic. For example:
+can be used as a base class for another class (generic or non-generic). For example:
.. code-block:: python
@@ -147,29 +79,29 @@ non-generic. For example:
KT = TypeVar('KT')
VT = TypeVar('VT')
- class MyMap(Mapping[KT, VT]): # This is a generic subclass of Mapping
- def __getitem__(self, k: KT) -> VT:
- ... # Implementations omitted
- def __iter__(self) -> Iterator[KT]:
- ...
- def __len__(self) -> int:
- ...
+ # This is a generic subclass of Mapping
+ class MyMap(Mapping[KT, VT]):
+ def __getitem__(self, k: KT) -> VT: ...
+ def __iter__(self) -> Iterator[KT]: ...
+ def __len__(self) -> int: ...
- items: MyMap[str, int] # Okay
+ items: MyMap[str, int] # OK
- class StrDict(dict[str, str]): # This is a non-generic subclass of dict
+ # This is a non-generic subclass of dict
+ class StrDict(dict[str, str]):
def __str__(self) -> str:
- return 'StrDict({})'.format(super().__str__())
+ return f'StrDict({super().__str__()})'
+
data: StrDict[int, int] # Error! StrDict is not generic
data2: StrDict # OK
+ # This is a user-defined generic class
class Receiver(Generic[T]):
- def accept(self, value: T) -> None:
- ...
+ def accept(self, value: T) -> None: ...
- class AdvancedReceiver(Receiver[T]):
- ...
+ # This is a generic subclass of Receiver
+ class AdvancedReceiver(Receiver[T]): ...
.. note::
@@ -215,15 +147,16 @@ For example:
Generic functions
*****************
-Generic type variables can also be used to define generic functions:
+Type variables can be used to define generic functions:
.. code-block:: python
from typing import TypeVar, Sequence
- T = TypeVar('T') # Declare type variable
+ T = TypeVar('T')
- def first(seq: Sequence[T]) -> T: # Generic function
+ # A generic function!
+ def first(seq: Sequence[T]) -> T:
return seq[0]
As with generic classes, the type variable can be replaced with any
@@ -232,10 +165,8 @@ return type is derived from the sequence item type. For example:
.. code-block:: python
- # Assume first defined as above.
-
- s = first('foo') # s has type str.
- n = first([1, 2, 3]) # n has type int.
+ reveal_type(first([1, 2, 3])) # Revealed type is "builtins.int"
+ reveal_type(first(['a', 'b'])) # Revealed type is "builtins.str"
Note also that a single definition of a type variable (such as ``T``
above) can be used in multiple generic functions or classes. In this
@@ -262,17 +193,11 @@ Generic methods and generic self
********************************
You can also define generic methods — just use a type variable in the
-method signature that is different from class type variables. In particular,
-``self`` may also be generic, allowing a method to return the most precise
-type known at the point of access.
-
-.. note::
-
- This feature is experimental. Checking code with type annotations for self
- arguments is still not fully implemented. Mypy may disallow valid code or
- allow unsafe code.
-
-In this way, for example, you can typecheck chaining of setter methods:
+method signature that is different from class type variables. In
+particular, the ``self`` argument may also be generic, allowing a
+method to return the most precise type known at the point of access.
+In this way, for example, you can type check a chain of setter
+methods:
.. code-block:: python
@@ -295,10 +220,12 @@ In this way, for example, you can typecheck chaining of setter methods:
self.width = w
return self
- circle = Circle().set_scale(0.5).set_radius(2.7) # type: Circle
- square = Square().set_scale(0.5).set_width(3.2) # type: Square
+ circle: Circle = Circle().set_scale(0.5).set_radius(2.7)
+ square: Square = Square().set_scale(0.5).set_width(3.2)
-Without using generic ``self``, the last two lines could not be type-checked properly.
+Without using generic ``self``, the last two lines could not be type
+checked properly, since the return type of ``set_scale`` would be
+``Shape``, which doesn't define ``set_radius`` or ``set_width``.
Other uses are factory methods, such as copy and deserialization.
For class methods, you can also define generic ``cls``, using :py:class:`Type[T] `:
@@ -310,7 +237,7 @@ For class methods, you can also define generic ``cls``, using :py:class:`Type[T]
T = TypeVar('T', bound='Friend')
class Friend:
- other = None # type: Friend
+        other: "Friend | None" = None
@classmethod
def make_pair(cls: Type[T]) -> tuple[T, T]:
@@ -331,9 +258,74 @@ In the latter case, you must implement this method in all future subclasses.
Note also that mypy cannot always verify that the implementation of a copy
or a deserialization method returns the actual type of self. Therefore
you may need to silence mypy inside these methods (but not at the call site),
-possibly by making use of the ``Any`` type.
+possibly by making use of the ``Any`` type or a ``# type: ignore`` comment.
+
+Note that mypy lets you use generic self types in certain unsafe ways
+in order to support common idioms. For example, using a generic
+self type in an argument type is accepted even though it's unsafe:
+
+.. code-block:: python
+
+ from typing import TypeVar
-For some advanced uses of self-types see :ref:`additional examples `.
+ T = TypeVar("T")
+
+ class Base:
+ def compare(self: T, other: T) -> bool:
+ return False
+
+ class Sub(Base):
+ def __init__(self, x: int) -> None:
+ self.x = x
+
+ # This is unsafe (see below) but allowed because it's
+ # a common pattern and rarely causes issues in practice.
+ def compare(self, other: Sub) -> bool:
+ return self.x > other.x
+
+ b: Base = Sub(42)
+ b.compare(Base()) # Runtime error here: 'Base' object has no attribute 'x'
+
+For some advanced uses of self types, see :ref:`additional examples `.
+
+Automatic self types using typing.Self
+**************************************
+
+Since the patterns described above are quite common, mypy supports a
+simpler syntax, introduced in :pep:`673`, to make them easier to use.
+Instead of defining a type variable and using an explicit annotation
+for ``self``, you can import the special type ``typing.Self`` that is
+automatically transformed into a type variable with the current class
+as the upper bound, and you don't need an annotation for ``self`` (or
+``cls`` in class methods). The example from the previous section can
+be made simpler by using ``Self``:
+
+.. code-block:: python
+
+ from typing import Self
+
+ class Friend:
+ other: Self | None = None
+
+ @classmethod
+ def make_pair(cls) -> tuple[Self, Self]:
+ a, b = cls(), cls()
+ a.other = b
+ b.other = a
+ return a, b
+
+ class SuperFriend(Friend):
+ pass
+
+ a, b = SuperFriend.make_pair()
+
+This is more compact than using explicit type variables. Also, you can
+use ``Self`` in attribute annotations in addition to methods.
+
+.. note::
+
+ To use this feature on Python versions earlier than 3.11, you will need to
+ import ``Self`` from ``typing_extensions`` (version 4.0 or newer).
.. _variance-of-generics:
@@ -345,51 +337,84 @@ relations between them: invariant, covariant, and contravariant.
Assuming that we have a pair of types ``A`` and ``B``, and ``B`` is
a subtype of ``A``, these are defined as follows:
-* A generic class ``MyCovGen[T, ...]`` is called covariant in type variable
- ``T`` if ``MyCovGen[B, ...]`` is always a subtype of ``MyCovGen[A, ...]``.
-* A generic class ``MyContraGen[T, ...]`` is called contravariant in type
- variable ``T`` if ``MyContraGen[A, ...]`` is always a subtype of
- ``MyContraGen[B, ...]``.
-* A generic class ``MyInvGen[T, ...]`` is called invariant in ``T`` if neither
+* A generic class ``MyCovGen[T]`` is called covariant in type variable
+ ``T`` if ``MyCovGen[B]`` is always a subtype of ``MyCovGen[A]``.
+* A generic class ``MyContraGen[T]`` is called contravariant in type
+ variable ``T`` if ``MyContraGen[A]`` is always a subtype of
+ ``MyContraGen[B]``.
+* A generic class ``MyInvGen[T]`` is called invariant in ``T`` if neither
of the above is true.
Let us illustrate this with a few simple examples:
-* :py:data:`~typing.Union` is covariant in all variables: ``Union[Cat, int]`` is a subtype
- of ``Union[Animal, int]``,
- ``Union[Dog, int]`` is also a subtype of ``Union[Animal, int]``, etc.
- Most immutable containers such as :py:class:`~typing.Sequence` and :py:class:`~typing.FrozenSet` are also
- covariant.
-* :py:data:`~typing.Callable` is an example of type that behaves contravariant in types of
- arguments, namely ``Callable[[Employee], int]`` is a subtype of
- ``Callable[[Manager], int]``. To understand this, consider a function:
+.. code-block:: python
+
+    # We'll use these classes in the examples below
+    class Shape:
+        num_sides = 0  # give Shape an attribute so the examples type check
+    class Triangle(Shape): ...
+    class Square(Shape): ...
+
+* Most immutable containers, such as :py:class:`~typing.Sequence` and
+  :py:class:`~typing.FrozenSet`, are covariant. :py:data:`~typing.Union` is
+ also covariant in all variables: ``Union[Triangle, int]`` is
+ a subtype of ``Union[Shape, int]``.
.. code-block:: python
- def salaries(staff: list[Manager],
- accountant: Callable[[Manager], int]) -> list[int]: ...
+ def count_lines(shapes: Sequence[Shape]) -> int:
+ return sum(shape.num_sides for shape in shapes)
- This function needs a callable that can calculate a salary for managers, and
- if we give it a callable that can calculate a salary for an arbitrary
- employee, it's still safe.
-* :py:class:`~typing.List` is an invariant generic type. Naively, one would think
- that it is covariant, but let us consider this code:
+ triangles: Sequence[Triangle]
+ count_lines(triangles) # OK
+
+ def foo(triangle: Triangle, num: int):
+ shape_or_number: Union[Shape, int]
+ # a Triangle is a Shape, and a Shape is a valid Union[Shape, int]
+ shape_or_number = triangle
+
+ Covariance should feel relatively intuitive, but contravariance and invariance
+ can be harder to reason about.
+
+* :py:data:`~typing.Callable` is an example of a type that is contravariant
+  in the types of its arguments. That is, ``Callable[[Shape], int]`` is a subtype of
+  ``Callable[[Triangle], int]``, despite ``Shape`` being a supertype of
+  ``Triangle``. To understand this, consider:
.. code-block:: python
- class Shape:
- pass
+ def cost_of_paint_required(
+ triangle: Triangle,
+ area_calculator: Callable[[Triangle], float]
+ ) -> float:
+ return area_calculator(triangle) * DOLLAR_PER_SQ_FT
+
+ # This straightforwardly works
+ def area_of_triangle(triangle: Triangle) -> float: ...
+ cost_of_paint_required(triangle, area_of_triangle) # OK
+
+ # But this works as well!
+ def area_of_any_shape(shape: Shape) -> float: ...
+ cost_of_paint_required(triangle, area_of_any_shape) # OK
+
+ ``cost_of_paint_required`` needs a callable that can calculate the area of a
+ triangle. If we give it a callable that can calculate the area of an
+ arbitrary shape (not just triangles), everything still works.
+
+* :py:class:`~typing.List` is an invariant generic type. Naively, one would think
+ that it is covariant, like :py:class:`~typing.Sequence` above, but consider this code:
+
+ .. code-block:: python
class Circle(Shape):
- def rotate(self):
- ...
+ # The rotate method is only defined on Circle, not on Shape
+ def rotate(self): ...
def add_one(things: list[Shape]) -> None:
things.append(Shape())
- my_things: list[Circle] = []
- add_one(my_things) # This may appear safe, but...
- my_things[0].rotate() # ...this will fail
+ my_circles: list[Circle] = []
+ add_one(my_circles) # This may appear safe, but...
+      my_circles[-1].rotate()  # ...this will fail, since my_circles[-1] is now a Shape, not a Circle
Another example of invariant type is :py:class:`~typing.Dict`. Most mutable containers
are invariant.
@@ -417,6 +442,45 @@ type variables defined with special keyword arguments ``covariant`` or
my_box = Box(Cat())
look_into(my_box) # OK, but mypy would complain here for an invariant type
+.. _type-variable-upper-bound:
+
+Type variables with upper bounds
+********************************
+
+A type variable can also be restricted to having values that are
+subtypes of a specific type. This type is called the upper bound of
+the type variable, and is specified with the ``bound=...`` keyword
+argument to :py:class:`~typing.TypeVar`.
+
+.. code-block:: python
+
+ from typing import TypeVar, SupportsAbs
+
+ T = TypeVar('T', bound=SupportsAbs[float])
+
+In the definition of a generic function that uses such a type variable
+``T``, the type represented by ``T`` is assumed to be a subtype of
+its upper bound, so the function can use methods of the upper bound on
+values of type ``T``.
+
+.. code-block:: python
+
+ def largest_in_absolute_value(*xs: T) -> T:
+ return max(xs, key=abs) # Okay, because T is a subtype of SupportsAbs[float].
+
+In a call to such a function, the type ``T`` must be replaced by a
+type that is a subtype of its upper bound. Continuing the example
+above:
+
+.. code-block:: python
+
+ largest_in_absolute_value(-3.5, 2) # Okay, has type float.
+ largest_in_absolute_value(5+6j, 7) # Okay, has type complex.
+ largest_in_absolute_value('a', 'b') # Error: 'str' is not a subtype of SupportsAbs[float].
+
+Type parameters of generic classes may also have upper bounds, which
+restrict the valid values for the type parameter in the same way.
+
.. _type-variable-value-restriction:
Type variables with value restriction
@@ -451,7 +515,7 @@ argument types:
concat(b'a', b'b') # Okay
concat(1, 2) # Error!
-Note that this is different from a union type, since combinations
+Importantly, this is different from a union type, since combinations
of ``str`` and ``bytes`` are not accepted:
.. code-block:: python
@@ -459,8 +523,8 @@ of ``str`` and ``bytes`` are not accepted:
concat('string', b'bytes') # Error!
In this case, this is exactly what we want, since it's not possible
-to concatenate a string and a bytes object! The type checker
-will reject this function:
+to concatenate a string and a bytes object! If we tried to use
+``Union``, the type checker would complain about this possibility:
.. code-block:: python
@@ -475,10 +539,13 @@ subtype of ``str``:
class S(str): pass
ss = concat(S('foo'), S('bar'))
+ reveal_type(ss) # Revealed type is "builtins.str"
You may expect that the type of ``ss`` is ``S``, but the type is
actually ``str``: a subtype gets promoted to one of the valid values
-for the type variable, which in this case is ``str``. This is thus
+for the type variable, which in this case is ``str``.
+
+This is thus
subtly different from *bounded quantification* in languages such as
Java, where the return type would be ``S``. The way mypy implements
this is correct for ``concat``, since ``concat`` actually returns a
@@ -494,62 +561,57 @@ values when defining a generic class. For example, mypy uses the type
:py:class:`Pattern[AnyStr] ` for the return value of :py:func:`re.compile`,
since regular expressions can be based on a string or a bytes pattern.
-.. _type-variable-upper-bound:
-
-Type variables with upper bounds
-********************************
-
-A type variable can also be restricted to having values that are
-subtypes of a specific type. This type is called the upper bound of
-the type variable, and is specified with the ``bound=...`` keyword
-argument to :py:class:`~typing.TypeVar`.
+A type variable may not have both a value restriction and an upper
+bound (see :ref:`type-variable-upper-bound`).
-.. code-block:: python
+.. _declaring-decorators:
- from typing import TypeVar, SupportsAbs
+Declaring decorators
+********************
- T = TypeVar('T', bound=SupportsAbs[float])
+Decorators are typically functions that take a function as an argument and
+return another function. Describing this behaviour in terms of types can
+be a little tricky; we'll show how you can use ``TypeVar`` and a special
+kind of type variable called a *parameter specification* to do so.
-In the definition of a generic function that uses such a type variable
-``T``, the type represented by ``T`` is assumed to be a subtype of
-its upper bound, so the function can use methods of the upper bound on
-values of type ``T``.
+Suppose we have the following decorator, not yet type annotated,
+that preserves the original function's signature and merely prints the decorated function's name:
.. code-block:: python
- def largest_in_absolute_value(*xs: T) -> T:
- return max(xs, key=abs) # Okay, because T is a subtype of SupportsAbs[float].
+ def printing_decorator(func):
+ def wrapper(*args, **kwds):
+ print("Calling", func)
+ return func(*args, **kwds)
+ return wrapper
-In a call to such a function, the type ``T`` must be replaced by a
-type that is a subtype of its upper bound. Continuing the example
-above,
+and we use it to decorate function ``add_forty_two``:
.. code-block:: python
- largest_in_absolute_value(-3.5, 2) # Okay, has type float.
- largest_in_absolute_value(5+6j, 7) # Okay, has type complex.
- largest_in_absolute_value('a', 'b') # Error: 'str' is not a subtype of SupportsAbs[float].
+ # A decorated function.
+ @printing_decorator
+ def add_forty_two(value: int) -> int:
+ return value + 42
-Type parameters of generic classes may also have upper bounds, which
-restrict the valid values for the type parameter in the same way.
+ a = add_forty_two(3)
-A type variable may not have both a value restriction (see
-:ref:`type-variable-value-restriction`) and an upper bound.
+Since ``printing_decorator`` is not type-annotated, the following won't get type checked:
-.. _declaring-decorators:
+.. code-block:: python
-Declaring decorators
-********************
+ reveal_type(a) # Revealed type is "Any"
+ add_forty_two('foo') # No type checker error :(
-One common application of type variable upper bounds is in declaring a
-decorator that preserves the signature of the function it decorates,
-regardless of that signature.
+This is a sorry state of affairs! If you run with ``--strict``, mypy will
+even alert you to this fact:
+``Untyped decorator makes function "add_forty_two" untyped``
Note that class decorators are handled differently than function decorators in
mypy: decorating a class does not erase its type, even if the decorator has
incomplete type annotations.
-Here's a complete example of a function decorator:
+Here's how one could annotate the decorator:
.. code-block:: python
@@ -558,32 +620,92 @@ Here's a complete example of a function decorator:
F = TypeVar('F', bound=Callable[..., Any])
# A decorator that preserves the signature.
- def my_decorator(func: F) -> F:
+ def printing_decorator(func: F) -> F:
def wrapper(*args, **kwds):
print("Calling", func)
return func(*args, **kwds)
return cast(F, wrapper)
- # A decorated function.
- @my_decorator
- def foo(a: int) -> str:
- return str(a)
-
- a = foo(12)
- reveal_type(a) # str
- foo('x') # Type check error: incompatible type "str"; expected "int"
+ @printing_decorator
+ def add_forty_two(value: int) -> int:
+ return value + 42
-From the final block we see that the signatures of the decorated
-functions ``foo()`` and ``bar()`` are the same as those of the original
-functions (before the decorator is applied).
+ a = add_forty_two(3)
+ reveal_type(a) # Revealed type is "builtins.int"
+ add_forty_two('x') # Argument 1 to "add_forty_two" has incompatible type "str"; expected "int"
-The bound on ``F`` is used so that calling the decorator on a
-non-function (e.g. ``my_decorator(1)``) will be rejected.
+This still has some shortcomings. First, we need to use the unsafe
+:py:func:`~typing.cast` to convince mypy that ``wrapper()`` has the same
+signature as ``func``. See :ref:`casts `.
-Also note that the ``wrapper()`` function is not type-checked. Wrapper
-functions are typically small enough that this is not a big
+Second, the ``wrapper()`` function is not tightly type checked, although
+wrapper functions are typically small enough that this is not a big
problem. This is also the reason for the :py:func:`~typing.cast` call in the
-``return`` statement in ``my_decorator()``. See :ref:`casts `.
+``return`` statement in ``printing_decorator()``.
+
+However, we can use a parameter specification (:py:class:`~typing.ParamSpec`)
+for a more faithful type annotation:
+
+.. code-block:: python
+
+ from typing import Callable, TypeVar
+ from typing_extensions import ParamSpec
+
+ P = ParamSpec('P')
+ T = TypeVar('T')
+
+ def printing_decorator(func: Callable[P, T]) -> Callable[P, T]:
+ def wrapper(*args: P.args, **kwds: P.kwargs) -> T:
+ print("Calling", func)
+ return func(*args, **kwds)
+ return wrapper
+
+Parameter specifications also allow you to describe decorators that
+alter the signature of the input function:
+
+.. code-block:: python
+
+ from typing import Callable, TypeVar
+ from typing_extensions import ParamSpec
+
+ P = ParamSpec('P')
+ T = TypeVar('T')
+
+ # We reuse 'P' in the return type, but replace 'T' with 'str'
+ def stringify(func: Callable[P, T]) -> Callable[P, str]:
+ def wrapper(*args: P.args, **kwds: P.kwargs) -> str:
+ return str(func(*args, **kwds))
+ return wrapper
+
+ @stringify
+ def add_forty_two(value: int) -> int:
+ return value + 42
+
+ a = add_forty_two(3)
+ reveal_type(a) # Revealed type is "builtins.str"
+ add_forty_two('x') # error: Argument 1 to "add_forty_two" has incompatible type "str"; expected "int"
+
+Or insert an argument:
+
+.. code-block:: python
+
+ from typing import Callable, TypeVar
+ from typing_extensions import Concatenate, ParamSpec
+
+ P = ParamSpec('P')
+ T = TypeVar('T')
+
+ def printing_decorator(func: Callable[P, T]) -> Callable[Concatenate[str, P], T]:
+ def wrapper(msg: str, /, *args: P.args, **kwds: P.kwargs) -> T:
+ print("Calling", func, "with", msg)
+ return func(*args, **kwds)
+ return wrapper
+
+ @printing_decorator
+ def add_forty_two(value: int) -> int:
+ return value + 42
+
+ a = add_forty_two('three', 3)
.. _decorator-factories:
@@ -611,7 +733,7 @@ achieved by combining with :py:func:`@overload `:
.. code-block:: python
- from typing import Any, Callable, TypeVar, overload
+ from typing import Any, Callable, Optional, TypeVar, overload
F = TypeVar('F', bound=Callable[..., Any])
@@ -623,7 +745,7 @@ achieved by combining with :py:func:`@overload `:
def atomic(*, savepoint: bool = True) -> Callable[[F], F]: ...
# Implementation
- def atomic(__func: Callable[..., Any] = None, *, savepoint: bool = True):
+ def atomic(__func: Optional[Callable[..., Any]] = None, *, savepoint: bool = True):
def decorator(func: Callable[..., Any]):
... # Code goes here
if __func is not None:
@@ -673,9 +795,8 @@ protocols mostly follow the normal rules for generic classes. Example:
y: Box[int] = ...
x = y # Error -- Box is invariant
-Per :pep:`PEP 544: Generic protocols <544#generic-protocols>`, ``class
-ClassName(Protocol[T])`` is allowed as a shorthand for ``class
-ClassName(Protocol, Generic[T])``.
+Note that ``class ClassName(Protocol[T])`` is allowed as a shorthand for
+``class ClassName(Protocol, Generic[T])``, as per :pep:`PEP 544: Generic protocols <544#generic-protocols>`.
The main difference between generic protocols and ordinary generic
classes is that mypy checks that the declared variances of generic
@@ -686,20 +807,18 @@ variable is invariant:
.. code-block:: python
- from typing import TypeVar
- from typing_extensions import Protocol
+ from typing import Protocol, TypeVar
T = TypeVar('T')
- class ReadOnlyBox(Protocol[T]): # Error: covariant type variable expected
+ class ReadOnlyBox(Protocol[T]): # error: Invariant type variable "T" used in protocol where covariant one is expected
def content(self) -> T: ...
This example correctly uses a covariant type variable:
.. code-block:: python
- from typing import TypeVar
- from typing_extensions import Protocol
+ from typing import Protocol, TypeVar
T_co = TypeVar('T_co', covariant=True)
@@ -724,16 +843,12 @@ Generic protocols can also be recursive. Example:
class L:
val: int
+ def next(self) -> 'L': ...
- ... # details omitted
-
- def next(self) -> 'L':
- ... # details omitted
-
- def last(seq: Linked[T]) -> T:
- ... # implementation omitted
+ def last(seq: Linked[T]) -> T: ...
- result = last(L()) # Inferred type of 'result' is 'int'
+ result = last(L())
+ reveal_type(result) # Revealed type is "builtins.int"
.. _generic-type-aliases:
@@ -803,9 +918,60 @@ defeating the purpose of using aliases. Example:
OIntVec = Optional[Vec[int]]
-.. note::
+Using type variable bounds or values in generic aliases has the same effect
+as in generic classes/functions.
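+
+For example, a quick sketch with a bounded type variable (the alias and
+variable names are made up):
+
+.. code-block:: python
+
+    from typing import TypeVar
+
+    T = TypeVar('T', bound=int)
+    BoundedVec = list[T]  # generic alias; T must be a subtype of int
+
+    x: BoundedVec[bool]  # OK, bool is a subtype of int
+    y: BoundedVec[str]   # Error: str is not a subtype of int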
+
+
+Generic class internals
+***********************
+
+You may wonder what happens at runtime when you index a generic class.
+Indexing returns a *generic alias* to the original class that returns instances
+of the original class on instantiation:
+
+.. code-block:: python
+
+ >>> from typing import TypeVar, Generic
+ >>> T = TypeVar('T')
+ >>> class Stack(Generic[T]): ...
+ >>> Stack
+   <class '__main__.Stack'>
+ >>> Stack[int]
+ __main__.Stack[int]
+ >>> instance = Stack[int]()
+ >>> instance.__class__
+   <class '__main__.Stack'>
- A type alias does not define a new type. For generic type aliases
- this means that variance of type variables used for alias definition does not
- apply to aliases. A parameterized generic alias is treated simply as an original
- type with the corresponding type variables substituted.
+Generic aliases can be instantiated or subclassed, similar to real
+classes, but the above examples illustrate that type variables are
+erased at runtime. Generic ``Stack`` instances are just ordinary
+Python objects, and they have no extra runtime overhead or magic due
+to being generic, other than a metaclass that overloads the indexing
+operator.
+
+Note that in Python 3.8 and lower, the built-in types
+:py:class:`list`, :py:class:`dict` and others do not support indexing.
+This is why we have the aliases :py:class:`~typing.List`,
+:py:class:`~typing.Dict` and so on in the :py:mod:`typing`
+module. Indexing these aliases gives you a generic alias that
+resembles generic aliases constructed by directly indexing the target
+class in more recent versions of Python:
+
+.. code-block:: python
+
+ >>> # Only relevant for Python 3.8 and below
+ >>> # For Python 3.9 onwards, prefer `list[int]` syntax
+ >>> from typing import List
+ >>> List[int]
+ typing.List[int]
+
+Note that the generic aliases in ``typing`` don't support constructing
+instances:
+
+.. code-block:: python
+
+ >>> from typing import List
+ >>> List[int]()
+ Traceback (most recent call last):
+ ...
+ TypeError: Type List cannot be instantiated; use list() instead
diff --git a/docs/source/getting_started.rst b/docs/source/getting_started.rst
index 124edd650d27..9b927097cfd2 100644
--- a/docs/source/getting_started.rst
+++ b/docs/source/getting_started.rst
@@ -4,17 +4,19 @@ Getting started
===============
This chapter introduces some core concepts of mypy, including function
-annotations, the :py:mod:`typing` module, library stubs, and more.
+annotations, the :py:mod:`typing` module, stub files, and more.
-Be sure to read this chapter carefully, as the rest of the documentation
+If you're looking for a quick intro, see the
+:ref:`mypy cheatsheet `.
+
+If you're unfamiliar with the concepts of static and dynamic type checking,
+be sure to read this chapter carefully, as the rest of the documentation
may not make much sense otherwise.
Installing and running mypy
***************************
-Mypy requires Python 3.6 or later to run. Once you've
-`installed Python 3 `_,
-install mypy using pip:
+Mypy requires Python 3.7 or later to run. You can install mypy using pip:
.. code-block:: shell
@@ -31,26 +33,21 @@ out any errors it finds. Mypy will type check your code *statically*: this
means that it will check for errors without ever running your code, just
like a linter.
-This means that you are always free to ignore the errors mypy reports and
-treat them as just warnings, if you so wish: mypy runs independently from
-Python itself.
+This also means that you are always free to ignore the errors mypy reports,
+if you so wish. You can always use the Python interpreter to run your code,
+even if mypy reports errors.
However, if you try directly running mypy on your existing Python code, it
-will most likely report little to no errors: you must add *type annotations*
-to your code to take full advantage of mypy. See the section below for details.
-
-.. note::
-
- Although you must install Python 3 to run mypy, mypy is fully capable of
- type checking Python 2 code as well: just pass in the :option:`--py2 ` flag. See
- :ref:`python2` for more details.
+will most likely report little to no errors. This is a feature! It makes it
+easy to adopt mypy incrementally.
- .. code-block:: shell
+In order to get useful diagnostics from mypy, you must add *type annotations*
+to your code. See the section below for details.
- $ mypy --py2 program.py
+.. _getting-started-dynamic-vs-static:
-Function signatures and dynamic vs static typing
-************************************************
+Dynamic vs static typing
+************************
A function without type annotations is considered to be *dynamically typed* by mypy:
@@ -62,22 +59,32 @@ A function without type annotations is considered to be *dynamically typed* by m
By default, mypy will **not** type check dynamically typed functions. This means
that with a few exceptions, mypy will not report any errors with regular unannotated Python.
-This is the case even if you misuse the function: for example, mypy would currently
-not report any errors if you tried running ``greeting(3)`` or ``greeting(b"Alice")``
-even though those function calls would result in errors at runtime.
+This is the case even if you misuse the function!
+
+.. code-block:: python
+
+ def greeting(name):
+ return 'Hello ' + name
-You can teach mypy to detect these kinds of bugs by adding *type annotations* (also
-known as *type hints*). For example, you can teach mypy that ``greeting`` both accepts
+   # These calls will fail when the program runs, but mypy does not report an error
+ # because "greeting" does not have type annotations.
+ greeting(123)
+ greeting(b"Alice")
+
+We can get mypy to detect these kinds of bugs by adding *type annotations* (also
+known as *type hints*). For example, you can tell mypy that ``greeting`` both accepts
and returns a string like so:
.. code-block:: python
+ # The "name: str" annotation says that the "name" argument should be a string
+ # The "-> str" annotation says that "greeting" will return a string
def greeting(name: str) -> str:
return 'Hello ' + name
-This function is now *statically typed*: mypy can use the provided type hints to detect
-incorrect usages of the ``greeting`` function. For example, it will reject the following
-calls since the arguments have invalid types:
+This function is now *statically typed*: mypy will use the provided type hints
+to detect incorrect use of the ``greeting`` function, as well as incorrect use
+of variables within it. For example:
.. code-block:: python
@@ -86,13 +93,10 @@ calls since the arguments have invalid types:
greeting(3) # Argument 1 to "greeting" has incompatible type "int"; expected "str"
greeting(b'Alice') # Argument 1 to "greeting" has incompatible type "bytes"; expected "str"
+ greeting("World!") # No error
-Note that this is all still valid Python 3 code! The function annotation syntax
-shown above was added to Python :pep:`as a part of Python 3.0 <3107>`.
-
-If you are trying to type check Python 2 code, you can add type hints
-using a comment-based syntax instead of the Python 3 annotation syntax.
-See our section on :ref:`typing Python 2 code ` for more details.
+ def bad_greeting(name: str) -> str:
+ return 'Hello ' * name # Unsupported operand types for * ("str" and "str")
Being able to pick whether you want a function to be dynamically or statically
typed can be very helpful. For example, if you are migrating an existing
@@ -103,65 +107,35 @@ the code using dynamic typing and only add type hints later once the code is mor
Once you are finished migrating or prototyping your code, you can make mypy warn you
if you add a dynamic function by mistake by using the :option:`--disallow-untyped-defs `
-flag. See :ref:`command-line` for more information on configuring mypy.
-
-.. note::
-
- The earlier stages of analysis performed by mypy may report errors
- even for dynamically typed functions. However, you should not rely
- on this, as this may change in the future.
-
-More function signatures
-************************
-
-Here are a few more examples of adding type hints to function signatures.
-
-If a function does not explicitly return a value, give it a return
-type of ``None``. Using a ``None`` result in a statically typed
-context results in a type check error:
-
-.. code-block:: python
-
- def p() -> None:
- print('hello')
-
- a = p() # Error: "p" does not return a value
-
-Make sure to remember to include ``None``: if you don't, the function
-will be dynamically typed. For example:
+flag. You can also get mypy to provide some limited checking of dynamically typed
+functions by using the :option:`--check-untyped-defs ` flag.
+See :ref:`command-line` for more information on configuring mypy.
-.. code-block:: python
-
- def f():
- 1 + 'x' # No static type error (dynamically typed)
-
- def g() -> None:
- 1 + 'x' # Type check error (statically typed)
-
-Arguments with default values can be annotated like so:
+Strict mode and configuration
+*****************************
-.. code-block:: python
+Mypy has a *strict mode* that enables a number of additional checks,
+like :option:`--disallow-untyped-defs `.
- def greeting(name: str, excited: bool = False) -> str:
- message = 'Hello, {}'.format(name)
- if excited:
- message += '!!!'
- return message
+If you run mypy with the :option:`--strict ` flag, you
+will basically never get a type-related error at runtime without a corresponding
+mypy error, unless you explicitly circumvent mypy somehow.
-``*args`` and ``**kwargs`` arguments can be annotated like so:
+However, this flag will probably be too aggressive if you are trying
+to add static types to a large, existing codebase. See :ref:`existing-code`
+for suggestions on how to handle that case.
-.. code-block:: python
+Mypy is very configurable, so you can start with using ``--strict``
+and toggle off individual checks. For instance, if you use many third
+party libraries that do not have types,
+:option:`--ignore-missing-imports `
+may be useful. See :ref:`getting-to-strict` for how to build up to ``--strict``.
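+
+For example, such an invocation might look like this (``program.py`` is
+a placeholder):
+
+.. code-block:: shell
+
+   $ mypy --strict --ignore-missing-imports program.py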
- def stars(*args: int, **kwargs: float) -> None:
- # 'args' has type 'tuple[int, ...]' (a tuple of ints)
- # 'kwargs' has type 'dict[str, float]' (a dict of strs to floats)
- for arg in args:
- print(arg)
- for key, value in kwargs.items():
- print(key, value)
+See :ref:`command-line` and :ref:`config-file` for a complete reference on
+configuration options.
-Additional types, and the typing module
-***************************************
+More complex types
+******************
So far, we've added type hints that use only basic concrete types like
``str`` and ``float``. What if we want to express more complex types,
@@ -187,28 +161,11 @@ accept one or more *type parameters*. In this case, we *parameterized* :py:class
by writing ``list[str]``. This lets mypy know that ``greet_all`` accepts specifically
lists containing strings, and not lists containing ints or any other type.
-In Python 3.8 and earlier, you can instead import the
-:py:class:`~typing.List` type from the :py:mod:`typing` module:
-
-.. code-block:: python
-
- from typing import List # Python 3.8 and earlier
-
- def greet_all(names: List[str]) -> None:
- for name in names:
- print('Hello ' + name)
-
- ...
-
-You can find many of these more complex static types in the :py:mod:`typing` module.
-
In the above examples, the type signature is perhaps a little too rigid.
After all, there's no reason why this function must accept *specifically* a list --
it would run just fine if you were to pass in a tuple, a set, or any other custom iterable.
-You can express this idea using the
-:py:class:`collections.abc.Iterable` (or :py:class:`typing.Iterable` in Python
-3.8 and earlier) type instead of :py:class:`list` :
+You can express this idea using :py:class:`collections.abc.Iterable`:
.. code-block:: python
@@ -218,8 +175,19 @@ You can express this idea using the
for name in names:
print('Hello ' + name)
+This behavior is actually a fundamental aspect of the PEP 484 type system: when
+we annotate some variable with a type ``T``, we are telling mypy that the
+variable can be assigned an instance of ``T``, or an instance of a *subtype* of ``T``.
+That is, ``list[str]`` is a subtype of ``Iterable[str]``.
+
+This also applies to inheritance, so if you have a class ``Child`` that inherits from
+``Parent``, then a value of type ``Child`` can be assigned to a variable of type ``Parent``.
+For example, a ``RuntimeError`` instance can be passed to a function that is annotated
+as taking an ``Exception``.
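+
+A small sketch of that last point:
+
+.. code-block:: python
+
+    def log_error(error: Exception) -> None:
+        print(f"error: {error}")
+
+    # OK: RuntimeError is a subtype of Exception
+    log_error(RuntimeError("something went wrong"))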
+
As another example, suppose you want to write a function that can accept *either*
-ints or strings, but no other types. You can express this using the :py:data:`~typing.Union` type:
+ints or strings, but no other types. You can express this using the
+:py:data:`~typing.Union` type. For example, ``int`` is a subtype of ``Union[int, str]``:
.. code-block:: python
@@ -227,30 +195,16 @@ ints or strings, but no other types. You can express this using the :py:data:`~t
def normalize_id(user_id: Union[int, str]) -> str:
if isinstance(user_id, int):
- return 'user-{}'.format(100000 + user_id)
+ return f'user-{100_000 + user_id}'
else:
return user_id
-Similarly, suppose that you want the function to accept only strings or ``None``. You can
-again use :py:data:`~typing.Union` and use ``Union[str, None]`` -- or alternatively, use the type
-``Optional[str]``. These two types are identical and interchangeable: ``Optional[str]``
-is just a shorthand or *alias* for ``Union[str, None]``. It exists mostly as a convenience
-to help function signatures look a little cleaner:
-
-.. code-block:: python
-
- from typing import Optional
+The :py:mod:`typing` module contains many other useful types.
- def greeting(name: Optional[str] = None) -> str:
- # Optional[str] means the same thing as Union[str, None]
- if name is None:
- name = 'stranger'
- return 'Hello, ' + name
+For a quick overview, look through the :ref:`mypy cheatsheet <cheat-sheet-py3>`.
-The :py:mod:`typing` module contains many other useful types. You can find a
-quick overview by looking through the :ref:`mypy cheatsheets `
-and a more detailed overview (including information on how to make your own
-generic types or your own type aliases) by looking through the
+For a detailed overview (including information on how to make your own
+generic types or your own type aliases), look through the
:ref:`type system reference <overview-type-system-reference>`.
.. note::
@@ -278,10 +232,7 @@ mypy will try and *infer* as many details as possible.
We saw an example of this in the ``normalize_id`` function above -- mypy understands
basic :py:func:`isinstance` checks and so can infer that the ``user_id`` variable was of
-type ``int`` in the if-branch and of type ``str`` in the else-branch. Similarly, mypy
-was able to understand that ``name`` could not possibly be ``None`` in the ``greeting``
-function above, based both on the ``name is None`` check and the variable assignment
-in that if statement.
+type ``int`` in the if-branch and of type ``str`` in the else-branch.
As another example, consider the following function. Mypy can type check this function
without a problem: it will use the available context and deduce that ``output`` must be
@@ -296,110 +247,60 @@ of type ``list[float]`` and that ``num`` must be of type ``float``:
output.append(num)
return output
-Mypy will warn you if it is unable to determine the type of some variable --
-for example, when assigning an empty dictionary to some global value:
-
-.. code-block:: python
-
- my_global_dict = {} # Error: Need type annotation for "my_global_dict"
-
-You can teach mypy what type ``my_global_dict`` is meant to have by giving it
-a type hint. For example, if you knew this variable is supposed to be a dict
-of ints to floats, you could annotate it using either variable annotations
-(introduced in Python 3.6 by :pep:`526`) or using a comment-based
-syntax like so:
+For more details, see :ref:`type-inference-and-annotations`.
-.. code-block:: python
-
- # If you're using Python 3.9+
- my_global_dict: dict[int, float] = {}
-
- # If you're using Python 3.6+
- my_global_dict: Dict[int, float] = {}
-
- # If you want compatibility with even older versions of Python
- my_global_dict = {} # type: Dict[int, float]
-
-.. _stubs-intro:
-
-Library stubs and typeshed
-**************************
+Types from libraries
+********************
-Mypy uses library *stubs* to type check code interacting with library
-modules, including the Python standard library. A library stub defines
-a skeleton of the public interface of the library, including classes,
-variables and functions, and their types. Mypy ships with stubs for
-the standard library from the `typeshed
-`_ project, which contains library
-stubs for the Python builtins, the standard library, and selected
-third-party packages.
+Mypy can also understand how to work with types from libraries that you use.
-For example, consider this code:
+For instance, mypy comes out of the box with an intimate knowledge of the
+Python standard library. For example, here is a function which uses the
+``Path`` object from the
+`pathlib standard library module <https://docs.python.org/3/library/pathlib.html>`_:
.. code-block:: python
- x = chr(4)
+ from pathlib import Path
-Without a library stub, mypy would have no way of inferring the type of ``x``
-and checking that the argument to :py:func:`chr` has a valid type.
+ def load_template(template_path: Path, name: str) -> str:
+ # Mypy knows that `template_path` has a `read_text` method that returns a str
+ template = template_path.read_text()
+ # ...so it understands this line type checks
+ return template.replace('USERNAME', name)
-Mypy complains if it can't find a stub (or a real module) for a
-library module that you import. Some modules ship with stubs or inline
-annotations that mypy can automatically find, or you can install
-additional stubs using pip (see :ref:`fix-missing-imports` and
-:ref:`installed-packages` for the details). For example, you can install
-the stubs for the ``requests`` package like this:
+If a third party library you use :ref:`declares support for type checking <installed-packages>`,
+mypy will type check your use of that library based on the type hints
+it contains.
-.. code-block:: shell
-
- python3 -m pip install types-requests
-
-The stubs are usually packaged in a distribution named
-``types-``. Note that the distribution name may be
-different from the name of the package that you import. For example,
-``types-PyYAML`` contains stubs for the ``yaml`` package. Mypy can
-often suggest the name of the stub distribution:
+However, if the third party library does not have type hints, mypy will
+complain about missing type information.
.. code-block:: text
- prog.py:1: error: Library stubs not installed for "yaml" (or incompatible with Python 3.8)
+ prog.py:1: error: Library stubs not installed for "yaml"
prog.py:1: note: Hint: "python3 -m pip install types-PyYAML"
+ prog.py:2: error: Library stubs not installed for "requests"
+ prog.py:2: note: Hint: "python3 -m pip install types-requests"
...
-.. note::
-
- Starting in mypy 0.900, most third-party package stubs must be
- installed explicitly. This decouples mypy and stub versioning,
- allowing stubs to updated without updating mypy. This also allows
- stubs not originally included with mypy to be installed. Earlier
- mypy versions included a fixed set of stubs for third-party
- packages.
+In this case, you can provide mypy with a different source of type information
+by installing a *stub* package. A stub package is a package that contains
+type hints for another library, but no actual code.
-You can also :ref:`create
-stubs ` easily. We discuss ways of silencing complaints
-about missing stubs in :ref:`ignore-missing-imports`.
-
-Configuring mypy
-****************
+.. code-block:: shell
-Mypy supports many command line options that you can use to tweak how
-mypy behaves: see :ref:`command-line` for more details.
+ $ python3 -m pip install types-PyYAML types-requests
-For example, suppose you want to make sure *all* functions within your
-codebase are using static typing and make mypy report an error if you
-add a dynamically-typed function by mistake. You can make mypy do this
-by running mypy with the :option:`--disallow-untyped-defs ` flag.
+Stub packages for a distribution are often named ``types-<distribution>``.
+Note that a distribution name may be different from the name of the package that
+you import. For example, ``types-PyYAML`` contains stubs for the ``yaml``
+package.
-Another potentially useful flag is :option:`--strict `, which enables many
-(though not all) of the available strictness options -- including
-:option:`--disallow-untyped-defs `.
+For more discussion on strategies for handling errors about libraries without
+type information, refer to :ref:`fix-missing-imports`.
-This flag is mostly useful if you're starting a new project from scratch
-and want to maintain a high degree of type safety from day one. However,
-this flag will probably be too aggressive if you either plan on using
-many untyped third party libraries or are trying to add static types to
-a large, existing codebase. See :ref:`existing-code` for more suggestions
-on how to handle the latter case.
+For more information about stubs, see :ref:`stub-files`.
Next steps
**********
@@ -408,8 +309,7 @@ If you are in a hurry and don't want to read lots of documentation
before getting started, here are some pointers to quick learning
resources:
-* Read the :ref:`mypy cheatsheet ` (also for
- :ref:`Python 2 `).
+* Read the :ref:`mypy cheatsheet <cheat-sheet-py3>`.
* Read :ref:`existing-code` if you have a significant existing
codebase without many type annotations.
@@ -435,5 +335,8 @@ resources:
`mypy issue tracker <https://github.com/python/mypy/issues>`_ and
typing `Gitter chat <https://gitter.im/python/typing>`_.
+* For general questions about Python typing, try posting at
+ `typing discussions <https://github.com/python/typing/discussions>`_.
+
You can also continue reading this document and skip sections that
aren't relevant for you. You don't need to read sections in order.
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 7127308a2d1d..7ab3edebad39 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -6,35 +6,43 @@
Welcome to mypy documentation!
==============================
-Mypy is a static type checker for Python 3 and Python 2.7. If you sprinkle
-your code with type annotations, mypy can type check your code and find common
-bugs. As mypy is a static analyzer, or a lint-like tool, the type
-annotations are just hints for mypy and don't interfere when running your program.
-You run your program with a standard Python interpreter, and the annotations
-are treated effectively as comments.
-
-Using the Python 3 annotation syntax (using :pep:`484` and :pep:`526` notation)
-or a comment-based annotation syntax for Python 2 code, you will be able to
-efficiently annotate your code and use mypy to check the code for common errors.
-Mypy has a powerful and easy-to-use type system with modern features such as
-type inference, generics, callable types, tuple types, union types, and
-structural subtyping.
-
-As a developer, you decide how to use mypy in your workflow. You can always
-escape to dynamic typing as mypy's approach to static typing doesn't restrict
-what you can do in your programs. Using mypy will make your programs easier to
-understand, debug, and maintain.
+Mypy is a static type checker for Python.
+
+Type checkers help ensure that you're using variables and functions in your code
+correctly. With mypy, add type hints (:pep:`484`)
+to your Python programs, and mypy will warn you when you use those types
+incorrectly.
+
+Python is a dynamic language, so usually you'll only see errors in your code
+when you attempt to run it. Mypy is a *static* checker, so it finds bugs
+in your programs without even running them!
+
+Here is a small example to whet your appetite:
+
+.. code-block:: python
+
+ number = input("What is your favourite number?")
+ print("It is", number + 1) # error: Unsupported operand types for + ("str" and "int")
+
+Adding type hints for mypy does not interfere with the way your program would
+otherwise run. Think of type hints as similar to comments! You can always use
+the Python interpreter to run your code, even if mypy reports errors.
+
+Mypy is designed with gradual typing in mind. This means you can add type
+hints to your code base slowly and that you can always fall back to dynamic
+typing when static typing is not convenient.
-This documentation provides a short introduction to mypy. It will help you
-get started writing statically typed code. Knowledge of Python and a
-statically typed object-oriented language, such as Java, are assumed.
+Mypy has a powerful and easy-to-use type system, supporting features such as
+type inference, generics, callable types, tuple types, union types,
+structural subtyping and more. Using mypy will make your programs easier to
+understand, debug, and maintain.
.. note::
- Mypy is used in production by many companies and projects, but mypy is
- officially beta software. There will be occasional changes
+ Although mypy is production ready, there may be occasional changes
that break backward compatibility. The mypy development team tries to
- minimize the impact of changes to user code.
+ minimize the impact of changes to user code. In case of a major breaking
+ change, mypy's major version will be bumped.
Contents
--------
@@ -44,16 +52,8 @@ Contents
:caption: First steps
getting_started
- existing_code
-
-.. _overview-cheat-sheets:
-
-.. toctree::
- :maxdepth: 2
- :caption: Cheat sheets
-
cheat_sheet_py3
- cheat_sheet
+ existing_code
.. _overview-type-system-reference:
@@ -68,13 +68,13 @@ Contents
runtime_troubles
protocols
dynamic_typing
- python2
type_narrowing
duck_type_compatibility
stubs
generics
more_types
literal_types
+ typed_dict
final_attrs
metaclasses
diff --git a/docs/source/installed_packages.rst b/docs/source/installed_packages.rst
index 8db113e4ba9e..b9a3b891c99c 100644
--- a/docs/source/installed_packages.rst
+++ b/docs/source/installed_packages.rst
@@ -25,6 +25,23 @@ you can create such packages.
:pep:`561` specifies how a package can declare that it supports
type checking.
+.. note::
+
+ New versions of stub packages often use type system features not
+ supported by older, and even fairly recent mypy versions. If you
+ pin to an older version of mypy (using ``requirements.txt``, for
+ example), it is recommended that you also pin the versions of all
+ your stub package dependencies.
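+
+   For example, a ``requirements.txt`` that pins mypy could pin the stub
+   packages next to it (the version numbers below are purely illustrative):
+
+   .. code-block:: text
+
+      mypy==0.991
+      types-PyYAML==6.0.12.2
+      types-requests==2.28.11.7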
+
+.. note::
+
+ Starting in mypy 0.900, most third-party package stubs must be
+ installed explicitly. This decouples mypy and stub versioning,
+ allowing stubs to be updated without updating mypy. This also allows
+ stubs not originally included with mypy to be installed. Earlier
+ mypy versions included a fixed set of stubs for third-party
+ packages.
+
Using installed packages with mypy (PEP 561)
********************************************
@@ -40,10 +57,10 @@ stubs.)
If you have installed typed packages in another Python installation or
environment, mypy won't automatically find them. One option is to
install another copy of those packages in the environment in which you
-use to run mypy. Alternatively, you can use the
+installed mypy. Alternatively, you can use the
:option:`--python-executable <mypy --python-executable>` flag to point
-to the target Python executable, and mypy will find packages installed
-for that Python executable.
+to the Python executable for another environment, and mypy will find
+packages installed for that Python executable.
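+
+For example (a sketch; the path below is illustrative):
+
+.. code-block:: text
+
+   $ mypy --python-executable=/path/to/other/env/bin/python program.py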
Note that mypy does not support some more advanced import features,
such as zip imports and custom import hooks.
@@ -173,11 +190,6 @@ The ``setup.py`` might look like this:
packages=["package_c-stubs"]
)
-If you have separate stubs for Python 2 and Python 3, you can place
-the Python 2 stubs in a directory with the suffix ``-python2-stubs``.
-We recommend that Python 2 and Python 3 stubs are bundled together for
-simplicity, instead of distributing them separately.
-
The instructions above are enough to ensure that the built wheels
contain the appropriate files. However, to ensure inclusion inside the
``sdist`` (``.tar.gz`` archive), you may also need to modify the
diff --git a/docs/source/kinds_of_types.rst b/docs/source/kinds_of_types.rst
index dd19d7fc0622..b575a6eac4c5 100644
--- a/docs/source/kinds_of_types.rst
+++ b/docs/source/kinds_of_types.rst
@@ -347,23 +347,13 @@ This also works for attributes defined within methods:
def __init__(self) -> None:
self.count: Optional[int] = None
-As a special case, you can use a non-optional type when initializing an
-attribute to ``None`` inside a class body *and* using a type comment,
-since when using a type comment, an initializer is syntactically required,
-and ``None`` is used as a dummy, placeholder initializer:
+This is not a problem when using variable annotations, since no initial
+value is needed:
.. code-block:: python
class Container:
- items = None # type: list[str] # OK (only with type comment)
-
-This is not a problem when using variable annotations, since no initializer
-is needed:
-
-.. code-block:: python
-
- class Container:
- items: list[str] # No initializer
+ items: list[str] # No initial value
Mypy generally uses the first assignment to a variable to
infer the type of the variable. However, if you assign both a ``None``
@@ -398,12 +388,8 @@ case you should add an explicit ``Optional[...]`` annotation (or type comment).
.. note::
``Optional[...]`` *does not* mean a function argument with a default value.
- However, if the default value of an argument is ``None``, you can use
- an optional type for the argument, but it's not enforced by default.
- You can use the :option:`--no-implicit-optional ` command-line option to stop
- treating arguments with a ``None`` default value as having an implicit
- ``Optional[...]`` type. It's possible that this will become the default
- behavior in the future.
+ It simply means that ``None`` is a valid value for the argument. This is
+ a common source of confusion, since ``None`` is a common default value for arguments.
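+
+   A short sketch of the distinction (the function name is invented):
+
+   .. code-block:: python
+
+      from typing import Optional
+
+      # Optional[int] means None is an accepted *value*; "= None" is what
+      # makes the argument optional at the call site.
+      def retry(attempts: Optional[int] = None) -> None: ...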
.. _alternative_union_syntax:
@@ -421,9 +407,6 @@ the runtime with some limitations (see :ref:`runtime_troubles`).
t2: int | None # equivalent to Optional[int]
- # Usable in type comments
- t3 = 42 # type: int | str
-
.. _no_strict_optional:
Disabling strict optional checking
@@ -465,7 +448,7 @@ but it's not obvious from its signature:
def greeting(name: str) -> str:
if name:
- return 'Hello, {}'.format(name)
+ return f'Hello, {name}'
else:
return 'Hello, stranger'
@@ -482,7 +465,7 @@ enabled:
def greeting(name: Optional[str]) -> str:
if name:
- return 'Hello, {}'.format(name)
+ return f'Hello, {name}'
else:
return 'Hello, stranger'
@@ -615,10 +598,11 @@ The type of class objects
<484#the-type-of-class-objects>`.)
Sometimes you want to talk about class objects that inherit from a
-given class. This can be spelled as :py:class:`Type[C] ` where ``C`` is a
+given class. This can be spelled as ``type[C]`` (or, on Python 3.8 and lower,
+:py:class:`typing.Type[C] <typing.Type>`) where ``C`` is a
class. In other words, when ``C`` is the name of a class, using ``C``
to annotate an argument declares that the argument is an instance of
-``C`` (or of a subclass of ``C``), but using :py:class:`Type[C] ` as an
+``C`` (or of a subclass of ``C``), but using ``type[C]`` as an
argument annotation declares that the argument is a class object
deriving from ``C`` (or ``C`` itself).
@@ -649,7 +633,7 @@ you pass it the right class object:
# (Here we could write the user object to a database)
return user
-How would we annotate this function? Without :py:class:`~typing.Type` the best we
+How would we annotate this function? Without the ability to parameterize ``type``, the best we
could do would be:
.. code-block:: python
@@ -665,14 +649,14 @@ doesn't see that the ``buyer`` variable has type ``ProUser``:
buyer = new_user(ProUser)
buyer.pay() # Rejected, not a method on User
-However, using :py:class:`~typing.Type` and a type variable with an upper bound (see
+However, using the ``type[C]`` syntax and a type variable with an upper bound (see
:ref:`type-variable-upper-bound`) we can do better:
.. code-block:: python
U = TypeVar('U', bound=User)
- def new_user(user_class: Type[U]) -> U:
+ def new_user(user_class: type[U]) -> U:
# Same implementation as before
Now mypy will infer the correct type of the result when we call
@@ -685,58 +669,14 @@ Now mypy will infer the correct type of the result when we call
.. note::
- The value corresponding to :py:class:`Type[C] ` must be an actual class
+ The value corresponding to ``type[C]`` must be an actual class
object that's a subtype of ``C``. Its constructor must be
compatible with the constructor of ``C``. If ``C`` is a type
variable, its upper bound must be a class object.
-For more details about ``Type[]`` see :pep:`PEP 484: The type of
+For more details about ``type[]`` and :py:class:`typing.Type[] <typing.Type>`, see :pep:`PEP 484: The type of
class objects <484#the-type-of-class-objects>`.
-.. _text-and-anystr:
-
-Text and AnyStr
-***************
-
-Sometimes you may want to write a function which will accept only unicode
-strings. This can be challenging to do in a codebase intended to run in
-both Python 2 and Python 3 since ``str`` means something different in both
-versions and ``unicode`` is not a keyword in Python 3.
-
-To help solve this issue, use :py:class:`~typing.Text` which is aliased to
-``unicode`` in Python 2 and to ``str`` in Python 3. This allows you to
-indicate that a function should accept only unicode strings in a
-cross-compatible way:
-
-.. code-block:: python
-
- from typing import Text
-
- def unicode_only(s: Text) -> Text:
- return s + u'\u2713'
-
-In other cases, you may want to write a function that will work with any
-kind of string but will not let you mix two different string types. To do
-so use :py:data:`~typing.AnyStr`:
-
-.. code-block:: python
-
- from typing import AnyStr
-
- def concat(x: AnyStr, y: AnyStr) -> AnyStr:
- return x + y
-
- concat('foo', 'foo') # Okay
- concat(b'foo', b'foo') # Okay
- concat('foo', b'foo') # Error: cannot mix bytes and unicode
-
-For more details, see :ref:`type-variable-value-restriction`.
-
-.. note::
-
- How ``bytes``, ``str``, and ``unicode`` are handled between Python 2 and
- Python 3 may change in future versions of mypy.
-
.. _generators:
Generators
diff --git a/docs/source/literal_types.rst b/docs/source/literal_types.rst
index 8aad55c392e0..a66d300bd0fd 100644
--- a/docs/source/literal_types.rst
+++ b/docs/source/literal_types.rst
@@ -52,8 +52,8 @@ precise type signature for this function using ``Literal[...]`` and overloads:
The examples in this page import ``Literal`` as well as ``Final`` and
``TypedDict`` from the ``typing`` module. These types were added to
- ``typing`` in Python 3.8, but are also available for use in Python 2.7
- and 3.4 - 3.7 via the ``typing_extensions`` package.
+ ``typing`` in Python 3.8, but are also available for use in Python
+ 3.4 - 3.7 via the ``typing_extensions`` package.
Parameterizing Literals
***********************
@@ -446,7 +446,7 @@ Let's start with a definition:
def assert_never(value: NoReturn) -> NoReturn:
# This also works in runtime as well:
- assert False, 'This code should never be reached, got: {0}'.format(value)
+ assert False, f'This code should never be reached, got: {value}'
class Direction(Enum):
up = 'up'
@@ -495,13 +495,13 @@ the same way Python's runtime does:
... right = 'right'
Traceback (most recent call last):
...
- TypeError: Other: cannot extend enumeration 'Some'
+ TypeError: AllDirection: cannot extend enumeration 'Direction'
Mypy also catches this error:
.. code-block:: python
- class AllDirection(Direction): # E: Cannot inherit from final class "Some"
+ class AllDirection(Direction): # E: Cannot inherit from final class "Direction"
left = 'left'
right = 'right'
diff --git a/docs/source/metaclasses.rst b/docs/source/metaclasses.rst
index 750b93889b2e..396d7dbb42cc 100644
--- a/docs/source/metaclasses.rst
+++ b/docs/source/metaclasses.rst
@@ -25,27 +25,6 @@ Defining a metaclass
class A(metaclass=M):
pass
-In Python 2, the syntax for defining a metaclass is different:
-
-.. code-block:: python
-
- class A(object):
- __metaclass__ = M
-
-Mypy also supports using :py:func:`six.with_metaclass` and :py:func:`@six.add_metaclass `
-to define metaclass in a portable way:
-
-.. code-block:: python
-
- import six
-
- class A(six.with_metaclass(M)):
- pass
-
- @six.add_metaclass(M)
- class C(object):
- pass
-
.. _examples:
Metaclass usage example
@@ -93,12 +72,15 @@ so it's better not to combine metaclasses and class hierarchies:
class A1(metaclass=M1): pass
class A2(metaclass=M2): pass
- class B1(A1, metaclass=M2): pass # Mypy Error: Inconsistent metaclass structure for "B1"
+ class B1(A1, metaclass=M2): pass # Mypy Error: metaclass conflict
# At runtime the above definition raises an exception
# TypeError: metaclass conflict: the metaclass of a derived class must be a (non-strict) subclass of the metaclasses of all its bases
- # Same runtime error as in B1, but mypy does not catch it yet
- class B12(A1, A2): pass
+ class B12(A1, A2): pass # Mypy Error: metaclass conflict
+
+ # This can be solved via a common metaclass subtype:
+ class CorrectMeta(M1, M2): pass
+ class B2(A1, A2, metaclass=CorrectMeta): pass # OK, runtime is also OK
* Mypy does not understand dynamically-computed metaclasses,
such as ``class A(metaclass=f()): ...``
diff --git a/docs/source/more_types.rst b/docs/source/more_types.rst
index dd688cab7e17..ff5e8d384351 100644
--- a/docs/source/more_types.rst
+++ b/docs/source/more_types.rst
@@ -2,7 +2,7 @@ More types
==========
This section introduces a few additional kinds of types, including :py:data:`~typing.NoReturn`,
-:py:func:`NewType `, ``TypedDict``, and types for async code. It also discusses
+:py:func:`NewType <typing.NewType>`, and types for async code. It also discusses
how to give functions more precise types using overloads. All of these are only
situationally useful, so feel free to skip this section and come back when you
have a need for some of them.
@@ -20,9 +20,6 @@ Here's a quick summary of what's covered here:
signatures. This is useful if you need to encode a relationship between the
arguments and the return type that would be difficult to express normally.
-* ``TypedDict`` lets you give precise types for dictionaries that represent
- objects with a fixed schema, such as ``{'id': 1, 'items': ['x']}``.
-
* Async types let you type check programs using ``async`` and ``await``.
.. _noreturn:
@@ -60,12 +57,6 @@ pip to use :py:data:`~typing.NoReturn` in your code. Python 3 command line:
python3 -m pip install --upgrade typing-extensions
-This works for Python 2:
-
-.. code-block:: text
-
- pip install --upgrade typing-extensions
-
.. _newtypes:
NewTypes
@@ -120,7 +111,7 @@ implicitly casting from ``UserId`` where ``int`` is expected. Examples:
name_by_id(42) # Fails type check
name_by_id(UserId(42)) # OK
- num = UserId(5) + 1 # type: int
+ num: int = UserId(5) + 1
:py:func:`NewType <typing.NewType>` accepts exactly two arguments. The first argument must be a string literal
containing the name of the new type and must equal the name of the variable to which the new
@@ -846,7 +837,7 @@ expect to get back when ``await``-ing the coroutine.
import asyncio
async def format_string(tag: str, count: int) -> str:
- return 'T-minus {} ({})'.format(count, tag)
+ return f'T-minus {count} ({tag})'
async def countdown_1(tag: str, count: int) -> str:
while count > 0:
@@ -888,7 +879,7 @@ You may also choose to create a subclass of :py:class:`~typing.Awaitable` instea
def __await__(self) -> Generator[Any, None, str]:
for i in range(n, 0, -1):
- print('T-minus {} ({})'.format(i, tag))
+ print(f'T-minus {i} ({tag})')
yield from asyncio.sleep(0.1)
return "Blastoff!"
@@ -925,7 +916,7 @@ To create an iterable coroutine, subclass :py:class:`~typing.AsyncIterator`:
async def countdown_4(tag: str, n: int) -> str:
async for i in arange(n, 0, -1):
- print('T-minus {} ({})'.format(i, tag))
+ print(f'T-minus {i} ({tag})')
await asyncio.sleep(0.1)
return "Blastoff!"
@@ -947,7 +938,7 @@ generator type as the return type:
@asyncio.coroutine
def countdown_2(tag: str, count: int) -> Generator[Any, None, str]:
while count > 0:
- print('T-minus {} ({})'.format(count, tag))
+ print(f'T-minus {count} ({tag})')
yield from asyncio.sleep(0.1)
count -= 1
return "Blastoff!"
@@ -955,265 +946,3 @@ generator type as the return type:
loop = asyncio.get_event_loop()
loop.run_until_complete(countdown_2("USS Enterprise", 5))
loop.close()
-
-
-.. _typeddict:
-
-TypedDict
-*********
-
-Python programs often use dictionaries with string keys to represent objects.
-Here is a typical example:
-
-.. code-block:: python
-
- movie = {'name': 'Blade Runner', 'year': 1982}
-
-Only a fixed set of string keys is expected (``'name'`` and
-``'year'`` above), and each key has an independent value type (``str``
-for ``'name'`` and ``int`` for ``'year'`` above). We've previously
-seen the ``dict[K, V]`` type, which lets you declare uniform
-dictionary types, where every value has the same type, and arbitrary keys
-are supported. This is clearly not a good fit for
-``movie`` above. Instead, you can use a ``TypedDict`` to give a precise
-type for objects like ``movie``, where the type of each
-dictionary value depends on the key:
-
-.. code-block:: python
-
- from typing_extensions import TypedDict
-
- Movie = TypedDict('Movie', {'name': str, 'year': int})
-
- movie = {'name': 'Blade Runner', 'year': 1982} # type: Movie
-
-``Movie`` is a ``TypedDict`` type with two items: ``'name'`` (with type ``str``)
-and ``'year'`` (with type ``int``). Note that we used an explicit type
-annotation for the ``movie`` variable. This type annotation is
-important -- without it, mypy will try to infer a regular, uniform
-:py:class:`dict` type for ``movie``, which is not what we want here.
-
-.. note::
-
- If you pass a ``TypedDict`` object as an argument to a function, no
- type annotation is usually necessary since mypy can infer the
- desired type based on the declared argument type. Also, if an
- assignment target has been previously defined, and it has a
- ``TypedDict`` type, mypy will treat the assigned value as a ``TypedDict``,
- not :py:class:`dict`.
-
-Now mypy will recognize these as valid:
-
-.. code-block:: python
-
- name = movie['name'] # Okay; type of name is str
- year = movie['year'] # Okay; type of year is int
-
-Mypy will detect an invalid key as an error:
-
-.. code-block:: python
-
- director = movie['director'] # Error: 'director' is not a valid key
-
-Mypy will also reject a runtime-computed expression as a key, as
-it can't verify that it's a valid key. You can only use string
-literals as ``TypedDict`` keys.
-
-The ``TypedDict`` type object can also act as a constructor. It
-returns a normal :py:class:`dict` object at runtime -- a ``TypedDict`` does
-not define a new runtime type:
-
-.. code-block:: python
-
- toy_story = Movie(name='Toy Story', year=1995)
-
-This is equivalent to just constructing a dictionary directly using
-``{ ... }`` or ``dict(key=value, ...)``. The constructor form is
-sometimes convenient, since it can be used without a type annotation,
-and it also makes the type of the object explicit.
-
-Like all types, ``TypedDict``\s can be used as components to build
-arbitrarily complex types. For example, you can define nested
-``TypedDict``\s and containers with ``TypedDict`` items.
-Unlike most other types, mypy uses structural compatibility checking
-(or structural subtyping) with ``TypedDict``\s. A ``TypedDict`` object with
-extra items is compatible with (a subtype of) a narrower
-``TypedDict``, assuming item types are compatible (*totality* also affects
-subtyping, as discussed below).
-
-A ``TypedDict`` object is not a subtype of the regular ``dict[...]``
-type (and vice versa), since :py:class:`dict` allows arbitrary keys to be
-added and removed, unlike ``TypedDict``. However, any ``TypedDict`` object is
-a subtype of (that is, compatible with) ``Mapping[str, object]``, since
-:py:class:`~typing.Mapping` only provides read-only access to the dictionary items:
-
-.. code-block:: python
-
- def print_typed_dict(obj: Mapping[str, object]) -> None:
- for key, value in obj.items():
- print('{}: {}'.format(key, value))
-
- print_typed_dict(Movie(name='Toy Story', year=1995)) # OK
-
-.. note::
-
- Unless you are on Python 3.8 or newer (where ``TypedDict`` is available in
- standard library :py:mod:`typing` module) you need to install ``typing_extensions``
- using pip to use ``TypedDict``:
-
- .. code-block:: text
-
- python3 -m pip install --upgrade typing-extensions
-
- Or, if you are using Python 2:
-
- .. code-block:: text
-
- pip install --upgrade typing-extensions
-
-Totality
---------
-
-By default mypy ensures that a ``TypedDict`` object has all the specified
-keys. This will be flagged as an error:
-
-.. code-block:: python
-
- # Error: 'year' missing
- toy_story = {'name': 'Toy Story'} # type: Movie
-
-Sometimes you want to allow keys to be left out when creating a
-``TypedDict`` object. You can provide the ``total=False`` argument to
-``TypedDict(...)`` to achieve this:
-
-.. code-block:: python
-
- GuiOptions = TypedDict(
- 'GuiOptions', {'language': str, 'color': str}, total=False)
- options = {} # type: GuiOptions # Okay
- options['language'] = 'en'
-
-You may need to use :py:meth:`~dict.get` to access items of a partial (non-total)
-``TypedDict``, since indexing using ``[]`` could fail at runtime.
-However, mypy still lets use ``[]`` with a partial ``TypedDict`` -- you
-just need to be careful with it, as it could result in a :py:exc:`KeyError`.
-Requiring :py:meth:`~dict.get` everywhere would be too cumbersome. (Note that you
-are free to use :py:meth:`~dict.get` with total ``TypedDict``\s as well.)
-
-Keys that aren't required are shown with a ``?`` in error messages:
-
-.. code-block:: python
-
- # Revealed type is "TypedDict('GuiOptions', {'language'?: builtins.str,
- # 'color'?: builtins.str})"
- reveal_type(options)
-
-Totality also affects structural compatibility. You can't use a partial
-``TypedDict`` when a total one is expected. Also, a total ``TypedDict`` is not
-valid when a partial one is expected.
-
-Supported operations
---------------------
-
-``TypedDict`` objects support a subset of dictionary operations and methods.
-You must use string literals as keys when calling most of the methods,
-as otherwise mypy won't be able to check that the key is valid. List
-of supported operations:
-
-* Anything included in :py:class:`~typing.Mapping`:
-
- * ``d[key]``
- * ``key in d``
- * ``len(d)``
- * ``for key in d`` (iteration)
- * :py:meth:`d.get(key[, default]) `
- * :py:meth:`d.keys() `
- * :py:meth:`d.values() `
- * :py:meth:`d.items() `
-
-* :py:meth:`d.copy() `
-* :py:meth:`d.setdefault(key, default) `
-* :py:meth:`d1.update(d2) `
-* :py:meth:`d.pop(key[, default]) ` (partial ``TypedDict``\s only)
-* ``del d[key]`` (partial ``TypedDict``\s only)
-
-In Python 2 code, these methods are also supported:
-
-* ``has_key(key)``
-* ``viewitems()``
-* ``viewkeys()``
-* ``viewvalues()``
-
-.. note::
-
- :py:meth:`~dict.clear` and :py:meth:`~dict.popitem` are not supported since they are unsafe
- -- they could delete required ``TypedDict`` items that are not visible to
- mypy because of structural subtyping.
-
-Class-based syntax
-------------------
-
-An alternative, class-based syntax to define a ``TypedDict`` is supported
-in Python 3.6 and later:
-
-.. code-block:: python
-
- from typing_extensions import TypedDict
-
- class Movie(TypedDict):
- name: str
- year: int
-
-The above definition is equivalent to the original ``Movie``
-definition. It doesn't actually define a real class. This syntax also
-supports a form of inheritance -- subclasses can define additional
-items. However, this is primarily a notational shortcut. Since mypy
-uses structural compatibility with ``TypedDict``\s, inheritance is not
-required for compatibility. Here is an example of inheritance:
-
-.. code-block:: python
-
- class Movie(TypedDict):
- name: str
- year: int
-
- class BookBasedMovie(Movie):
- based_on: str
-
-Now ``BookBasedMovie`` has keys ``name``, ``year`` and ``based_on``.
-
-Mixing required and non-required items
---------------------------------------
-
-In addition to allowing reuse across ``TypedDict`` types, inheritance also allows
-you to mix required and non-required (using ``total=False``) items
-in a single ``TypedDict``. Example:
-
-.. code-block:: python
-
- class MovieBase(TypedDict):
- name: str
- year: int
-
- class Movie(MovieBase, total=False):
- based_on: str
-
-Now ``Movie`` has required keys ``name`` and ``year``, while ``based_on``
-can be left out when constructing an object. A ``TypedDict`` with a mix of required
-and non-required keys, such as ``Movie`` above, will only be compatible with
-another ``TypedDict`` if all required keys in the other ``TypedDict`` are required keys in the
-first ``TypedDict``, and all non-required keys of the other ``TypedDict`` are also non-required keys
-in the first ``TypedDict``.
-
-Unions of TypedDicts
---------------------
-
-Since TypedDicts are really just regular dicts at runtime, it is not possible to
-use ``isinstance`` checks to distinguish between different variants of a Union of
-TypedDict in the same way you can with regular objects.
-
-Instead, you can use the :ref:`tagged union pattern `. The referenced
-section of the docs has a full description with an example, but in short, you will
-need to give each TypedDict the same key where each value has a unique
-:ref:`Literal type `. Then, check that key to distinguish
-between your TypedDicts.
diff --git a/docs/source/mypy_daemon.rst b/docs/source/mypy_daemon.rst
index 29b554db82a9..ec12283ea3bb 100644
--- a/docs/source/mypy_daemon.rst
+++ b/docs/source/mypy_daemon.rst
@@ -152,6 +152,12 @@ Additional daemon flags
Write performance profiling information to ``FILE``. This is only available
for the ``check``, ``recheck``, and ``run`` commands.
+.. option:: --export-types
+
+ Store all expression types in memory for future use. This is useful to speed
+ up future calls to ``dmypy inspect`` (but uses more memory). Only valid for
+ ``check``, ``recheck``, and ``run`` commands.
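+
+   For example, one way to invoke it (a sketch; the file name is
+   illustrative):
+
+   .. code-block:: text
+
+      $ dmypy check --export-types program.py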
+
Static inference of annotations
*******************************
@@ -171,7 +177,7 @@ In this example, the function ``format_id()`` has no annotation:
.. code-block:: python
def format_id(user):
- return "User: {}".format(user)
+ return f"User: {user}"
root = format_id(0)
@@ -222,11 +228,6 @@ command.
Only allow some fraction of types in the suggested signature to be ``Any`` types.
The fraction ranges from ``0`` (same as ``--no-any``) to ``1``.
-.. option:: --try-text
-
- Try also using ``unicode`` wherever ``str`` is inferred. This flag may be useful
- for annotating Python 2/3 straddling code.
-
.. option:: --callsites
Only find call sites for a given function instead of suggesting a type.
@@ -243,8 +244,129 @@ command.
Set the maximum number of types to try for a function (default: ``64``).
-.. TODO: Add similar sections about go to definition, find usages, and
- reveal type when added, and then move this to a separate file.
+Statically inspect expressions
+******************************
+
+The daemon allows you to get the declared or inferred type of an expression (or other
+information about an expression, such as known attributes or definition location)
+using the ``dmypy inspect LOCATION`` command. The location of the expression should be
+specified in the format ``path/to/file.py:line:column[:end_line:end_column]``.
+Both line and column are 1-based. Both the start and end positions are inclusive.
+These rules match how mypy prints error locations in error messages.
+
+If a span is given (i.e. all 4 numbers), then only an exactly matching expression
+is inspected. If only a position is given (i.e. 2 numbers, line and column), mypy
+will inspect all *expressions*, that include this position, starting from the
+innermost one.
+
+Consider this Python code snippet:
+
+.. code-block:: python
+
+ def foo(x: int, longer_name: str) -> None:
+ x
+ longer_name
+
+Here, to find the type of ``x``, one needs to call ``dmypy inspect src.py:2:5:2:5``
+or ``dmypy inspect src.py:2:5``, while for ``longer_name`` one needs to call
+``dmypy inspect src.py:3:5:3:15`` or, for example, ``dmypy inspect src.py:3:10``.
+Please note that this command is only valid after the daemon has completed a
+successful type check (without parse errors), so that types are populated, e.g.
+using ``dmypy check``. If multiple expressions match the provided location,
+their types are returned separated by newlines.
+
+Important note: it is recommended to check files with :option:`--export-types`
+since otherwise most inspections will not work without :option:`--force-reload`.
+
+.. option:: --show INSPECTION
+
+ What kind of inspection to run for expression(s) found. Currently the supported
+ inspections are:
+
+ * ``type`` (default): Show the best known type of a given expression.
+ * ``attrs``: Show which attributes are valid for an expression (e.g. for
+ auto-completion). Format is ``{"Base1": ["name_1", "name_2", ...]; "Base2": ...}``.
+ Names are sorted by method resolution order. If the expression refers to a module,
+ then module attributes will be under a key like ``"<full.module.name>"``.
+ * ``definition`` (experimental): Show the definition location for a name
+ expression or member expression. Format is ``path/to/file.py:line:column:Symbol``.
+ If multiple definitions are found (e.g. for a Union attribute), they are
+ separated by commas.
+
+.. option:: --verbose
+
+ Increase verbosity of types string representation (can be repeated).
+ For example, this will print fully qualified names of instance types (like
+ ``"builtins.str"``), instead of just a short name (like ``"str"``).
+
+.. option:: --limit NUM
+
+ If the location is given as ``line:column``, this will cause the daemon to
+ return at most ``NUM`` inspections of the innermost expressions.
+ A value of 0 means no limit (this is the default). For example, if one calls
+ ``dmypy inspect src.py:4:10 --limit=1`` with this code
+
+ .. code-block:: python
+
+ def foo(x: int) -> str: ...
+ def bar(x: str) -> None: ...
+ baz: int
+ bar(foo(baz))
+
+ This will output just one type, ``"int"`` (for the ``baz`` name expression),
+ while without the limit option it would output all three types: ``"int"``,
+ ``"str"``, and ``"None"``.
+
+.. option:: --include-span
+
+ With this option on, the daemon will prepend each inspection result with
+ the full span of the corresponding expression, formatted as ``1:2:1:4 -> "int"``.
+ This may be useful in case multiple expressions match a location.
+
+.. option:: --include-kind
+
+ With this option on, the daemon will prepend each inspection result with
+ the kind of the corresponding expression, formatted as ``NameExpr -> "int"``.
+ If both this option and :option:`--include-span` are on, the kind will
+ appear first, for example ``NameExpr:1:2:1:4 -> "int"``.
+
+.. option:: --include-object-attrs
+
+ This will make the daemon include attributes of ``object`` (excluded by
+ default) in the case of an ``attrs`` inspection.
+
+.. option:: --union-attrs
+
+ Include attributes valid for some of the possible expression types (by default
+ an intersection is returned). This is useful for union types and type variables
+ with values. For example, with this code:
+
+ .. code-block:: python
+
+ from typing import Union
+
+ class A:
+ x: int
+ z: int
+ class B:
+ y: int
+ z: int
+ var: Union[A, B]
+ var
+
+ The command ``dmypy inspect --show attrs src.py:10:1`` will return
+ ``{"A": ["z"], "B": ["z"]}``, while with ``--union-attrs`` it will return
+ ``{"A": ["x", "z"], "B": ["y", "z"]}``.
+
+.. option:: --force-reload
+
+ Force re-parsing and re-type-checking of the file before inspection. By default
+ this is done only when needed (for example, the file was not loaded from cache,
+ or the daemon was initially run without the ``--export-types`` option),
+ since reloading may be slow (up to a few seconds for very large files).
+
+.. TODO: Add similar section about find usages when added, and then move
+ this to a separate file.
.. _watchman: https://facebook.github.io/watchman/
diff --git a/docs/source/mypy_light.svg b/docs/source/mypy_light.svg
new file mode 100644
index 000000000000..4eaf65dbf344
--- /dev/null
+++ b/docs/source/mypy_light.svg
@@ -0,0 +1,99 @@
+<!-- SVG markup (99 lines) not reproduced here -->
diff --git a/docs/source/protocols.rst b/docs/source/protocols.rst
index cd59f841d8a0..cb51809a66d5 100644
--- a/docs/source/protocols.rst
+++ b/docs/source/protocols.rst
@@ -4,14 +4,17 @@ Protocols and structural subtyping
==================================
Mypy supports two ways of deciding whether two classes are compatible
-as types: nominal subtyping and structural subtyping. *Nominal*
-subtyping is strictly based on the class hierarchy. If class ``D``
+as types: nominal subtyping and structural subtyping.
+
+*Nominal* subtyping is strictly based on the class hierarchy. If class ``D``
inherits class ``C``, it's also a subtype of ``C``, and instances of
``D`` can be used when ``C`` instances are expected. This form of
subtyping is used by default in mypy, since it's easy to understand
and produces clear and concise error messages, and since it matches
how the native :py:func:`isinstance ` check works -- based on class
-hierarchy. *Structural* subtyping can also be useful. Class ``D`` is
+hierarchy.
+
+*Structural* subtyping is based on the operations that can be performed with an object. Class ``D`` is
a structural subtype of class ``C`` if the former has all attributes
and methods of the latter, and with compatible types.
@@ -55,11 +58,292 @@ For example, ``IntList`` below is iterable, over ``int`` values:
print_numbered(x) # OK
print_numbered([4, 5]) # Also OK
-The subsections below introduce all built-in protocols defined in
+:ref:`predefined_protocols_reference` lists all protocols defined in
:py:mod:`typing` and the signatures of the corresponding methods you need to define
to implement each protocol (the signatures can be left out, as always, but mypy
won't type check unannotated methods).
+Simple user-defined protocols
+*****************************
+
+You can define your own protocol class by inheriting the special ``Protocol``
+class:
+
+.. code-block:: python
+
+ from typing import Iterable
+ from typing_extensions import Protocol
+
+ class SupportsClose(Protocol):
+ # Empty method body (explicit '...')
+ def close(self) -> None: ...
+
+ class Resource: # No SupportsClose base class!
+
+ def close(self) -> None:
+ self.resource.release()
+
+ # ... other methods ...
+
+ def close_all(items: Iterable[SupportsClose]) -> None:
+ for item in items:
+ item.close()
+
+ close_all([Resource(), open('some/file')]) # Okay!
+
+``Resource`` is a subtype of the ``SupportsClose`` protocol since it defines
+a compatible ``close`` method. Regular file objects returned by :py:func:`open` are
+similarly compatible with the protocol, as they support ``close()``.
+
+.. note::
+
+ The ``Protocol`` base class is provided in the ``typing_extensions``
+ package for Python 3.4-3.7. Starting with Python 3.8, ``Protocol``
+ is included in the ``typing`` module.
+
+Defining subprotocols and subclassing protocols
+***********************************************
+
+You can also define subprotocols. Existing protocols can be extended
+and merged using multiple inheritance. Example:
+
+.. code-block:: python
+
+ # ... continuing from the previous example
+
+ class SupportsRead(Protocol):
+ def read(self, amount: int) -> bytes: ...
+
+ class TaggedReadableResource(SupportsClose, SupportsRead, Protocol):
+ label: str
+
+ class AdvancedResource(Resource):
+ def __init__(self, label: str) -> None:
+ self.label = label
+
+ def read(self, amount: int) -> bytes:
+ # some implementation
+ ...
+
+ resource: TaggedReadableResource
+ resource = AdvancedResource('handle with care') # OK
+
+Note that inheriting from an existing protocol does not automatically
+turn the subclass into a protocol -- it just creates a regular
+(non-protocol) class or ABC that implements the given protocol (or
+protocols). The ``Protocol`` base class must always be explicitly
+present if you are defining a protocol:
+
+.. code-block:: python
+
+ class NotAProtocol(SupportsClose): # This is NOT a protocol
+ new_attr: int
+
+ class Concrete:
+ new_attr: int = 0
+
+ def close(self) -> None:
+ ...
+
+ # Error: nominal subtyping used by default
+ x: NotAProtocol = Concrete() # Error!
+
+You can also include default implementations of methods in
+protocols. If you explicitly subclass these protocols you can inherit
+these default implementations.
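+
+A minimal sketch of this (``Closeable`` and ``LogFile`` are invented names):
+
+.. code-block:: python
+
+   from typing_extensions import Protocol
+
+   class Closeable(Protocol):
+       def close(self) -> None:
+           print('closing')  # A default implementation, not just '...'
+
+   class LogFile(Closeable):  # Explicitly subclasses the protocol
+       pass
+
+   LogFile().close()  # Inherits and runs the default implementation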
+
+Explicitly including a protocol as a
+base class is also a way of documenting that your class implements a
+particular protocol, and it forces mypy to verify that your class
+implementation is actually compatible with the protocol. In particular,
+omitting a value for an attribute or a method body will make it implicitly
+abstract:
+
+.. code-block:: python
+
+ class SomeProto(Protocol):
+ attr: int # Note, no right hand side
+ def method(self) -> str: ... # Literally just ... here
+
+ class ExplicitSubclass(SomeProto):
+ pass
+
+ ExplicitSubclass() # error: Cannot instantiate abstract class 'ExplicitSubclass'
+ # with abstract attributes 'attr' and 'method'
+
+Invariance of protocol attributes
+*********************************
+
+A common issue with protocols is that protocol attributes are invariant.
+For example:
+
+.. code-block:: python
+
+ class Box(Protocol):
+ content: object
+
+ class IntBox:
+ content: int
+
+ def takes_box(box: Box) -> None: ...
+
+ takes_box(IntBox()) # error: Argument 1 to "takes_box" has incompatible type "IntBox"; expected "Box"
+ # note: Following member(s) of "IntBox" have conflicts:
+ # note: content: expected "object", got "int"
+
+This is because ``Box`` defines ``content`` as a mutable attribute.
+Here's why this is problematic:
+
+.. code-block:: python
+
+ def takes_box_evil(box: Box) -> None:
+ box.content = "asdf"  # Type checks against Box ("asdf" is an object), but breaks IntBox
+
+ my_int_box = IntBox()
+ takes_box_evil(my_int_box)
+ my_int_box.content + 1 # Oops, TypeError!
+
+This can be fixed by declaring ``content`` to be read-only in the ``Box``
+protocol using ``@property``:
+
+.. code-block:: python
+
+ class Box(Protocol):
+ @property
+ def content(self) -> object: ...
+
+ class IntBox:
+ content: int
+
+ def takes_box(box: Box) -> None: ...
+
+ takes_box(IntBox(42)) # OK
+
+Recursive protocols
+*******************
+
+Protocols can be recursive (self-referential) and mutually
+recursive. This is useful for declaring abstract recursive collections
+such as trees and linked lists:
+
+.. code-block:: python
+
+ from typing import TypeVar, Optional
+ from typing_extensions import Protocol
+
+ class TreeLike(Protocol):
+ value: int
+
+ @property
+ def left(self) -> Optional['TreeLike']: ...
+
+ @property
+ def right(self) -> Optional['TreeLike']: ...
+
+ class SimpleTree:
+ def __init__(self, value: int) -> None:
+ self.value = value
+ self.left: Optional['SimpleTree'] = None
+ self.right: Optional['SimpleTree'] = None
+
+ root: TreeLike = SimpleTree(0) # OK
+
+Using isinstance() with protocols
+*********************************
+
+You can use a protocol class with :py:func:`isinstance` if you decorate it
+with the ``@runtime_checkable`` class decorator. The decorator adds
+rudimentary support for runtime structural checks:
+
+.. code-block:: python
+
+ from typing_extensions import Protocol, runtime_checkable
+
+ @runtime_checkable
+ class Portable(Protocol):
+ handles: int
+
+ class Mug:
+ def __init__(self) -> None:
+ self.handles = 1
+
+ def use(handles: int) -> None: ...
+
+ mug = Mug()
+ if isinstance(mug, Portable): # Works at runtime!
+ use(mug.handles)
+
+:py:func:`isinstance` also works with the :ref:`predefined protocols <predefined_protocols_reference>`
+in :py:mod:`typing` such as :py:class:`~typing.Iterable`.
+
+.. warning::
+ :py:func:`isinstance` with protocols is not completely safe at runtime.
+ For example, signatures of methods are not checked. The runtime
+ implementation only checks that all protocol members exist,
+ not that they have the correct type. :py:func:`issubclass` with protocols
+ will only check for the existence of methods.
+
+.. note::
+ :py:func:`isinstance` with protocols can also be surprisingly slow.
+ In many cases, you're better served by using :py:func:`hasattr` to
+ check for the presence of attributes.
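+
+   A sketch of the ``hasattr`` alternative, reusing ``Mug`` and ``use`` from
+   the example above:
+
+   .. code-block:: python
+
+      mug = Mug()
+      if hasattr(mug, 'handles'):  # Typically cheaper than isinstance(mug, Portable)
+          use(mug.handles)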
+
+.. _callback_protocols:
+
+Callback protocols
+******************
+
+Protocols can be used to define flexible callback types that are hard
+(or even impossible) to express using the :py:data:`Callable[...] <typing.Callable>` syntax, such as variadic,
+overloaded, and complex generic callbacks. They are defined with a special :py:meth:`__call__ <object.__call__>`
+member:
+
+.. code-block:: python
+
+ from typing import Optional, Iterable
+ from typing_extensions import Protocol
+
+ class Combiner(Protocol):
+ def __call__(self, *vals: bytes, maxlen: Optional[int] = None) -> list[bytes]: ...
+
+ def batch_proc(data: Iterable[bytes], cb_results: Combiner) -> bytes:
+ for item in data:
+ ...
+
+ def good_cb(*vals: bytes, maxlen: Optional[int] = None) -> list[bytes]:
+ ...
+ def bad_cb(*vals: bytes, maxitems: Optional[int]) -> list[bytes]:
+ ...
+
+ batch_proc([], good_cb) # OK
+ batch_proc([], bad_cb) # Error! Argument 2 has incompatible type because of
+ # different name and kind in the callback
+
+Callback protocols and :py:data:`~typing.Callable` types can be used interchangeably.
+Argument names in :py:meth:`__call__ <object.__call__>` methods must be identical, unless
+a double underscore prefix is used. For example:
+
+.. code-block:: python
+
+ from typing import Callable, TypeVar
+ from typing_extensions import Protocol
+
+ T = TypeVar('T')
+
+ class Copy(Protocol):
+ def __call__(self, __origin: T) -> T: ...
+
+ copy_a: Callable[[T], T]
+ copy_b: Copy
+
+ copy_a = copy_b # OK
+ copy_b = copy_a # Also OK
+
+.. _predefined_protocols_reference:
+
+Predefined protocol reference
+*****************************
+
Iteration protocols
...................
@@ -283,213 +567,3 @@ AsyncContextManager[T]
traceback: Optional[TracebackType]) -> Awaitable[Optional[bool]]
See also :py:class:`~typing.AsyncContextManager`.
-
-Simple user-defined protocols
-*****************************
-
-You can define your own protocol class by inheriting the special ``Protocol``
-class:
-
-.. code-block:: python
-
- from typing import Iterable
- from typing_extensions import Protocol
-
- class SupportsClose(Protocol):
- def close(self) -> None:
- ... # Empty method body (explicit '...')
-
- class Resource: # No SupportsClose base class!
- # ... some methods ...
-
- def close(self) -> None:
- self.resource.release()
-
- def close_all(items: Iterable[SupportsClose]) -> None:
- for item in items:
- item.close()
-
- close_all([Resource(), open('some/file')]) # Okay!
-
-``Resource`` is a subtype of the ``SupportsClose`` protocol since it defines
-a compatible ``close`` method. Regular file objects returned by :py:func:`open` are
-similarly compatible with the protocol, as they support ``close()``.
-
-.. note::
-
- The ``Protocol`` base class is provided in the ``typing_extensions``
- package for Python 2.7 and 3.4-3.7. Starting with Python 3.8, ``Protocol``
- is included in the ``typing`` module.
-
-Defining subprotocols and subclassing protocols
-***********************************************
-
-You can also define subprotocols. Existing protocols can be extended
-and merged using multiple inheritance. Example:
-
-.. code-block:: python
-
- # ... continuing from the previous example
-
- class SupportsRead(Protocol):
- def read(self, amount: int) -> bytes: ...
-
- class TaggedReadableResource(SupportsClose, SupportsRead, Protocol):
- label: str
-
- class AdvancedResource(Resource):
- def __init__(self, label: str) -> None:
- self.label = label
-
- def read(self, amount: int) -> bytes:
- # some implementation
- ...
-
- resource: TaggedReadableResource
- resource = AdvancedResource('handle with care') # OK
-
-Note that inheriting from an existing protocol does not automatically
-turn the subclass into a protocol -- it just creates a regular
-(non-protocol) class or ABC that implements the given protocol (or
-protocols). The ``Protocol`` base class must always be explicitly
-present if you are defining a protocol:
-
-.. code-block:: python
-
- class NotAProtocol(SupportsClose): # This is NOT a protocol
- new_attr: int
-
- class Concrete:
- new_attr: int = 0
-
- def close(self) -> None:
- ...
-
- # Error: nominal subtyping used by default
- x: NotAProtocol = Concrete() # Error!
-
-You can also include default implementations of methods in
-protocols. If you explicitly subclass these protocols you can inherit
-these default implementations. Explicitly including a protocol as a
-base class is also a way of documenting that your class implements a
-particular protocol, and it forces mypy to verify that your class
-implementation is actually compatible with the protocol.
-
-.. note::
-
- You can use Python 3.6 variable annotations (:pep:`526`)
- to declare protocol attributes. On Python 2.7 and earlier Python 3
- versions you can use type comments and properties.
-
-Recursive protocols
-*******************
-
-Protocols can be recursive (self-referential) and mutually
-recursive. This is useful for declaring abstract recursive collections
-such as trees and linked lists:
-
-.. code-block:: python
-
- from typing import TypeVar, Optional
- from typing_extensions import Protocol
-
- class TreeLike(Protocol):
- value: int
-
- @property
- def left(self) -> Optional['TreeLike']: ...
-
- @property
- def right(self) -> Optional['TreeLike']: ...
-
- class SimpleTree:
- def __init__(self, value: int) -> None:
- self.value = value
- self.left: Optional['SimpleTree'] = None
- self.right: Optional['SimpleTree'] = None
-
- root: TreeLike = SimpleTree(0) # OK
-
-Using isinstance() with protocols
-*********************************
-
-You can use a protocol class with :py:func:`isinstance` if you decorate it
-with the ``@runtime_checkable`` class decorator. The decorator adds
-support for basic runtime structural checks:
-
-.. code-block:: python
-
- from typing_extensions import Protocol, runtime_checkable
-
- @runtime_checkable
- class Portable(Protocol):
- handles: int
-
- class Mug:
- def __init__(self) -> None:
- self.handles = 1
-
- def use(handles: int) -> None: ...
-
- mug = Mug()
- if isinstance(mug, Portable):
- use(mug.handles) # Works statically and at runtime
-
-:py:func:`isinstance` also works with the :ref:`predefined protocols `
-in :py:mod:`typing` such as :py:class:`~typing.Iterable`.
-
-.. note::
- :py:func:`isinstance` with protocols is not completely safe at runtime.
- For example, signatures of methods are not checked. The runtime
- implementation only checks that all protocol members are defined.
-
-.. _callback_protocols:
-
-Callback protocols
-******************
-
-Protocols can be used to define flexible callback types that are hard
-(or even impossible) to express using the :py:data:`Callable[...] <typing.Callable>` syntax, such as variadic,
-overloaded, and complex generic callbacks. They are defined with a special :py:meth:`__call__ <object.__call__>`
-member:
-
-.. code-block:: python
-
- from typing import Optional, Iterable
- from typing_extensions import Protocol
-
- class Combiner(Protocol):
- def __call__(self, *vals: bytes, maxlen: Optional[int] = None) -> list[bytes]: ...
-
- def batch_proc(data: Iterable[bytes], cb_results: Combiner) -> bytes:
- for item in data:
- ...
-
- def good_cb(*vals: bytes, maxlen: Optional[int] = None) -> list[bytes]:
- ...
- def bad_cb(*vals: bytes, maxitems: Optional[int]) -> list[bytes]:
- ...
-
- batch_proc([], good_cb) # OK
- batch_proc([], bad_cb) # Error! Argument 2 has incompatible type because of
- # different name and kind in the callback
-
-Callback protocols and :py:data:`~typing.Callable` types can be used interchangeably.
-Keyword argument names in :py:meth:`__call__ <object.__call__>` methods must be identical, unless
-a double underscore prefix is used. For example:
-
-.. code-block:: python
-
- from typing import Callable, TypeVar
- from typing_extensions import Protocol
-
- T = TypeVar('T')
-
- class Copy(Protocol):
- def __call__(self, __origin: T) -> T: ...
-
- copy_a: Callable[[T], T]
- copy_b: Copy
-
- copy_a = copy_b # OK
- copy_b = copy_a # Also OK
diff --git a/docs/source/python2.rst b/docs/source/python2.rst
deleted file mode 100644
index 67ea4f80d760..000000000000
--- a/docs/source/python2.rst
+++ /dev/null
@@ -1,134 +0,0 @@
-.. _python2:
-
-Type checking Python 2 code
-===========================
-
-For code that needs to be Python 2.7 compatible, function type
-annotations are given in comments, since the function annotation
-syntax was introduced in Python 3. The comment-based syntax is
-specified in :pep:`484`.
-
-Mypy requires typed-ast in order to check Python 2 code. You can install it
-using ``pip install 'mypy[python2]'``.
-
-Run mypy in Python 2 mode by using the :option:`--py2 <mypy --py2>` option::
-
- $ mypy --py2 program.py
-
-To run your program, you must have the ``typing`` module in your
-Python 2 module search path. Use ``pip install typing`` to install the
-module. This also works for Python 3 versions prior to 3.5 that don't
-include :py:mod:`typing` in the standard library.
-
-The example below illustrates the Python 2 function type annotation
-syntax. This syntax is also valid in Python 3 mode:
-
-.. code-block:: python
-
- from typing import List
-
- def hello(): # type: () -> None
- print 'hello'
-
- class Example:
- def method(self, lst, opt=0, *args, **kwargs):
- # type: (List[str], int, *str, **bool) -> int
- """Docstring comes after type comment."""
- ...
-
-It's worth going through these details carefully to avoid surprises:
-
-- You don't provide an annotation for the ``self`` / ``cls`` variable of
- methods.
-
-- Docstring always comes *after* the type comment.
-
-- For ``*args`` and ``**kwargs`` the type should be prefixed with
- ``*`` or ``**``, respectively (except when using the multi-line
- annotation syntax described below). Again, the above example
- illustrates this.
-
-- Things like ``Any`` must be imported from ``typing``, even if they
- are only used in comments.
-
-- In Python 2 mode ``str`` is implicitly promoted to ``unicode``, similar
- to how ``int`` is compatible with ``float``. This is unlike ``bytes`` and
- ``str`` in Python 3, which are incompatible. ``bytes`` in Python 2 is
- equivalent to ``str``. (This might change in the future.)
-
-.. _multi_line_annotation:
-
-Multi-line Python 2 function annotations
-----------------------------------------
-
-Mypy also supports a multi-line comment annotation syntax. You
-can provide a separate annotation for each argument using the variable
-annotation syntax. When using the single-line annotation syntax
-described above, functions with long argument lists tend to result in
-overly long type comments and it's often tricky to see which argument
-type corresponds to which argument. The alternative, multi-line
-annotation syntax makes long annotations easier to read and write.
-
-Here is an example (from :pep:`484`):
-
-.. code-block:: python
-
- def send_email(address, # type: Union[str, List[str]]
- sender, # type: str
- cc, # type: Optional[List[str]]
- bcc, # type: Optional[List[str]]
- subject='',
- body=None # type: List[str]
- ):
- # type: (...) -> bool
- """Send an email message. Return True if successful."""
-
-
-You write a separate annotation for each function argument on the same
-line as the argument. Each annotation must be on a separate line. If
-you leave out an annotation for an argument, it defaults to
-``Any``. You provide a return type annotation in the body of the
-function using the form ``# type: (...) -> rt``, where ``rt`` is the
-return type. Note that the return type annotation contains literal
-three dots.
-
-When using multi-line comments, you do not need to prefix the
-types of your ``*arg`` and ``**kwarg`` parameters with ``*`` or ``**``.
-For example, here is how you would annotate the first example using
-multi-line comments:
-
-.. code-block:: python
-
- from typing import List
-
- class Example:
- def method(self,
- lst, # type: List[str]
- opt=0, # type: int
- *args, # type: str
- **kwargs # type: bool
- ):
- # type: (...) -> int
- """Docstring comes after type comment."""
- ...
-
-
-Additional notes
-----------------
-
-- You should include types for arguments with default values in the
- annotation. The ``opt`` argument of ``method`` in the example at the
- beginning of this section is an example of this.
-
-- The annotation can be on the same line as the function header or on
- the following line.
-
-- Variables use a comment-based type syntax (explained in
- :ref:`explicit-var-types`).
-
-- You don't need to use string literal escapes for forward references
- within comments (string literal escapes are explained later).
-
-- Mypy uses a separate set of library stub files in `typeshed
-  <https://github.com/python/typeshed>`_ for Python 2. Library support
- may vary between Python 2 and Python 3.
diff --git a/docs/source/running_mypy.rst b/docs/source/running_mypy.rst
index 070e4556c04e..b0cefec9dafa 100644
--- a/docs/source/running_mypy.rst
+++ b/docs/source/running_mypy.rst
@@ -26,10 +26,6 @@ Specifying code to be checked
Mypy lets you specify what files it should type check in several different ways.
-Note that if you use namespace packages (in particular, packages without
-``__init__.py``), you'll need to specify :option:`--namespace-packages <mypy --namespace-packages>`.
-
1. First, you can pass in paths to Python files and directories you
want to type check. For example::
@@ -83,6 +79,9 @@ Note that if you use namespace packages (in particular, packages without
...will type check the above string as a mini-program (and in this case,
will report that ``list[int]`` is not callable).
+You can also use the :confval:`files` option in your :file:`mypy.ini` file to specify which
+files to check, in which case you can simply run ``mypy`` with no arguments.
+
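+For example, a minimal sketch of such a config (paths illustrative):
+
+.. code-block:: ini
+
+    # mypy.ini
+    [mypy]
+    files = src/, tests/
+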
Reading a list of files from a file
***********************************
@@ -104,6 +103,82 @@ flags, the recommended approach is to use a
:ref:`configuration file ` instead.
+.. _mapping-paths-to-modules:
+
+Mapping file paths to modules
+*****************************
+
+One of the main ways you can tell mypy what to type check
+is by providing mypy a list of paths. For example::
+
+ $ mypy file_1.py foo/file_2.py file_3.pyi some/directory
+
+This section describes how exactly mypy maps the provided paths
+to modules to type check.
+
+- Mypy will check all paths provided that correspond to files.
+
+- Mypy will recursively discover and check all files ending in ``.py`` or
+ ``.pyi`` in directory paths provided, after accounting for
+  :option:`--exclude <mypy --exclude>`.
+
+- For each file to be checked, mypy will attempt to associate the file (e.g.
+ ``project/foo/bar/baz.py``) with a fully qualified module name (e.g.
+ ``foo.bar.baz``). The directory the package is in (``project``) is then
+ added to mypy's module search paths.
+
+How mypy determines fully qualified module names depends on whether the options
+:option:`--no-namespace-packages <mypy --no-namespace-packages>` and
+:option:`--explicit-package-bases <mypy --explicit-package-bases>` are set.
+
+1. If :option:`--no-namespace-packages <mypy --no-namespace-packages>` is set,
+ mypy will rely solely upon the presence of ``__init__.py[i]`` files to
+ determine the fully qualified module name. That is, mypy will crawl up the
+ directory tree for as long as it continues to find ``__init__.py`` (or
+ ``__init__.pyi``) files.
+
+ For example, if your directory tree consists of ``pkg/subpkg/mod.py``, mypy
+ would require ``pkg/__init__.py`` and ``pkg/subpkg/__init__.py`` to exist in
+   order to correctly associate ``mod.py`` with ``pkg.subpkg.mod``.
+
+2. The default case. If :option:`--namespace-packages <mypy --namespace-packages>` is on, but :option:`--explicit-package-bases <mypy --explicit-package-bases>` is off, mypy will allow for the possibility that
+ directories without ``__init__.py[i]`` are packages. Specifically, mypy will
+ look at all parent directories of the file and use the location of the
+ highest ``__init__.py[i]`` in the directory tree to determine the top-level
+ package.
+
+ For example, say your directory tree consists solely of ``pkg/__init__.py``
+ and ``pkg/a/b/c/d/mod.py``. When determining ``mod.py``'s fully qualified
+ module name, mypy will look at ``pkg/__init__.py`` and conclude that the
+ associated module name is ``pkg.a.b.c.d.mod``.
+
+3. You'll notice that the above case still relies on ``__init__.py``. If
+ you can't put an ``__init__.py`` in your top-level package, but still wish to
+ pass paths (as opposed to packages or modules using the ``-p`` or ``-m``
+   flags), :option:`--explicit-package-bases <mypy --explicit-package-bases>`
+ provides a solution.
+
+   With :option:`--explicit-package-bases <mypy --explicit-package-bases>`, mypy
+   will locate the nearest parent directory that is a member of the ``MYPYPATH``
+   environment variable or the :confval:`mypy_path` config option, or that is
+   the current working directory. Mypy will then use the relative path to
+   determine the fully qualified module name.
+
+ For example, say your directory tree consists solely of
+ ``src/namespace_pkg/mod.py``. If you run the following command, mypy
+ will correctly associate ``mod.py`` with ``namespace_pkg.mod``::
+
+ $ MYPYPATH=src mypy --namespace-packages --explicit-package-bases .
+
+If you pass a file not ending in ``.py[i]``, the module name assumed is
+``__main__`` (matching the behavior of the Python interpreter), unless
+:option:`--scripts-are-modules <mypy --scripts-are-modules>` is passed.
+
+Passing :option:`-v <mypy -v>` will show you the files and associated module
+names that mypy will check.
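+
+For example, with the default options and a (hypothetical) layout like::
+
+    project/
+        pkg/
+            __init__.py
+            subpkg/
+                __init__.py
+                mod.py
+
+running ``mypy project/pkg`` will associate ``mod.py`` with the module
+``pkg.subpkg.mod`` and add ``project`` to the module search paths.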
+
How mypy handles imports
************************
@@ -138,7 +213,7 @@ the import. This can cause errors that look like the following:
.. code-block:: text
main.py:1: error: Skipping analyzing 'django': module is installed, but missing library stubs or py.typed marker
- main.py:2: error: Library stubs not installed for "requests" (or incompatible with Python 3.8)
+ main.py:2: error: Library stubs not installed for "requests"
main.py:3: error: Cannot find implementation or library stub for module named "this_module_does_not_exist"
If you get any of these errors on an import, mypy will assume the type of that
@@ -153,6 +228,11 @@ attribute of the module will automatically succeed:
# But this type checks, and x will have type 'Any'
x = does_not_exist.foobar()
+This can result in mypy failing to warn you about errors in your code. Since
+operations on ``Any`` result in ``Any``, these dynamic types can propagate
+through your code, making type checking less effective. See
+:ref:`dynamic-typing` for more information.
+
The next sections describe what each of these errors means and recommended next steps; scroll to
the section that matches your error.
@@ -170,12 +250,12 @@ unless they either have declared themselves to be
themselves on `typeshed <https://github.com/python/typeshed>`_, the repository
of types for the standard library and some 3rd party libraries.
-If you are getting this error, try:
+If you are getting this error, try to obtain type hints for the library you're using:
1. Upgrading the version of the library you're using, in case a newer version
has started to include type hints.
-2. Searching to see if there is a :ref:`PEP 561 compliant stub package <installed-packages>`.
+2. Searching to see if there is a :ref:`PEP 561 compliant stub package <installed-packages>`
corresponding to your third party library. Stub packages let you install
type hints independently from the library itself.
@@ -189,7 +269,7 @@ If you are getting this error, try:
adding the location to the ``MYPYPATH`` environment variable.
These stub files do not need to be complete! A good strategy is to use
- stubgen, a program that comes bundled with mypy, to generate a first
+   :ref:`stubgen <stubgen>`, a program that comes bundled with mypy, to generate a first
rough draft of the stubs. You can then iterate on just the parts of the
library you need.
@@ -198,16 +278,19 @@ If you are getting this error, try:
:ref:`PEP 561 compliant packages <installed-packages>`.
If you are unable to find any existing type hints nor have time to write your
-own, you can instead *suppress* the errors. All this will do is make mypy stop
-reporting an error on the line containing the import: the imported module
-will continue to be of type ``Any``.
+own, you can instead *suppress* the errors.
+
+All this will do is make mypy stop reporting an error on the line containing the
+import: the imported module will continue to be of type ``Any``, and mypy may
+not catch errors in its use.
1. To suppress a *single* missing import error, add a ``# type: ignore`` at the end of the
line containing the import.
-2. To suppress *all* missing import imports errors from a single library, add
-   a section to your :ref:`mypy config file <config-file>` for that library setting
- :confval:`ignore_missing_imports` to True. For example, suppose your codebase
+2. To suppress *all* missing import errors from a single library, add
+   a per-module section to your :ref:`mypy config file <config-file>` setting
+ :confval:`ignore_missing_imports` to True for that library. For example,
+ suppose your codebase
makes heavy use of an (untyped) library named ``foobar``. You can silence
all import errors associated with that library and that library alone by
adding the following section to your config file::
@@ -243,38 +326,39 @@ the library, you will get a message like this:
.. code-block:: text
- main.py:1: error: Library stubs not installed for "yaml" (or incompatible with Python 3.8)
+ main.py:1: error: Library stubs not installed for "yaml"
main.py:1: note: Hint: "python3 -m pip install types-PyYAML"
main.py:1: note: (or run "mypy --install-types" to install all missing stub packages)
-You can resolve the issue by running the suggested pip command or
-commands. Alternatively, you can use :option:`--install-types <mypy --install-types>` to install all known missing stubs:
+You can resolve the issue by running the suggested pip commands.
+If you're running mypy in CI, you can ensure the presence of any stub packages
+you need the same way you would any other test dependency, e.g. by adding them
+to the appropriate ``requirements.txt`` file.
+
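+For example (package names and versions illustrative):
+
+.. code-block:: text
+
+    # requirements-dev.txt
+    types-PyYAML==6.0.12
+    types-requests==2.28.11
+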
+Alternatively, add the :option:`--install-types <mypy --install-types>` flag
+to your mypy command to install all known missing stubs:
.. code-block:: text
mypy --install-types
-This installs any stub packages that were suggested in the previous
-mypy run. You can also use your normal mypy command line with the
-extra :option:`--install-types <mypy --install-types>` option to
-install missing stubs at the end of the run (if any were found).
-
-Use :option:`--install-types <mypy --install-types>` with
-:option:`--non-interactive <mypy --non-interactive>` to install all suggested
-stub packages without asking for confirmation, *and* type check your
-code, in a single command:
+This is slower than explicitly installing stubs, since it effectively
+runs mypy twice -- the first time to find the missing stubs, and
+the second time to type check your code properly after mypy has
+installed the stubs. It can also make controlling stub versions harder,
+resulting in less reproducible type checking.
-.. code-block:: text
+By default, :option:`--install-types <mypy --install-types>` shows a confirmation prompt.
+Use :option:`--non-interactive <mypy --non-interactive>` to install all suggested
+stub packages without asking for confirmation *and* type check your code:
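+
+.. code-block:: text
+
+    mypy --install-types --non-interactive src/
+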
- mypy --install-types --non-interactive src/
+If you've already installed the relevant third-party libraries in an environment
+other than the one mypy is running in, you can use the :option:`--python-executable
+<mypy --python-executable>` flag to point to the Python executable for that
+environment, and mypy will find packages installed for that Python executable.
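+
+For example (path illustrative):
+
+.. code-block:: text
+
+    mypy --python-executable /path/to/venv/bin/python src/
+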
-This can be useful in Continuous Integration jobs if you'd prefer not
-to manage stub packages manually. This is somewhat slower than
-explicitly installing stubs before running mypy, since it may type
-check your code twice -- the first time to find the missing stubs, and
-the second time to type check your code properly after mypy has
-installed the stubs.
+If you've installed the relevant stub packages and are still getting this error,
+see the :ref:`section below <missing-type-hints-for-third-party-library>`.
.. _missing-type-hints-for-third-party-library:
@@ -298,6 +382,11 @@ this error, try:
line flag to point to the Python interpreter containing your installed
third party packages.
+ You can confirm that you are running mypy from the environment you expect
+ by running it like ``python -m mypy ...``. You can confirm that you are
+ installing into the environment you expect by running pip like
+ ``python -m pip ...``.
+
2. Reading the :ref:`finding-imports` section below to make sure you
understand how exactly mypy searches for and finds modules and modify
how you're invoking mypy accordingly.
@@ -314,18 +403,64 @@ this error, try:
you must run ``mypy ~/foo-project/src`` (or set the ``MYPYPATH`` to
``~/foo-project/src``).
-4. If you are using namespace packages -- packages which do not contain
- ``__init__.py`` files within each subfolder -- using the
- :option:`--namespace-packages ` command
- line flag.
+.. _finding-imports:
+
+How imports are found
+*********************
+
+When mypy encounters an ``import`` statement or receives module
+names from the command line via the :option:`--module <mypy --module>` or :option:`--package <mypy --package>`
+flags, mypy tries to find the module on the file system similar
+to the way Python finds it. However, there are some differences.
+
+First, mypy has its own search path.
+This is computed from the following items:
-In some rare cases, you may get the "Cannot find implementation or library
-stub for module" error even when the module is installed in your system.
-This can happen when the module is both missing type hints and is installed
-on your system in a unconventional way.
+- The ``MYPYPATH`` environment variable
+ (a list of directories, colon-separated on UNIX systems, semicolon-separated on Windows).
+- The :confval:`mypy_path` config file option.
+- The directories containing the sources given on the command line
+  (see :ref:`Mapping file paths to modules <mapping-paths-to-modules>`).
+- The installed packages marked as safe for type checking (see
+  :ref:`PEP 561 support <installed-packages>`)
+- The relevant directories of the
+  `typeshed <https://github.com/python/typeshed>`_ repo.
-In this case, follow the steps above on how to handle
-:ref:`missing type hints in third party libraries `.
+.. note::
+
+   You cannot point to a stub-only package (:pep:`561`) via the ``MYPYPATH``; it
+   must be installed (see :ref:`PEP 561 support <installed-packages>`).
+
+Second, mypy searches for stub files in addition to regular Python files
+and packages.
+The rules for searching for a module ``foo`` are as follows:
+
+- The search looks in each of the directories in the search path
+ (see above) until a match is found.
+- If a package named ``foo`` is found (i.e. a directory
+ ``foo`` containing an ``__init__.py`` or ``__init__.pyi`` file)
+ that's a match.
+- If a stub file named ``foo.pyi`` is found, that's a match.
+- If a Python module named ``foo.py`` is found, that's a match.
+
+These matches are tried in order, so that if multiple matches are found
+in the same directory on the search path
+(e.g. a package and a Python file, or a stub file and a Python file)
+the first one in the above list wins.
+
+In particular, if a Python file and a stub file are both present in the
+same directory on the search path, only the stub file is used.
+(However, if the files are in different directories, the one found
+in the earlier directory is used.)
+
+Setting :confval:`mypy_path`/``MYPYPATH`` is mostly useful in the case
+where you want to try running mypy against multiple distinct
+sets of files that happen to share some common dependencies.
+
+For example, if you have multiple projects that happen to be
+using the same set of work-in-progress stubs, it could be
+convenient to just have your ``MYPYPATH`` point to a single
+directory containing the stubs.
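+
+For instance (paths illustrative)::
+
+    $ MYPYPATH=~/work/wip-stubs mypy project-a/src
+    $ MYPYPATH=~/work/wip-stubs mypy project-b/src
+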
.. _follow-imports:
@@ -388,165 +523,3 @@ hard-to-debug errors.
Adjusting import following behaviour is often most useful when restricted to
specific modules. This can be accomplished by setting a per-module
:confval:`follow_imports` config option.
-
-
-.. _mapping-paths-to-modules:
-
-Mapping file paths to modules
-*****************************
-
-One of the main ways you can tell mypy what to type check
-is by providing mypy a list of paths. For example::
-
- $ mypy file_1.py foo/file_2.py file_3.pyi some/directory
-
-This section describes how exactly mypy maps the provided paths
-to modules to type check.
-
-- Mypy will check all paths provided that correspond to files.
-
-- Mypy will recursively discover and check all files ending in ``.py`` or
- ``.pyi`` in directory paths provided, after accounting for
-  :option:`--exclude <mypy --exclude>`.
-
-- For each file to be checked, mypy will attempt to associate the file (e.g.
- ``project/foo/bar/baz.py``) with a fully qualified module name (e.g.
- ``foo.bar.baz``). The directory the package is in (``project``) is then
- added to mypy's module search paths.
-
-How mypy determines fully qualified module names depends on if the options
-:option:`--namespace-packages <mypy --namespace-packages>` and
-:option:`--explicit-package-bases <mypy --explicit-package-bases>` are set.
-
-1. If :option:`--namespace-packages <mypy --namespace-packages>` is off,
- mypy will rely solely upon the presence of ``__init__.py[i]`` files to
- determine the fully qualified module name. That is, mypy will crawl up the
- directory tree for as long as it continues to find ``__init__.py`` (or
- ``__init__.pyi``) files.
-
- For example, if your directory tree consists of ``pkg/subpkg/mod.py``, mypy
- would require ``pkg/__init__.py`` and ``pkg/subpkg/__init__.py`` to exist in
- order correctly associate ``mod.py`` with ``pkg.subpkg.mod``
-
-2. If :option:`--namespace-packages <mypy --namespace-packages>` is on, but
-   :option:`--explicit-package-bases <mypy --explicit-package-bases>` is off,
- mypy will allow for the possibility that directories without
- ``__init__.py[i]`` are packages. Specifically, mypy will look at all parent
- directories of the file and use the location of the highest
- ``__init__.py[i]`` in the directory tree to determine the top-level package.
-
- For example, say your directory tree consists solely of ``pkg/__init__.py``
- and ``pkg/a/b/c/d/mod.py``. When determining ``mod.py``'s fully qualified
- module name, mypy will look at ``pkg/__init__.py`` and conclude that the
- associated module name is ``pkg.a.b.c.d.mod``.
-
-3. You'll notice that the above case still relies on ``__init__.py``. If
- you can't put an ``__init__.py`` in your top-level package, but still wish to
- pass paths (as opposed to packages or modules using the ``-p`` or ``-m``
-   flags), :option:`--explicit-package-bases <mypy --explicit-package-bases>`
- provides a solution.
-
-   With :option:`--explicit-package-bases <mypy --explicit-package-bases>`, mypy
- will locate the nearest parent directory that is a member of the ``MYPYPATH``
- environment variable, the :confval:`mypy_path` config or is the current
- working directory. Mypy will then use the relative path to determine the
- fully qualified module name.
-
- For example, say your directory tree consists solely of
- ``src/namespace_pkg/mod.py``. If you run the following command, mypy
- will correctly associate ``mod.py`` with ``namespace_pkg.mod``::
-
- $ MYPYPATH=src mypy --namespace-packages --explicit-package-bases .
-
-If you pass a file not ending in ``.py[i]``, the module name assumed is
-``__main__`` (matching the behavior of the Python interpreter), unless
-:option:`--scripts-are-modules <mypy --scripts-are-modules>` is passed.
-
-Passing :option:`-v <mypy -v>` will show you the files and associated module
-names that mypy will check.
-
-
-.. _finding-imports:
-
-How imports are found
-*********************
-
-When mypy encounters an ``import`` statement or receives module
-names from the command line via the :option:`--module <mypy --module>` or :option:`--package <mypy --package>`
-flags, mypy tries to find the module on the file system similar
-to the way Python finds it. However, there are some differences.
-
-First, mypy has its own search path.
-This is computed from the following items:
-
-- The ``MYPYPATH`` environment variable
- (a colon-separated list of directories).
-- The :confval:`mypy_path` config file option.
-- The directories containing the sources given on the command line
-  (see :ref:`Mapping file paths to modules <mapping-paths-to-modules>`).
-- The installed packages marked as safe for type checking (see
-  :ref:`PEP 561 support <installed-packages>`)
-- The relevant directories of the
-  `typeshed <https://github.com/python/typeshed>`_ repo.
-
-.. note::
-
-   You cannot point to a stub-only package (:pep:`561`) via the ``MYPYPATH``, it must be
-   installed (see :ref:`PEP 561 support <installed-packages>`)
-
-Second, mypy searches for stub files in addition to regular Python files
-and packages.
-The rules for searching for a module ``foo`` are as follows:
-
-- The search looks in each of the directories in the search path
- (see above) until a match is found.
-- If a package named ``foo`` is found (i.e. a directory
- ``foo`` containing an ``__init__.py`` or ``__init__.pyi`` file)
- that's a match.
-- If a stub file named ``foo.pyi`` is found, that's a match.
-- If a Python module named ``foo.py`` is found, that's a match.
-
-These matches are tried in order, so that if multiple matches are found
-in the same directory on the search path
-(e.g. a package and a Python file, or a stub file and a Python file)
-the first one in the above list wins.
-
-In particular, if a Python file and a stub file are both present in the
-same directory on the search path, only the stub file is used.
-(However, if the files are in different directories, the one found
-in the earlier directory is used.)
-
-Other advice and best practices
-*******************************
-
-There are multiple ways of telling mypy what files to type check, ranging
-from passing in command line arguments to using the :confval:`files` or :confval:`mypy_path`
-config file options to setting the
-``MYPYPATH`` environment variable.
-
-However, in practice, it is usually sufficient to just use either
-command line arguments or the :confval:`files` config file option (the two
-are largely interchangeable).
-
-Setting :confval:`mypy_path`/``MYPYPATH`` is mostly useful in the case
-where you want to try running mypy against multiple distinct
-sets of files that happen to share some common dependencies.
-
-For example, if you have multiple projects that happen to be
-using the same set of work-in-progress stubs, it could be
-convenient to just have your ``MYPYPATH`` point to a single
-directory containing the stubs.
-
-Directories specific to Python 2 (@python2)
-*******************************************
-
-When type checking in Python 2 mode, mypy also looks for files under
-the ``@python2`` subdirectory of each ``MYPYPATH`` and ``mypy_path``
-entry, if the subdirectory exists. Files under the subdirectory take
-precedence over the parent directory. This can be used to provide
-separate Python 2 versions of stubs.
-
-.. note::
-
- This does not need to be used (and cannot be used) with
-   :ref:`PEP 561 compliant stub packages <installed-packages>`.
diff --git a/docs/source/runtime_troubles.rst b/docs/source/runtime_troubles.rst
index 1bab66194e47..a62652111de6 100644
--- a/docs/source/runtime_troubles.rst
+++ b/docs/source/runtime_troubles.rst
@@ -8,8 +8,8 @@ version of Python considers legal code. This section describes these scenarios
and explains how to get your code running again. Generally speaking, we have
three tools at our disposal:
-* For Python 3.7 through 3.9, use of ``from __future__ import annotations``
- (:pep:`563`), made the default in Python 3.11 and later
+* Use of ``from __future__ import annotations`` (:pep:`563`)
+ (this behaviour may eventually be made the default in a future Python version)
* Use of string literal types or type comments
* Use of ``typing.TYPE_CHECKING``
@@ -18,11 +18,33 @@ problems you may encounter.
.. _string-literal-types:
-String literal types
---------------------
+String literal types and type comments
+--------------------------------------
+
+Mypy allows you to add type annotations using ``# type:`` type comments.
+For example:
+
+.. code-block:: python
+
+    from typing import List, Optional, Union
+
+    a = 1  # type: int
+
+    def f(x):  # type: (int) -> int
+        return x + 1
+
+    # Alternative type comment syntax for functions with many arguments
+    def send_email(
+        address,     # type: Union[str, List[str]]
+        sender,      # type: str
+        cc,          # type: Optional[List[str]]
+        subject='',
+        body=None    # type: List[str]
+    ):
+        # type: (...) -> bool
+        ...
Type comments can't cause runtime errors because comments are not evaluated by
-Python. In a similar way, using string literal types sidesteps the problem of
+Python.
+
+In a similar way, using string literal types sidesteps the problem of
annotations that would cause runtime errors.
Any type can be entered as a string literal, and you can combine
@@ -30,8 +52,8 @@ string-literal types with non-string-literal types freely:
.. code-block:: python
- def f(a: list['A']) -> None: ... # OK
- def g(n: 'int') -> None: ... # OK, though not useful
+ def f(a: list['A']) -> None: ... # OK, prevents NameError since A is defined later
+ def g(n: 'int') -> None: ... # Also OK, though not useful
class A: pass
@@ -47,9 +69,10 @@ Future annotations import (PEP 563)
-----------------------------------
Many of the issues described here are caused by Python trying to evaluate
-annotations. From Python 3.11 on, Python will no longer attempt to evaluate
-function and variable annotations. This behaviour is made available in Python
-3.7 and later through the use of ``from __future__ import annotations``.
+annotations. Future Python versions (potentially Python 3.12) will by default no
+longer attempt to evaluate function and variable annotations. This behaviour is
+made available in Python 3.7 and later through the use of
+``from __future__ import annotations``.
This can be thought of as automatic string literal-ification of all function and
variable annotations. Note that function and variable annotations are still
@@ -74,7 +97,7 @@ required to be valid Python syntax. For more details, see :pep:`563`.
class B: ...
class C: ...
-.. note::
+.. warning::
Some libraries may have use cases for dynamic evaluation of annotations, for
instance, through use of ``typing.get_type_hints`` or ``eval``. If your
@@ -273,8 +296,8 @@ the built-in collections or those from :py:mod:`collections.abc`:
y: dict[int, str]
z: Sequence[str] = x
-There is limited support for using this syntax in Python 3.7 and later as well.
-If you use ``from __future__ import annotations``, mypy will understand this
+There is limited support for using this syntax in Python 3.7 and later as well:
+if you use ``from __future__ import annotations``, mypy will understand this
syntax in annotations. However, since this will not be supported by the Python
interpreter at runtime, make sure you're aware of the caveats mentioned in the
notes at :ref:`future annotations import`.
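+
+For example, this minimal sketch type checks on Python 3.7+ thanks to the
+future import, even though ``list[int]`` would fail if evaluated there:
+
+.. code-block:: python
+
+    from __future__ import annotations
+
+    def f(x: list[int]) -> dict[int, str]:  # OK: annotations stay unevaluated
+        return {}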
@@ -285,8 +308,8 @@ Using X | Y syntax for Unions
Starting with Python 3.10 (:pep:`604`), you can spell union types as ``x: int |
str``, instead of ``x: typing.Union[int, str]``.
-There is limited support for using this syntax in Python 3.7 and later as well.
-If you use ``from __future__ import annotations``, mypy will understand this
+There is limited support for using this syntax in Python 3.7 and later as well:
+if you use ``from __future__ import annotations``, mypy will understand this
syntax in annotations, string literal types, type comments and stub files.
However, since this will not be supported by the Python interpreter at runtime
(if evaluated, ``int | str`` will raise ``TypeError: unsupported operand type(s)
diff --git a/docs/source/stubgen.rst b/docs/source/stubgen.rst
index 33fdac2089f7..f06c9c066bb7 100644
--- a/docs/source/stubgen.rst
+++ b/docs/source/stubgen.rst
@@ -147,10 +147,6 @@ Additional flags
Show help message and exit.
-.. option:: --py2
-
- Run stubgen in Python 2 mode (the default is Python 3 mode).
-
.. option:: --ignore-errors
If an exception was raised during stub generation, continue to process any
@@ -172,13 +168,6 @@ Additional flags
Specify module search directories, separated by colons (only used if
:option:`--no-import` is given).
-.. option:: --python-executable PATH
-
- Use Python interpreter at ``PATH`` for importing modules and runtime
- introspection. This has no effect with :option:`--no-import`, and this only works
- in Python 2 mode. In Python 3 mode the Python interpreter used to run stubgen
- will always be used.
-
.. option:: -o PATH, --output PATH
Change the output directory. By default the stubs are written in the
diff --git a/docs/source/stubs.rst b/docs/source/stubs.rst
index 38eded7ce57d..7c84a9718b3e 100644
--- a/docs/source/stubs.rst
+++ b/docs/source/stubs.rst
@@ -3,12 +3,15 @@
Stub files
==========
+A *stub file* is a file containing a skeleton of the public interface
+of a Python module, including classes, variables, functions -- and
+most importantly, their types.
+
Mypy uses stub files stored in the
`typeshed <https://github.com/python/typeshed>`_ repository to determine
the types of standard library and third-party library functions, classes,
and other definitions. You can also create your own stubs that will be
-used to type check your code. The basic properties of stubs were introduced
-back in :ref:`stubs-intro`.
+used to type check your code.
Creating a stub
***************
@@ -62,7 +65,7 @@ in your programs and stub files.
Stub file syntax
****************
-Stub files are written in normal Python 3 syntax, but generally
+Stub files are written in normal Python syntax, but generally
leaving out runtime logic like variable initializers, function bodies,
and default arguments.
@@ -90,12 +93,6 @@ stub file as three dots:
:ref:`callable types <callable-types>` and :ref:`tuple types
<tuple-types>`.
-.. note::
-
- It is always legal to use Python 3 syntax in stub files, even when
- writing Python 2 code. The example above is a valid stub file
- for both Python 2 and 3.
-
Using stub file syntax at runtime
*********************************
@@ -133,10 +130,3 @@ For example:
# type "ellipsis", argument has type "list[str]")
def not_ok(self, foo: list[str] = ...) -> None:
print(foo)
-
-.. note::
-
- Ellipsis expressions are legal syntax in Python 3 only. This means
- it is not possible to elide default arguments in Python 2 code.
- You can still elide function bodies in Python 2 by using either
- the ``pass`` statement or by throwing a :py:exc:`NotImplementedError`.
diff --git a/docs/source/stubtest.rst b/docs/source/stubtest.rst
index 828931fbdf2b..a8279eb6c239 100644
--- a/docs/source/stubtest.rst
+++ b/docs/source/stubtest.rst
@@ -41,6 +41,10 @@ stubs and implementation or to check for stub completeness. It's used to
test Python's official collection of library stubs,
`typeshed <https://github.com/python/typeshed>`_.
+.. warning::
+
+ stubtest will import and execute Python code from the packages it checks.
+
Example
*******
@@ -72,7 +76,7 @@ Here's a quick example of what stubtest can do:
Stub: at line 1
builtins.int
Runtime:
- hello, stubtest
+ 'hello, stubtest'
Usage
@@ -86,7 +90,14 @@ is installed in the same environment as the library to be tested. In some
cases, setting ``PYTHONPATH`` can help stubtest find the code to import.
Similarly, stubtest must be able to find the stubs to be checked. Stubtest
-respects the ``MYPYPATH`` environment variable.
+respects the ``MYPYPATH`` environment variable -- consider using this if you
+receive a complaint along the lines of "failed to find stubs".
+
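+For example, to check a package against (hypothetical) local stubs:
+
+.. code-block:: text
+
+    MYPYPATH=./stubs python3 -m mypy.stubtest mypackage
+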
+Note that stubtest requires mypy to be able to analyse stubs. If mypy is unable
+to analyse stubs, you may get an error along the lines of "not checking stubs due
+to mypy build errors". In this case, you will need to mitigate those errors
+before stubtest will run. Despite potential overlap in errors here, stubtest is
+not intended as a substitute for running mypy directly.
If you wish to ignore some of stubtest's complaints, stubtest supports a
pretty handy allowlist system.
diff --git a/docs/source/type_inference_and_annotations.rst b/docs/source/type_inference_and_annotations.rst
index 8150f88e579e..6adb4e651224 100644
--- a/docs/source/type_inference_and_annotations.rst
+++ b/docs/source/type_inference_and_annotations.rst
@@ -1,22 +1,35 @@
+.. _type-inference-and-annotations:
+
Type inference and type annotations
===================================
Type inference
**************
-Mypy considers the initial assignment as the definition of a variable.
-If you do not explicitly
-specify the type of the variable, mypy infers the type based on the
-static type of the value expression:
+For most variables, if you do not explicitly specify their type, mypy will
+infer the correct type based on what is initially assigned to the variable.
.. code-block:: python
- i = 1 # Infer type "int" for i
- l = [1, 2] # Infer type "list[int]" for l
+ # Mypy will infer the type of these variables, despite no annotations
+ i = 1
+ reveal_type(i) # Revealed type is "builtins.int"
+ l = [1, 2]
+ reveal_type(l) # Revealed type is "builtins.list[builtins.int]"
+
+
+.. note::
-Type inference is not used in dynamically typed functions (those
-without a function type annotation) — every local variable type defaults
-to ``Any`` in such functions. ``Any`` is discussed later in more detail.
+ Note that mypy will not use type inference in dynamically typed functions
+ (those without a function type annotation) — every local variable type
+ defaults to ``Any`` in such functions. For more details, see :ref:`dynamic-typing`.
+
+ .. code-block:: python
+
+ def untyped_function():
+ i = 1
+ reveal_type(i) # Revealed type is "Any"
+ # 'reveal_type' always outputs 'Any' in unchecked functions
.. _explicit-var-types:
@@ -35,37 +48,33 @@ variable type annotation:
Without the type annotation, the type of ``x`` would be just ``int``. We
use an annotation to give it a more general type ``Union[int, str]`` (this
type means that the value can be either an ``int`` or a ``str``).
-Mypy checks that the type of the initializer is compatible with the
-declared type. The following example is not valid, since the initializer is
-a floating point number, and this is incompatible with the declared
-type:
-.. code-block:: python
+The best way to think about this is that the type annotation sets the type of
+the variable, not the type of the expression. For instance, mypy will complain
+about the following code:
- x: Union[int, str] = 1.1 # Error!
+.. code-block:: python
-The variable annotation syntax is available starting from Python 3.6.
-In earlier Python versions, you can use a special comment after an
-assignment statement to declare the type of a variable:
+ x: Union[int, str] = 1.1 # error: Incompatible types in assignment
+ # (expression has type "float", variable has type "Union[int, str]")
-.. code-block:: python
+.. note::
- x = 1 # type: Union[int, str]
+ To explicitly override the type of an expression you can use
+   :py:func:`cast(\<type\>, \<expression\>) <typing.cast>`.
+ See :ref:`casts` for details.
-We'll use both syntax variants in examples. The syntax variants are
-mostly interchangeable, but the variable annotation syntax allows
-defining the type of a variable without initialization, which is not
-possible with the comment syntax:
+Note that you can explicitly declare the type of a variable without
+giving it an initial value:
.. code-block:: python
- x: str # Declare type of 'x' without initialization
+ # We only unpack two values, so there's no right-hand side value
+ # for mypy to infer the type of "cs" from:
+ a, b, *cs = 1, 2 # error: Need type annotation for "cs"
-.. note::
-
- The best way to think about this is that the type annotation sets the
- type of the variable, not the type of the expression. To force the
-   type of an expression you can use :py:func:`cast(\<type\>, \<expression\>) <typing.cast>`.
+ rs: list[int] # no assignment!
+ p, q, *rs = 1, 2 # OK
Explicit types for collections
******************************
@@ -84,15 +93,9 @@ In these cases you can give the type explicitly using a type annotation:
.. code-block:: python
- l: list[int] = [] # Create empty list with type list[int]
+ l: list[int] = [] # Create empty list of int
d: dict[str, int] = {} # Create empty dictionary (str -> int)
-Similarly, you can also give an explicit type when creating an empty set:
-
-.. code-block:: python
-
- s: set[int] = set()
-
.. note::
Using type arguments (e.g. ``list[int]``) on builtin collections like
@@ -105,13 +108,14 @@ Similarly, you can also give an explicit type when creating an empty set:
Compatibility of container types
********************************
-The following program generates a mypy error, since ``list[int]``
-is not compatible with ``list[object]``:
+A quick note: container types can sometimes be unintuitive. We'll discuss this
+more in :ref:`variance`. For example, the following program generates a mypy error,
+because mypy treats ``list[int]`` as incompatible with ``list[object]``:
.. code-block:: python
def f(l: list[object], k: list[int]) -> None:
- l = k # Type check error: incompatible types in assignment
+ l = k # error: Incompatible types in assignment
The reason why the above assignment is disallowed is that allowing the
assignment could result in non-int values stored in a list of ``int``:
@@ -123,33 +127,32 @@ assignment could result in non-int values stored in a list of ``int``:
l.append('x')
print(k[-1]) # Ouch; a string in list[int]
-Other container types like :py:class:`dict` and :py:class:`set` behave similarly. We
-will discuss how you can work around this in :ref:`variance`.
+Other container types like :py:class:`dict` and :py:class:`set` behave similarly.
-You can still run the above program; it prints ``x``. This illustrates
-the fact that static types are used during type checking, but they do
-not affect the runtime behavior of programs. You can run programs with
-type check failures, which is often very handy when performing a large
-refactoring. Thus you can always 'work around' the type system, and it
+You can still run the above program; it prints ``x``. This illustrates the fact
+that static types do not affect the runtime behavior of programs. You can run
+programs with type check failures, which is often very handy when performing a
+large refactoring. Thus you can always 'work around' the type system, and it
doesn't really limit what you can do in your program.
Context in type inference
*************************
-Type inference is *bidirectional* and takes context into account. For
-example, the following is valid:
+Type inference is *bidirectional* and takes context into account.
+
+Mypy will take into account the type of the variable on the left-hand side
+of an assignment when inferring the type of the expression on the right-hand
+side. For example, the following will type check:
.. code-block:: python
def f(l: list[object]) -> None:
l = [1, 2] # Infer type list[object] for [1, 2], not list[int]
-In an assignment, the type context is determined by the assignment
-target. In this case this is ``l``, which has the type
-``list[object]``. The value expression ``[1, 2]`` is type checked in
-this context and given the type ``list[object]``. In the previous
-example we introduced a new variable ``l``, and here the type context
-was empty.
+
+The value expression ``[1, 2]`` is type checked with the additional
+context that it is being assigned to a variable of type ``list[object]``.
+This is used to infer the type of the *expression* as ``list[object]``.
Declared argument types are also used for type context. In this program
mypy knows that the empty list ``[]`` should have type ``list[int]`` based
@@ -182,49 +185,7 @@ Working around the issue is easy by adding a type annotation:
a: list[int] = [] # OK
foo(a)
-Declaring multiple variable types at a time
-*******************************************
-
-You can declare more than a single variable at a time, but only with
-a type comment. In order to nicely work with multiple assignment, you
-must give each variable a type separately:
-
-.. code-block:: python
-
- i, found = 0, False # type: int, bool
-
-You can optionally use parentheses around the types, assignment targets
-and assigned expression:
-
-.. code-block:: python
-
- i, found = 0, False # type: (int, bool) # OK
- (i, found) = 0, False # type: int, bool # OK
- i, found = (0, False) # type: int, bool # OK
- (i, found) = (0, False) # type: (int, bool) # OK
-
-Starred expressions
-*******************
-
-In most cases, mypy can infer the type of starred expressions from the
-right-hand side of an assignment, but not always:
-
-.. code-block:: python
-
- a, *bs = 1, 2, 3 # OK
- p, q, *rs = 1, 2 # Error: Type of rs cannot be inferred
-
-On first line, the type of ``bs`` is inferred to be
-``list[int]``. However, on the second line, mypy cannot infer the type
-of ``rs``, because there is no right-hand side value for ``rs`` to
-infer the type from. In cases like these, the starred expression needs
-to be annotated with a starred type:
-
-.. code-block:: python
-
- p, q, *rs = 1, 2 # type: int, int, list[int]
-
-Here, the type of ``rs`` is set to ``list[int]``.
+.. _silencing-type-errors:
Silencing type errors
*********************
@@ -232,22 +193,24 @@ Silencing type errors
You might want to disable type checking on specific lines, or within specific
files in your codebase. To do that, you can use a ``# type: ignore`` comment.
-For example, say that the web framework that you use now takes an integer
-argument to ``run()``, which starts it on localhost on that port. Like so:
+For example, say that in its latest update, the web framework you use can now
+take an integer argument to ``run()``, which starts it on localhost on that
+port. Like so:
.. code-block:: python
# Starting app on http://localhost:8000
app.run(8000)
-However, the type stubs that the package uses is not up-to-date, and it still
-expects only ``str`` types for ``run()``. This would give you the following error:
+However, the devs forgot to update their type annotations for
+``run``, so mypy still thinks ``run`` only expects ``str`` types.
+This would give you the following error:
.. code-block:: text
error: Argument 1 to "run" of "A" has incompatible type "int"; expected "str"
-If you cannot directly fix the type stubs yourself, you can temporarily
+If you cannot directly fix the web framework yourself, you can temporarily
disable type checking on that line, by adding a ``# type: ignore``:
.. code-block:: python
@@ -265,11 +228,12 @@ short explanation of the bug. To do that, use this format:
.. code-block:: python
# Starting app on http://localhost:8000
- app.run(8000) # type: ignore # `run()` now accepts an `int`, as a port
+ app.run(8000) # type: ignore # `run()` in v2.0 accepts an `int`, as a port
+Type ignore error codes
+-----------------------
-Mypy displays an error code for each error if you use
-:option:`--show-error-codes <mypy --show-error-codes>`:
+By default, mypy displays an error code for each error:
.. code-block:: text
@@ -280,12 +244,55 @@ It is possible to add a specific error-code in your ignore comment (e.g.
``# type: ignore[attr-defined]``) to clarify what's being silenced. You can
find more information about error codes :ref:`here <error-codes>`.
-Similarly, you can also ignore all mypy checks in a file, by adding a
-``# type: ignore`` at the top of the file:
+Other ways to silence errors
+----------------------------
+
+You can get mypy to silence errors about a specific variable by dynamically
+typing it with ``Any``. See :ref:`dynamic-typing` for more information.
.. code-block:: python
- # type: ignore
+ from typing import Any
+
+ def f(x: Any, y: str) -> None:
+ x = 'hello'
+ x += 1 # OK
+
+You can ignore all mypy errors in a file by adding a
+``# mypy: ignore-errors`` comment at the top of the file:
+
+.. code-block:: python
+
+ # mypy: ignore-errors
# This is a test file, skipping type checking in it.
import unittest
...
+
+You can also specify per-module configuration options in your :ref:`config-file`.
+For example:
+
+.. code-block:: ini
+
+ # Don't report errors in the 'package_to_fix_later' package
+ [mypy-package_to_fix_later.*]
+ ignore_errors = True
+
+ # Disable specific error codes in the 'tests' package
+ # Also don't require type annotations
+ [mypy-tests.*]
+ disable_error_code = var-annotated, has-type
+ allow_untyped_defs = True
+
+ # Silence import errors from the 'library_missing_types' package
+ [mypy-library_missing_types.*]
+ ignore_missing_imports = True
+
+Finally, adding a ``@typing.no_type_check`` decorator to a class, method or
+function causes mypy to avoid type checking that class, method or function
+and to treat it as not having any type annotations.
+
+.. code-block:: python
+
+    import typing
+
+    @typing.no_type_check
+    def foo() -> str:
+        return 12345  # No error!
diff --git a/docs/source/type_narrowing.rst b/docs/source/type_narrowing.rst
index 806835ed33a5..72a816679140 100644
--- a/docs/source/type_narrowing.rst
+++ b/docs/source/type_narrowing.rst
@@ -16,7 +16,7 @@ The simplest way to narrow a type is to use one of the supported expressions:
- :py:func:`isinstance` like in ``isinstance(obj, float)`` will narrow ``obj`` to have ``float`` type
- :py:func:`issubclass` like in ``issubclass(cls, MyClass)`` will narrow ``cls`` to be ``Type[MyClass]``
-- :py:func:`type` like in ``type(obj) is int`` will narrow ``obj`` to have ``int`` type
+- :py:class:`type` like in ``type(obj) is int`` will narrow ``obj`` to have ``int`` type
- :py:func:`callable` like in ``callable(obj)`` will narrow object to callable type
Type narrowing is contextual. For example, based on the condition, mypy will narrow an expression only within an ``if`` branch:
diff --git a/docs/source/typed_dict.rst b/docs/source/typed_dict.rst
new file mode 100644
index 000000000000..19a717d7feb7
--- /dev/null
+++ b/docs/source/typed_dict.rst
@@ -0,0 +1,250 @@
+.. _typeddict:
+
+TypedDict
+*********
+
+Python programs often use dictionaries with string keys to represent objects.
+``TypedDict`` lets you give precise types for dictionaries that represent
+objects with a fixed schema, such as ``{'id': 1, 'items': ['x']}``.
+
+Here is a typical example:
+
+.. code-block:: python
+
+ movie = {'name': 'Blade Runner', 'year': 1982}
+
+Only a fixed set of string keys is expected (``'name'`` and
+``'year'`` above), and each key has an independent value type (``str``
+for ``'name'`` and ``int`` for ``'year'`` above). We've previously
+seen the ``dict[K, V]`` type, which lets you declare uniform
+dictionary types, where every value has the same type, and arbitrary keys
+are supported. This is clearly not a good fit for
+``movie`` above. Instead, you can use a ``TypedDict`` to give a precise
+type for objects like ``movie``, where the type of each
+dictionary value depends on the key:
+
+.. code-block:: python
+
+ from typing_extensions import TypedDict
+
+ Movie = TypedDict('Movie', {'name': str, 'year': int})
+
+ movie: Movie = {'name': 'Blade Runner', 'year': 1982}
+
+``Movie`` is a ``TypedDict`` type with two items: ``'name'`` (with type ``str``)
+and ``'year'`` (with type ``int``). Note that we used an explicit type
+annotation for the ``movie`` variable. This type annotation is
+important -- without it, mypy will try to infer a regular, uniform
+:py:class:`dict` type for ``movie``, which is not what we want here.
+
+.. note::
+
+ If you pass a ``TypedDict`` object as an argument to a function, no
+ type annotation is usually necessary since mypy can infer the
+ desired type based on the declared argument type. Also, if an
+ assignment target has been previously defined, and it has a
+ ``TypedDict`` type, mypy will treat the assigned value as a ``TypedDict``,
+ not :py:class:`dict`.
+
+Now mypy will recognize these as valid:
+
+.. code-block:: python
+
+ name = movie['name'] # Okay; type of name is str
+ year = movie['year'] # Okay; type of year is int
+
+Mypy will detect an invalid key as an error:
+
+.. code-block:: python
+
+ director = movie['director'] # Error: 'director' is not a valid key
+
+Mypy will also reject a runtime-computed expression as a key, as
+it can't verify that it's a valid key. You can only use string
+literals as ``TypedDict`` keys.
+
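+For example, a variable key is rejected even if it happens to hold a valid key
+at runtime (error message abridged):
+
+.. code-block:: python
+
+    key = 'name'
+    movie[key]  # Error: TypedDict key must be a string literal
+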
+The ``TypedDict`` type object can also act as a constructor. It
+returns a normal :py:class:`dict` object at runtime -- a ``TypedDict`` does
+not define a new runtime type:
+
+.. code-block:: python
+
+ toy_story = Movie(name='Toy Story', year=1995)
+
+This is equivalent to just constructing a dictionary directly using
+``{ ... }`` or ``dict(key=value, ...)``. The constructor form is
+sometimes convenient, since it can be used without a type annotation,
+and it also makes the type of the object explicit.
+
+Like all types, ``TypedDict``\s can be used as components to build
+arbitrarily complex types. For example, you can define nested
+``TypedDict``\s and containers with ``TypedDict`` items.
+Unlike most other types, mypy uses structural compatibility checking
+(or structural subtyping) with ``TypedDict``\s. A ``TypedDict`` object with
+extra items is compatible with (a subtype of) a narrower
+``TypedDict``, assuming item types are compatible (*totality* also affects
+subtyping, as discussed below).
+
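+For example, here is a sketch of that structural compatibility (``BasicMovie``
+is illustrative):
+
+.. code-block:: python
+
+    BasicMovie = TypedDict('BasicMovie', {'name': str})
+
+    def print_name(m: BasicMovie) -> None:
+        print(m['name'])
+
+    # Movie has the required 'name' key (plus an extra one),
+    # so it is compatible with the narrower BasicMovie
+    print_name(movie)  # OK
+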
+A ``TypedDict`` object is not a subtype of the regular ``dict[...]``
+type (and vice versa), since :py:class:`dict` allows arbitrary keys to be
+added and removed, unlike ``TypedDict``. However, any ``TypedDict`` object is
+a subtype of (that is, compatible with) ``Mapping[str, object]``, since
+:py:class:`~typing.Mapping` only provides read-only access to the dictionary items:
+
+.. code-block:: python
+
+    from typing import Mapping
+
+    def print_typed_dict(obj: Mapping[str, object]) -> None:
+        for key, value in obj.items():
+            print(f'{key}: {value}')
+
+    print_typed_dict(Movie(name='Toy Story', year=1995))  # OK
+
+.. note::
+
+   Unless you are on Python 3.8 or newer (where ``TypedDict`` is available in
+   the standard library :py:mod:`typing` module), you need to install
+   ``typing_extensions`` using pip to use ``TypedDict``:
+
+ .. code-block:: text
+
+ python3 -m pip install --upgrade typing-extensions
+
+Totality
+--------
+
+By default mypy ensures that a ``TypedDict`` object has all the specified
+keys. This will be flagged as an error:
+
+.. code-block:: python
+
+ # Error: 'year' missing
+ toy_story: Movie = {'name': 'Toy Story'}
+
+Sometimes you want to allow keys to be left out when creating a
+``TypedDict`` object. You can provide the ``total=False`` argument to
+``TypedDict(...)`` to achieve this:
+
+.. code-block:: python
+
+ GuiOptions = TypedDict(
+ 'GuiOptions', {'language': str, 'color': str}, total=False)
+ options: GuiOptions = {} # Okay
+ options['language'] = 'en'
+
+You may need to use :py:meth:`~dict.get` to access items of a partial (non-total)
+``TypedDict``, since indexing using ``[]`` could fail at runtime.
+However, mypy still lets you use ``[]`` with a partial ``TypedDict`` -- you
+just need to be careful with it, as it could result in a :py:exc:`KeyError`.
+Requiring :py:meth:`~dict.get` everywhere would be too cumbersome. (Note that you
+are free to use :py:meth:`~dict.get` with total ``TypedDict``\s as well.)
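+
+For example (a sketch, reusing ``options`` from above):
+
+.. code-block:: python
+
+    options.get('color', 'white')  # OK: always returns a str
+    options['color']               # Type checks, but may raise KeyError at runtime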
+
+Keys that aren't required are shown with a ``?`` in error messages:
+
+.. code-block:: python
+
+ # Revealed type is "TypedDict('GuiOptions', {'language'?: builtins.str,
+ # 'color'?: builtins.str})"
+ reveal_type(options)
+
+Totality also affects structural compatibility. You can't use a partial
+``TypedDict`` when a total one is expected. Also, a total ``TypedDict`` is not
+valid when a partial one is expected.
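+
+For example, a sketch of the totality mismatch (``TotalOptions`` is illustrative):
+
+.. code-block:: python
+
+    TotalOptions = TypedDict('TotalOptions', {'language': str, 'color': str})
+
+    def apply_options(opts: GuiOptions) -> None: ...
+
+    total: TotalOptions = {'language': 'en', 'color': 'red'}
+    apply_options(total)  # Error: expected "GuiOptions"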
+
+Supported operations
+--------------------
+
+``TypedDict`` objects support a subset of dictionary operations and methods.
+You must use string literals as keys when calling most of the methods,
+as otherwise mypy won't be able to check that the key is valid. List
+of supported operations:
+
+* Anything included in :py:class:`~typing.Mapping`:
+
+ * ``d[key]``
+ * ``key in d``
+ * ``len(d)``
+ * ``for key in d`` (iteration)
+  * :py:meth:`d.get(key[, default]) <dict.get>`
+  * :py:meth:`d.keys() <dict.keys>`
+  * :py:meth:`d.values() <dict.values>`
+  * :py:meth:`d.items() <dict.items>`
+
+* :py:meth:`d.copy() <dict.copy>`
+* :py:meth:`d.setdefault(key, default) <dict.setdefault>`
+* :py:meth:`d1.update(d2) <dict.update>`
+* :py:meth:`d.pop(key[, default]) <dict.pop>` (partial ``TypedDict``\s only)
+* ``del d[key]`` (partial ``TypedDict``\s only)
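+
+For example, all of these operations type check against the partial
+``GuiOptions`` type defined above:
+
+.. code-block:: python
+
+    options: GuiOptions = {}
+    options.setdefault('language', 'en')  # OK
+    options.update({'color': 'red'})      # OK
+    options.pop('color', None)            # OK (partial TypedDict)
+    del options['language']               # OK (partial TypedDict)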
+
+.. note::
+
+ :py:meth:`~dict.clear` and :py:meth:`~dict.popitem` are not supported since they are unsafe
+ -- they could delete required ``TypedDict`` items that are not visible to
+ mypy because of structural subtyping.
+
+Class-based syntax
+------------------
+
+An alternative, class-based syntax to define a ``TypedDict`` is supported
+in Python 3.6 and later:
+
+.. code-block:: python
+
+ from typing_extensions import TypedDict
+
+ class Movie(TypedDict):
+ name: str
+ year: int
+
+The above definition is equivalent to the original ``Movie``
+definition. It doesn't actually define a real class. This syntax also
+supports a form of inheritance -- subclasses can define additional
+items. However, this is primarily a notational shortcut. Since mypy
+uses structural compatibility with ``TypedDict``\s, inheritance is not
+required for compatibility. Here is an example of inheritance:
+
+.. code-block:: python
+
+ class Movie(TypedDict):
+ name: str
+ year: int
+
+ class BookBasedMovie(Movie):
+ based_on: str
+
+Now ``BookBasedMovie`` has keys ``name``, ``year`` and ``based_on``.
+
+Mixing required and non-required items
+--------------------------------------
+
+In addition to allowing reuse across ``TypedDict`` types, inheritance also allows
+you to mix required and non-required (using ``total=False``) items
+in a single ``TypedDict``. Example:
+
+.. code-block:: python
+
+ class MovieBase(TypedDict):
+ name: str
+ year: int
+
+ class Movie(MovieBase, total=False):
+ based_on: str
+
+Now ``Movie`` has required keys ``name`` and ``year``, while ``based_on``
+can be left out when constructing an object. A ``TypedDict`` with a mix of
+required and non-required keys, such as ``Movie`` above, is only compatible
+with another ``TypedDict`` if every required key of the other ``TypedDict``
+is also a required key of the first, and every non-required key of the
+other ``TypedDict`` is also a non-required key of the first.
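+
+For example, a ``TypedDict`` in which ``based_on`` is required is not
+compatible with ``Movie``, where it is non-required (``BookMovie`` is a
+hypothetical type used for illustration):
+
+.. code-block:: python
+
+    class BookMovie(TypedDict):
+        name: str
+        year: int
+        based_on: str  # required here, but non-required in Movie
+
+    def process(movie: Movie) -> None: ...
+
+    book_movie: BookMovie = {'name': 'Jaws', 'year': 1975, 'based_on': 'Jaws'}
+    process(book_movie)  # Error: 'based_on' is required in BookMovie but not in Movie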
+
+Unions of TypedDicts
+--------------------
+
+Since TypedDicts are really just regular dicts at runtime, it is not possible to
+use ``isinstance`` checks to distinguish between different variants of a Union of
+TypedDict in the same way you can with regular objects.
+
+Instead, you can use the :ref:`tagged union pattern <tagged_unions>`. The
+referenced section of the docs has a full description with an example, but
+in short, you will need to give each TypedDict the same key where each
+value has a unique :ref:`Literal type <literal_types>`. Then, check that
+key to distinguish between your TypedDicts.
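+
+A minimal sketch of the pattern:
+
+.. code-block:: python
+
+    from typing import Union
+    from typing_extensions import Literal, TypedDict
+
+    class NewJobEvent(TypedDict):
+        tag: Literal['new-job']
+        job_name: str
+
+    class CancelJobEvent(TypedDict):
+        tag: Literal['cancel-job']
+        job_id: int
+
+    Event = Union[NewJobEvent, CancelJobEvent]
+
+    def process_event(event: Event) -> None:
+        if event['tag'] == 'new-job':
+            # mypy narrows event to NewJobEvent in this branch
+            print(event['job_name'])
+        else:
+            print(event['job_id'])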
diff --git a/misc/actions_stubs.py b/misc/actions_stubs.py
deleted file mode 100644
index 978af7187ffe..000000000000
--- a/misc/actions_stubs.py
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/usr/bin/env python3
-import os
-import shutil
-from typing import Tuple, Any
-try:
- import click
-except ImportError:
- print("You need the module \'click\'")
- exit(1)
-
-base_path = os.getcwd()
-
-# I don't know how to set callables with different args
-def apply_all(func: Any, directory: str, extension: str,
- to_extension: str='', exclude: Tuple[str]=('',),
- recursive: bool=True, debug: bool=False) -> None:
- excluded = [x+extension for x in exclude] if exclude else []
- for p, d, files in os.walk(os.path.join(base_path,directory)):
- for f in files:
- if "{}".format(f) in excluded:
- continue
- inner_path = os.path.join(p,f)
- if not inner_path.endswith(extension):
- continue
- if to_extension:
- new_path = "{}{}".format(inner_path[:-len(extension)],to_extension)
- func(inner_path,new_path)
- else:
- func(inner_path)
- if not recursive:
- break
-
-def confirm(resp: bool=False, **kargs) -> bool:
- kargs['rest'] = "to this {f2}/*{e2}".format(**kargs) if kargs.get('f2') else ''
- prompt = "{act} all files {rec}matching this expression {f1}/*{e1} {rest}".format(**kargs)
- prompt.format(**kargs)
- prompt = "{} [{}]|{}: ".format(prompt, 'Y' if resp else 'N', 'n' if resp else 'y')
- while True:
- ans = input(prompt).lower()
- if not ans:
- return resp
- if ans not in ['y','n']:
- print( 'Please, enter (y) or (n).')
- continue
- if ans == 'y':
- return True
- else:
- return False
-
-actions = ['cp', 'mv', 'rm']
-@click.command(context_settings=dict(help_option_names=['-h', '--help']))
-@click.option('--action', '-a', type=click.Choice(actions), required=True, help="What do I have to do :-)")
-@click.option('--dir', '-d', 'directory', default='stubs', help="Directory to start search!")
-@click.option('--ext', '-e', 'extension', default='.py', help="Extension \"from\" will be applied the action. Default .py")
-@click.option('--to', '-t', 'to_extension', default='.pyi', help="Extension \"to\" will be applied the action if can. Default .pyi")
-@click.option('--exclude', '-x', multiple=True, default=('__init__',), help="For every appear, will ignore this files. (can set multiples times)")
-@click.option('--not-recursive', '-n', default=True, is_flag=True, help="Set if don't want to walk recursively.")
-def main(action: str, directory: str, extension: str, to_extension: str,
- exclude: Tuple[str], not_recursive: bool) -> None:
- """
- This script helps to copy/move/remove files based on their extension.
-
- The three actions will ask you for confirmation.
-
- Examples (by default the script search in stubs directory):
-
- - Change extension of all stubs from .py to .pyi:
-
- python -a mv
-
- - Revert the previous action.
-
- python -a mv -e .pyi -t .py
-
- - If you want to ignore "awesome.py" files.
-
- python -a [cp|mv|rm] -x awesome
-
- - If you want to ignore "awesome.py" and "__init__.py" files.
-
- python -a [cp|mv|rm] -x awesome -x __init__
-
- - If you want to remove all ".todo" files in "todo" directory, but not recursively:
-
- python -a rm -e .todo -d todo -r
-
- """
- if action not in actions:
- print("Your action have to be one of this: {}".format(', '.join(actions)))
- return
-
- rec = "[Recursively] " if not_recursive else ''
- if not extension.startswith('.'):
- extension = ".{}".format(extension)
- if not to_extension.startswith('.'):
- to_extension = ".{}".format(to_extension)
- if directory.endswith('/'):
- directory = directory[:-1]
- if action == 'cp':
- if confirm(act='Copy',rec=rec, f1=directory, e1=extension, f2=directory, e2=to_extension):
- apply_all(shutil.copy, directory, extension, to_extension, exclude, not_recursive)
- elif action == 'rm':
- if confirm(act='Remove',rec=rec, f1=directory, e1=extension):
- apply_all(os.remove, directory, extension, exclude=exclude, recursive=not_recursive)
- elif action == 'mv':
- if confirm(act='Move',rec=rec, f1=directory, e1=extension, f2=directory, e2=to_extension):
- apply_all(shutil.move, directory, extension, to_extension, exclude, not_recursive)
-
-
-if __name__ == '__main__':
- main()
diff --git a/misc/analyze_cache.py b/misc/analyze_cache.py
index 334526a93742..8b805d8da0bc 100644
--- a/misc/analyze_cache.py
+++ b/misc/analyze_cache.py
@@ -1,19 +1,28 @@
#!/usr/bin/env python
-from typing import Any, Dict, Iterable, List, Optional
-from collections import Counter
+from __future__ import annotations
+import json
import os
import os.path
-import json
+from collections import Counter
+from typing import Any, Dict, Iterable
+from typing_extensions import Final, TypeAlias as _TypeAlias
-ROOT = ".mypy_cache/3.5"
+ROOT: Final = ".mypy_cache/3.5"
+
+JsonDict: _TypeAlias = Dict[str, Any]
-JsonDict = Dict[str, Any]
class CacheData:
- def __init__(self, filename: str, data_json: JsonDict, meta_json: JsonDict,
- data_size: int, meta_size: int) -> None:
+ def __init__(
+ self,
+ filename: str,
+ data_json: JsonDict,
+ meta_json: JsonDict,
+ data_size: int,
+ meta_size: int,
+ ) -> None:
self.filename = filename
self.data = data_json
self.meta = meta_json
@@ -21,7 +30,7 @@ def __init__(self, filename: str, data_json: JsonDict, meta_json: JsonDict,
self.meta_size = meta_size
@property
- def total_size(self):
+ def total_size(self) -> int:
return self.data_size + self.meta_size
@@ -33,21 +42,23 @@ def extract(chunks: Iterable[JsonDict]) -> Iterable[JsonDict]:
yield from extract(chunk.values())
elif isinstance(chunk, list):
yield from extract(chunk)
+
yield from extract([chunk.data for chunk in chunks])
def load_json(data_path: str, meta_path: str) -> CacheData:
- with open(data_path, 'r') as ds:
+ with open(data_path) as ds:
data_json = json.load(ds)
- with open(meta_path, 'r') as ms:
+ with open(meta_path) as ms:
meta_json = json.load(ms)
data_size = os.path.getsize(data_path)
meta_size = os.path.getsize(meta_path)
- return CacheData(data_path.replace(".data.json", ".*.json"),
- data_json, meta_json, data_size, meta_size)
+ return CacheData(
+ data_path.replace(".data.json", ".*.json"), data_json, meta_json, data_size, meta_size
+ )
def get_files(root: str) -> Iterable[CacheData]:
@@ -56,28 +67,29 @@ def get_files(root: str) -> Iterable[CacheData]:
if filename.endswith(".data.json"):
meta_filename = filename.replace(".data.json", ".meta.json")
yield load_json(
- os.path.join(dirpath, filename),
- os.path.join(dirpath, meta_filename))
+ os.path.join(dirpath, filename), os.path.join(dirpath, meta_filename)
+ )
def pluck(name: str, chunks: Iterable[JsonDict]) -> Iterable[JsonDict]:
- return (chunk for chunk in chunks if chunk['.class'] == name)
+ return (chunk for chunk in chunks if chunk[".class"] == name)
-def report_counter(counter: Counter, amount: Optional[int] = None) -> None:
+def report_counter(counter: Counter[str], amount: int | None = None) -> None:
for name, count in counter.most_common(amount):
- print(' {: <8} {}'.format(count, name))
+ print(f" {count: <8} {name}")
print()
-def report_most_common(chunks: List[JsonDict], amount: Optional[int] = None) -> None:
+def report_most_common(chunks: list[JsonDict], amount: int | None = None) -> None:
report_counter(Counter(str(chunk) for chunk in chunks), amount)
def compress(chunk: JsonDict) -> JsonDict:
- cache = {} # type: Dict[int, JsonDict]
+ cache: dict[int, JsonDict] = {}
counter = 0
- def helper(chunk: Any) -> Any:
+
+ def helper(chunk: JsonDict) -> JsonDict:
nonlocal counter
if not isinstance(chunk, dict):
return chunk
@@ -89,8 +101,8 @@ def helper(chunk: Any) -> Any:
if id in cache:
return cache[id]
else:
- cache[id] = {'.id': counter}
- chunk['.cache_id'] = counter
+ cache[id] = {".id": counter}
+ chunk[".cache_id"] = counter
counter += 1
for name in sorted(chunk.keys()):
@@ -101,21 +113,24 @@ def helper(chunk: Any) -> Any:
chunk[name] = helper(value)
return chunk
+
out = helper(chunk)
return out
+
def decompress(chunk: JsonDict) -> JsonDict:
- cache = {} # type: Dict[int, JsonDict]
- def helper(chunk: Any) -> Any:
+ cache: dict[int, JsonDict] = {}
+
+ def helper(chunk: JsonDict) -> JsonDict:
if not isinstance(chunk, dict):
return chunk
- if '.id' in chunk:
- return cache[chunk['.id']]
+ if ".id" in chunk:
+ return cache[chunk[".id"]]
counter = None
- if '.cache_id' in chunk:
- counter = chunk['.cache_id']
- del chunk['.cache_id']
+ if ".cache_id" in chunk:
+ counter = chunk[".cache_id"]
+ del chunk[".cache_id"]
for name in sorted(chunk.keys()):
value = chunk[name]
@@ -128,9 +143,8 @@ def helper(chunk: Any) -> Any:
cache[counter] = chunk
return chunk
- return helper(chunk)
-
+ return helper(chunk)
def main() -> None:
@@ -138,7 +152,7 @@ def main() -> None:
class_chunks = list(extract_classes(json_chunks))
total_size = sum(chunk.total_size for chunk in json_chunks)
- print("Total cache size: {:.3f} megabytes".format(total_size / (1024 * 1024)))
+ print(f"Total cache size: {total_size / (1024 * 1024):.3f} megabytes")
print()
class_name_counter = Counter(chunk[".class"] for chunk in class_chunks)
@@ -150,24 +164,24 @@ def main() -> None:
build = None
for chunk in json_chunks:
- if 'build.*.json' in chunk.filename:
+ if "build.*.json" in chunk.filename:
build = chunk
break
+ assert build is not None
original = json.dumps(build.data, sort_keys=True)
- print("Size of build.data.json, in kilobytes: {:.3f}".format(len(original) / 1024))
+ print(f"Size of build.data.json, in kilobytes: {len(original) / 1024:.3f}")
build.data = compress(build.data)
compressed = json.dumps(build.data, sort_keys=True)
- print("Size of compressed build.data.json, in kilobytes: {:.3f}".format(len(compressed) / 1024))
+ print(f"Size of compressed build.data.json, in kilobytes: {len(compressed) / 1024:.3f}")
build.data = decompress(build.data)
decompressed = json.dumps(build.data, sort_keys=True)
- print("Size of decompressed build.data.json, in kilobytes: {:.3f}".format(len(decompressed) / 1024))
+ print(f"Size of decompressed build.data.json, in kilobytes: {len(decompressed) / 1024:.3f}")
print("Lossless conversion back", original == decompressed)
-
- '''var_chunks = list(pluck("Var", class_chunks))
+ """var_chunks = list(pluck("Var", class_chunks))
report_most_common(var_chunks, 20)
print()
@@ -182,8 +196,8 @@ def main() -> None:
print()
print("Most common")
report_most_common(class_chunks, 20)
- print()'''
+ print()"""
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/misc/apply-cache-diff.py b/misc/apply-cache-diff.py
index 543ece9981ab..29c55247de92 100644
--- a/misc/apply-cache-diff.py
+++ b/misc/apply-cache-diff.py
@@ -5,6 +5,8 @@
many cases instead of full cache artifacts.
"""
+from __future__ import annotations
+
import argparse
import json
import os
@@ -12,7 +14,7 @@
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-from mypy.metastore import MetadataStore, FilesystemMetadataStore, SqliteMetadataStore
+from mypy.metastore import FilesystemMetadataStore, MetadataStore, SqliteMetadataStore
def make_cache(input_dir: str, sqlite: bool) -> MetadataStore:
@@ -24,7 +26,7 @@ def make_cache(input_dir: str, sqlite: bool) -> MetadataStore:
def apply_diff(cache_dir: str, diff_file: str, sqlite: bool = False) -> None:
cache = make_cache(cache_dir, sqlite)
- with open(diff_file, "r") as f:
+ with open(diff_file) as f:
diff = json.load(f)
old_deps = json.loads(cache.read("@deps.meta.json"))
@@ -34,7 +36,7 @@ def apply_diff(cache_dir: str, diff_file: str, sqlite: bool = False) -> None:
cache.remove(file)
else:
cache.write(file, data)
- if file.endswith('.meta.json') and "@deps" not in file:
+ if file.endswith(".meta.json") and "@deps" not in file:
meta = json.loads(data)
old_deps["snapshot"][meta["id"]] = meta["hash"]
@@ -45,16 +47,13 @@ def apply_diff(cache_dir: str, diff_file: str, sqlite: bool = False) -> None:
def main() -> None:
parser = argparse.ArgumentParser()
- parser.add_argument('--sqlite', action='store_true', default=False,
- help='Use a sqlite cache')
- parser.add_argument('cache_dir',
- help="Directory for the cache")
- parser.add_argument('diff',
- help="Cache diff file")
+ parser.add_argument("--sqlite", action="store_true", default=False, help="Use a sqlite cache")
+ parser.add_argument("cache_dir", help="Directory for the cache")
+ parser.add_argument("diff", help="Cache diff file")
args = parser.parse_args()
apply_diff(args.cache_dir, args.diff, args.sqlite)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/misc/async_matrix.py b/misc/async_matrix.py
index c266d0400aba..d4612dd81799 100644
--- a/misc/async_matrix.py
+++ b/misc/async_matrix.py
@@ -5,97 +5,125 @@
testFullCoroutineMatrix in test-data/unit/check-async-await.test.
"""
+from __future__ import annotations
+
import sys
from types import coroutine
from typing import Any, Awaitable, Generator, Iterator
# The various things you might try to use in `await` or `yield from`.
+
def plain_generator() -> Generator[str, None, int]:
- yield 'a'
+ yield "a"
return 1
+
async def plain_coroutine() -> int:
return 1
+
@coroutine
def decorated_generator() -> Generator[str, None, int]:
- yield 'a'
+ yield "a"
return 1
+
@coroutine
async def decorated_coroutine() -> int:
return 1
+
class It(Iterator[str]):
stop = False
- def __iter__(self) -> 'It':
+
+ def __iter__(self) -> It:
return self
+
def __next__(self) -> str:
if self.stop:
- raise StopIteration('end')
+ raise StopIteration("end")
else:
self.stop = True
- return 'a'
+ return "a"
+
def other_iterator() -> It:
return It()
+
class Aw(Awaitable[int]):
def __await__(self) -> Generator[str, Any, int]:
- yield 'a'
+ yield "a"
return 1
+
def other_coroutine() -> Aw:
return Aw()
+
# The various contexts in which `await` or `yield from` might occur.
+
def plain_host_generator(func) -> Generator[str, None, None]:
- yield 'a'
+ yield "a"
x = 0
f = func()
try:
- x = yield from f
+ x = yield from f # noqa: F841
finally:
try:
f.close()
except AttributeError:
pass
+
async def plain_host_coroutine(func) -> None:
x = 0
- x = await func()
+ x = await func() # noqa: F841
+
@coroutine
def decorated_host_generator(func) -> Generator[str, None, None]:
- yield 'a'
+ yield "a"
x = 0
f = func()
try:
- x = yield from f
+ x = yield from f # noqa: F841
finally:
try:
f.close()
except AttributeError:
pass
+
@coroutine
async def decorated_host_coroutine(func) -> None:
x = 0
- x = await func()
+ x = await func() # noqa: F841
+
# Main driver.
-def main():
- verbose = ('-v' in sys.argv)
- for host in [plain_host_generator, plain_host_coroutine,
- decorated_host_generator, decorated_host_coroutine]:
+
+def main() -> None:
+ verbose = "-v" in sys.argv
+ for host in [
+ plain_host_generator,
+ plain_host_coroutine,
+ decorated_host_generator,
+ decorated_host_coroutine,
+ ]:
print()
print("==== Host:", host.__name__)
- for func in [plain_generator, plain_coroutine,
- decorated_generator, decorated_coroutine,
- other_iterator, other_coroutine]:
+ for func in [
+ plain_generator,
+ plain_coroutine,
+ decorated_generator,
+ decorated_coroutine,
+ other_iterator,
+ other_coroutine,
+ ]:
print(" ---- Func:", func.__name__)
try:
f = host(func)
@@ -114,7 +142,8 @@ def main():
except Exception as e:
print(" error:", repr(e))
+
# Run main().
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/misc/build-debug-python.sh b/misc/build-debug-python.sh
index 2f32a46ce885..f652d6ad9937 100755
--- a/misc/build-debug-python.sh
+++ b/misc/build-debug-python.sh
@@ -1,7 +1,7 @@
#!/bin/bash -eux
# Build a debug build of python, install it, and create a venv for it
-# This is mainly intended for use in our travis builds but it can work
+# This is mainly intended for use in our github actions builds but it can work
# locally. (Though it unfortunately uses brew on OS X to deal with openssl
# nonsense.)
# Usage: build-debug-python.sh
diff --git a/misc/build_wheel.py b/misc/build_wheel.py
index 9d34242ead41..4389c80a14db 100644
--- a/misc/build_wheel.py
+++ b/misc/build_wheel.py
@@ -1,141 +1,12 @@
-"""Script to build compiled binary wheels that can be uploaded to PyPI.
-
-The main GitHub workflow where this script is used:
+"""
+The main GitHub workflow where wheels are built:
https://github.com/mypyc/mypy_mypyc-wheels/blob/master/.github/workflows/build.yml
-This uses cibuildwheel (https://github.com/pypa/cibuildwheel) to build the wheels.
-
-Usage:
-
- build_wheel.py --python-version --output-dir
-
-Wheels for the given Python version will be created in the given directory.
-Python version is in form "39".
-
-This works on macOS, Windows and Linux.
-
-You can test locally by using --extra-opts. macOS example:
+The script that builds wheels:
+https://github.com/mypyc/mypy_mypyc-wheels/blob/master/build_wheel.py
- mypy/misc/build_wheel.py --python-version 39 --output-dir out --extra-opts="--platform macos"
+That script is a light wrapper around cibuildwheel. Now that cibuildwheel has native configuration
+and better support for local builds, we could probably replace the script.
"""
-import argparse
-import os
-import subprocess
-from typing import Dict
-
-# Clang package we use on Linux
-LLVM_URL = 'https://github.com/mypyc/mypy_mypyc-wheels/releases/download/llvm/llvm-centos-5.tar.gz'
-
-# Mypy repository root
-ROOT_DIR = os.path.dirname(os.path.dirname(__file__))
-
-
-def create_environ(python_version: str) -> Dict[str, str]:
- """Set up environment variables for cibuildwheel."""
- env = os.environ.copy()
-
- env['CIBW_BUILD'] = f"cp{python_version}-*"
-
- # Don't build 32-bit wheels
- env['CIBW_SKIP'] = "*-manylinux_i686 *-win32"
-
- # Apple Silicon support
- # When cross-compiling on Intel, it is not possible to test arm64 and
- # the arm64 part of a universal2 wheel. Warnings will be silenced with
- # following CIBW_TEST_SKIP
- env['CIBW_ARCHS_MACOS'] = "x86_64 arm64 universal2"
- env['CIBW_TEST_SKIP'] = "*-macosx_arm64 *_universal2:arm64"
-
- env['CIBW_BUILD_VERBOSITY'] = '1'
-
- # mypy's isolated builds don't specify the requirements mypyc needs, so install
- # requirements and don't use isolated builds. we need to use build-requirements.txt
- # with recent mypy commits to get stub packages needed for compilation.
- env['CIBW_BEFORE_BUILD'] = """
- pip install -r {package}/build-requirements.txt
- """.replace('\n', ' ')
-
- # download a copy of clang to use to compile on linux. this was probably built in 2018,
- # speeds up compilation 2x
- env['CIBW_BEFORE_BUILD_LINUX'] = """
- (cd / && curl -L %s | tar xzf -) &&
- pip install -r {package}/build-requirements.txt
- """.replace('\n', ' ') % LLVM_URL
-
- # the double negative is counterintuitive, https://github.com/pypa/pip/issues/5735
- env['CIBW_ENVIRONMENT'] = 'MYPY_USE_MYPYC=1 MYPYC_OPT_LEVEL=3 PIP_NO_BUILD_ISOLATION=no'
- env['CIBW_ENVIRONMENT_LINUX'] = (
- 'MYPY_USE_MYPYC=1 MYPYC_OPT_LEVEL=3 PIP_NO_BUILD_ISOLATION=no ' +
- 'CC=/opt/llvm/bin/clang'
- )
- env['CIBW_ENVIRONMENT_WINDOWS'] = (
- 'MYPY_USE_MYPYC=1 MYPYC_OPT_LEVEL=2 PIP_NO_BUILD_ISOLATION=no'
- )
-
- # lxml doesn't have a wheel for Python 3.10 on the manylinux image we use.
- # lxml has historically been slow to support new Pythons as well.
- env['CIBW_BEFORE_TEST'] = """
- (
- grep -v lxml {project}/mypy/test-requirements.txt > /tmp/test-requirements.txt
- && cp {project}/mypy/mypy-requirements.txt /tmp/mypy-requirements.txt
- && cp {project}/mypy/build-requirements.txt /tmp/build-requirements.txt
- && pip install -r /tmp/test-requirements.txt
- )
- """.replace('\n', ' ')
- # lxml currently has wheels on Windows and doesn't have grep, so special case
- env['CIBW_BEFORE_TEST_WINDOWS'] = "pip install -r {project}/mypy/test-requirements.txt"
-
- # pytest looks for configuration files in the parent directories of where the tests live.
- # since we are trying to run the tests from their installed location, we copy those into
- # the venv. Ew ew ew.
- # We don't run tests that need lxml since we don't install lxml
- # We don't run external mypyc tests since there's some issue with compilation on the
- # manylinux image we use.
- env['CIBW_TEST_COMMAND'] = """
- (
- DIR=$(python -c 'import mypy, os; dn = os.path.dirname; print(dn(dn(mypy.__path__[0])))')
- && cp '{project}/mypy/pytest.ini' '{project}/mypy/conftest.py' $DIR
-
- && MYPY_TEST_DIR=$(python -c 'import mypy.test; print(mypy.test.__path__[0])')
- && MYPY_TEST_PREFIX='{project}/mypy' pytest $MYPY_TEST_DIR -k 'not (reports.test or testreports)'
-
- && MYPYC_TEST_DIR=$(python -c 'import mypyc.test; print(mypyc.test.__path__[0])')
- && MYPY_TEST_PREFIX='{project}/mypy' pytest $MYPYC_TEST_DIR -k 'not test_external'
- )
- """.replace('\n', ' ')
-
- # i ran into some flaky tests on windows, so only run testcheck. it looks like we
- # previously didn't run any tests on windows wheels, so this is a net win.
- env['CIBW_TEST_COMMAND_WINDOWS'] = """
- bash -c "
- (
- DIR=$(python -c 'import mypy, os; dn = os.path.dirname; print(dn(dn(mypy.__path__[0])))')
- && TEST_DIR=$(python -c 'import mypy.test; print(mypy.test.__path__[0])')
- && cp '{project}/mypy/pytest.ini' '{project}/mypy/conftest.py' $DIR
- && MYPY_TEST_PREFIX='{project}/mypy' pytest $TEST_DIR/testcheck.py
- )
- "
- """.replace('\n', ' ')
- return env
-
-
-def main() -> None:
- parser = argparse.ArgumentParser()
- parser.add_argument('--python-version', required=True, metavar='XY',
- help='Python version (e.g. 38 or 39)')
- parser.add_argument('--output-dir', required=True, metavar='DIR',
- help='Output directory for created wheels')
- parser.add_argument('--extra-opts', default='', metavar='OPTIONS',
- help='Extra options passed to cibuildwheel verbatim')
- args = parser.parse_args()
- python_version = args.python_version
- output_dir = args.output_dir
- extra_opts = args.extra_opts
- environ = create_environ(python_version)
- script = f'python -m cibuildwheel {extra_opts} --output-dir {output_dir} {ROOT_DIR}'
- subprocess.check_call(script, shell=True, env=environ)
-
-
-if __name__ == '__main__':
- main()
+raise ImportError("This script has been moved back to https://github.com/mypyc/mypy_mypyc-wheels")
diff --git a/misc/cherry-pick-typeshed.py b/misc/cherry-pick-typeshed.py
index 627c8990a155..af08009c2a8f 100644
--- a/misc/cherry-pick-typeshed.py
+++ b/misc/cherry-pick-typeshed.py
@@ -5,6 +5,8 @@
python3 misc/cherry-pick-typeshed.py --typeshed-dir dir hash
"""
+from __future__ import annotations
+
import argparse
import os.path
import re
@@ -24,9 +26,7 @@ def main() -> None:
parser.add_argument(
"--typeshed-dir", help="location of typeshed", metavar="dir", required=True
)
- parser.add_argument(
- "commit", help="typeshed commit hash to cherry-pick"
- )
+ parser.add_argument("commit", help="typeshed commit hash to cherry-pick")
args = parser.parse_args()
typeshed_dir = args.typeshed_dir
commit = args.commit
@@ -37,24 +37,26 @@ def main() -> None:
sys.exit(f"error: Invalid commit {commit!r}")
if not os.path.exists("mypy") or not os.path.exists("mypyc"):
- sys.exit(f"error: This script must be run at the mypy repository root directory")
+ sys.exit("error: This script must be run at the mypy repository root directory")
with tempfile.TemporaryDirectory() as d:
diff_file = os.path.join(d, "diff")
- out = subprocess.run(["git", "show", commit],
- capture_output=True,
- text=True,
- check=True,
- cwd=typeshed_dir)
+ out = subprocess.run(
+ ["git", "show", commit], capture_output=True, text=True, check=True, cwd=typeshed_dir
+ )
with open(diff_file, "w") as f:
f.write(out.stdout)
- subprocess.run(["git",
- "apply",
- "--index",
- "--directory=mypy/typeshed",
- "--exclude=**/tests/**",
- diff_file],
- check=True)
+ subprocess.run(
+ [
+ "git",
+ "apply",
+ "--index",
+ "--directory=mypy/typeshed",
+ "--exclude=**/tests/**",
+ diff_file,
+ ],
+ check=True,
+ )
title = parse_commit_title(out.stdout)
subprocess.run(["git", "commit", "-m", f"Typeshed cherry-pick: {title}"], check=True)
@@ -63,5 +65,5 @@ def main() -> None:
print(f"Cherry-picked commit {commit} from {typeshed_dir}")
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/misc/convert-cache.py b/misc/convert-cache.py
index 412238cfbc02..e5da9c2650d5 100755
--- a/misc/convert-cache.py
+++ b/misc/convert-cache.py
@@ -5,36 +5,48 @@
See mypy/metastore.py for details.
"""
-import sys
+from __future__ import annotations
+
import os
+import sys
+
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import argparse
-from mypy.metastore import FilesystemMetadataStore, SqliteMetadataStore
+
+from mypy.metastore import FilesystemMetadataStore, MetadataStore, SqliteMetadataStore
def main() -> None:
parser = argparse.ArgumentParser()
- parser.add_argument('--to-sqlite', action='store_true', default=False,
- help='Convert to a sqlite cache (default: convert from)')
- parser.add_argument('--output_dir', action='store', default=None,
- help="Output cache location (default: same as input)")
- parser.add_argument('input_dir',
- help="Input directory for the cache")
+ parser.add_argument(
+ "--to-sqlite",
+ action="store_true",
+ default=False,
+ help="Convert to a sqlite cache (default: convert from)",
+ )
+ parser.add_argument(
+ "--output_dir",
+ action="store",
+ default=None,
+ help="Output cache location (default: same as input)",
+ )
+ parser.add_argument("input_dir", help="Input directory for the cache")
args = parser.parse_args()
input_dir = args.input_dir
output_dir = args.output_dir or input_dir
if args.to_sqlite:
- input, output = FilesystemMetadataStore(input_dir), SqliteMetadataStore(output_dir)
+ input: MetadataStore = FilesystemMetadataStore(input_dir)
+ output: MetadataStore = SqliteMetadataStore(output_dir)
else:
input, output = SqliteMetadataStore(input_dir), FilesystemMetadataStore(output_dir)
for s in input.list_all():
- if s.endswith('.json'):
+ if s.endswith(".json"):
assert output.write(s, input.read(s), input.getmtime(s)), "Failed to write cache file!"
output.commit()
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/misc/diff-cache.py b/misc/diff-cache.py
index 11811cc3ae55..15d3e5a83983 100644
--- a/misc/diff-cache.py
+++ b/misc/diff-cache.py
@@ -5,13 +5,14 @@
many cases instead of full cache artifacts.
"""
+from __future__ import annotations
+
import argparse
import json
import os
import sys
-
from collections import defaultdict
-from typing import Any, Dict, Optional, Set
+from typing import Any
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
@@ -25,7 +26,7 @@ def make_cache(input_dir: str, sqlite: bool) -> MetadataStore:
return FilesystemMetadataStore(input_dir)
-def merge_deps(all: Dict[str, Set[str]], new: Dict[str, Set[str]]) -> None:
+def merge_deps(all: dict[str, set[str]], new: dict[str, set[str]]) -> None:
for k, v in new.items():
all.setdefault(k, set()).update(v)
@@ -59,12 +60,8 @@ def unzip(x: Any) -> Any:
def main() -> None:
parser = argparse.ArgumentParser()
- parser.add_argument(
- "--verbose", action="store_true", default=False, help="Increase verbosity"
- )
- parser.add_argument(
- "--sqlite", action="store_true", default=False, help="Use a sqlite cache"
- )
+ parser.add_argument("--verbose", action="store_true", default=False, help="Increase verbosity")
+ parser.add_argument("--sqlite", action="store_true", default=False, help="Use a sqlite cache")
parser.add_argument("input_dir1", help="Input directory for the cache")
parser.add_argument("input_dir2", help="Input directory for the cache")
parser.add_argument("output", help="Output file")
@@ -73,13 +70,13 @@ def main() -> None:
cache1 = make_cache(args.input_dir1, args.sqlite)
cache2 = make_cache(args.input_dir2, args.sqlite)
- type_misses: Dict[str, int] = defaultdict(int)
- type_hits: Dict[str, int] = defaultdict(int)
+ type_misses: dict[str, int] = defaultdict(int)
+ type_hits: dict[str, int] = defaultdict(int)
- updates: Dict[str, Optional[str]] = {}
+ updates: dict[str, str | None] = {}
- deps1: Dict[str, Set[str]] = {}
- deps2: Dict[str, Set[str]] = {}
+ deps1: dict[str, set[str]] = {}
+ deps2: dict[str, set[str]] = {}
misses = hits = 0
cache1_all = list(cache1.list_all())
diff --git a/misc/docker/Dockerfile b/misc/docker/Dockerfile
new file mode 100644
index 000000000000..3327f9e38815
--- /dev/null
+++ b/misc/docker/Dockerfile
@@ -0,0 +1,12 @@
+FROM ubuntu:latest
+
+WORKDIR /mypy
+
+RUN apt-get update
+RUN apt-get install -y python3 python3-pip clang
+
+COPY mypy-requirements.txt .
+COPY test-requirements.txt .
+COPY build-requirements.txt .
+
+RUN pip3 install -r test-requirements.txt
diff --git a/misc/docker/README.md b/misc/docker/README.md
new file mode 100644
index 000000000000..839f9761cb03
--- /dev/null
+++ b/misc/docker/README.md
@@ -0,0 +1,101 @@
+Running mypy and mypyc tests in a Docker container
+==================================================
+
+This directory contains scripts for running mypy and mypyc tests in a
+Linux Docker container. This allows running Linux tests on a different
+operating system that supports Docker, or running tests in an
+isolated, predictable environment on a Linux host operating system.
+
+Why use Docker?
+---------------
+
+Mypyc tests can be significantly faster in a Docker container than
+running natively on macOS.
+
+Also, if it's inconvenient to install the necessary dependencies on the
+host operating system, or there are issues getting some tests to pass
+on the host operating system, using a container can be an easy
+workaround.
+
+Prerequisites
+-------------
+
+First install Docker. On macOS, both Docker Desktop (proprietary, but
+with a free of charge subscription for some use cases) and Colima (MIT
+license) should work as runtimes.
+
+You may have to explicitly start the runtime first. Colima example
+(replace '8' with the number of CPU cores you have):
+
+```
+$ colima start -c 8
+```
+
+How to run tests
+----------------
+
+You need to build the container with all necessary dependencies before
+you can run tests:
+
+```
+$ python3 misc/docker/build.py
+```
+
+This creates a `mypy-test` Docker container that you can use to run
+tests.
+
+You may need to run the script as root:
+
+```
+$ sudo python3 misc/docker/build.py
+```
+
+If you have a stale container which isn't up-to-date, use `--no-cache` and
+`--pull` to force rebuilding everything:
+
+```
+$ python3 misc/docker/build.py --no-cache --pull
+```
+
+Now you can run tests by using the `misc/docker/run.sh` script. Give
+it the pytest command line you want to run as arguments. For example,
+you can run mypyc tests like this:
+
+```
+$ misc/docker/run.sh pytest mypyc
+```
+
+You can also use `-k <expression>`, `-n0`, `-q`, etc.
+
+Again, you may need to run `run.sh` as root:
+
+```
+$ sudo misc/docker/run.sh pytest mypyc
+```
+
+You can also use `runtests.py` in the container. Example:
+
+```
+$ misc/docker/run.sh ./runtests.py self lint
+```
+
+Notes
+-----
+
+File system changes within the container are not visible to the host
+system. You can't use the container to format code using Black, for
+example.
+
+On a Mac, you may want to give additional CPUs to the VM used to run
+the container. The default allocation may be way too low (e.g. 2 CPU
+cores). For example, use the `-c` option when starting the VM if you
+use Colima:
+
+```
+$ colima start -c 8
+```
+
+Giving access to all available CPUs to the Linux VM tends to provide
+the best performance. This is not needed on a Linux host, since the
+container is not run in a VM.
diff --git a/misc/docker/build.py b/misc/docker/build.py
new file mode 100644
index 000000000000..2103be3f110f
--- /dev/null
+++ b/misc/docker/build.py
@@ -0,0 +1,46 @@
+"""Build a "mypy-test" Linux Docker container for running mypy/mypyc tests.
+
+This allows running Linux tests under a non-Linux operating system. Mypyc
+tests can also run much faster under Linux than under the host OS.
+
+NOTE: You may need to run this as root (using sudo).
+
+Run with "--no-cache" to force reinstallation of mypy dependencies.
+Run with "--pull" to force update of the Linux (Ubuntu) base image.
+
+After you've built the container, use "run.sh" to run tests. Example:
+
+ misc/docker/run.sh pytest mypyc/
+"""
+
+import argparse
+import os
+import subprocess
+import sys
+
+
+def main() -> None:
+ parser = argparse.ArgumentParser(
+ description="""Build a 'mypy-test' Docker container for running mypy/mypyc tests. You may
+ need to run this as root (using sudo)."""
+ )
+ parser.add_argument("--no-cache", action="store_true", help="Force rebuilding")
+ parser.add_argument("--pull", action="store_true", help="Force pulling fresh Linux base image")
+ args = parser.parse_args()
+
+ dockerdir = os.path.dirname(os.path.abspath(__file__))
+ dockerfile = os.path.join(dockerdir, "Dockerfile")
+ rootdir = os.path.join(dockerdir, "..", "..")
+
+ cmdline = ["docker", "build", "-t", "mypy-test", "-f", dockerfile]
+ if args.no_cache:
+ cmdline.append("--no-cache")
+ if args.pull:
+ cmdline.append("--pull")
+ cmdline.append(rootdir)
+ result = subprocess.run(cmdline)
+ sys.exit(result.returncode)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/misc/docker/run-wrapper.sh b/misc/docker/run-wrapper.sh
new file mode 100755
index 000000000000..77e77d99af34
--- /dev/null
+++ b/misc/docker/run-wrapper.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+# Internal wrapper script used to run commands in a container
+
+# Copy all the files we need from the mypy repo directory shared with
+# the host to a local directory. Accessing files using a shared
+# directory on a mac can be *very* slow.
+echo "copying files to the container..."
+cp -R /repo/{mypy,mypyc,test-data,misc} .
+cp /repo/{pytest.ini,conftest.py,runtests.py,pyproject.toml,setup.cfg} .
+cp /repo/{mypy_self_check.ini,mypy_bootstrap.ini} .
+
+# Run the wrapped command
+"$@"
diff --git a/misc/docker/run.sh b/misc/docker/run.sh
new file mode 100755
index 000000000000..c8fc0e510e8e
--- /dev/null
+++ b/misc/docker/run.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+# Run mypy or mypyc tests in a Docker container that was built using misc/docker/build.py.
+#
+# Usage: misc/docker/run.sh ...
+#
+# For example, run mypyc tests like this:
+#
+# misc/docker/run.sh pytest mypyc
+#
+# NOTE: You may need to run this as root (using sudo).
+
+SCRIPT_DIR=$(cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd)
+MYPY_DIR="$SCRIPT_DIR/../.."
+
+docker run -ti --rm -v "$MYPY_DIR:/repo" mypy-test /repo/misc/docker/run-wrapper.sh "$@"
diff --git a/misc/dump-ast.py b/misc/dump-ast.py
index 8ded2389e77d..6f70bbc8c9ed 100755
--- a/misc/dump-ast.py
+++ b/misc/dump-ast.py
@@ -3,22 +3,21 @@
Parse source files and print the abstract syntax trees.
"""
-from typing import Tuple
-import sys
+from __future__ import annotations
+
import argparse
+import sys
+from mypy import defaults
from mypy.errors import CompileError
from mypy.options import Options
-from mypy import defaults
from mypy.parse import parse
-def dump(fname: str,
- python_version: Tuple[int, int],
- quiet: bool = False) -> None:
+def dump(fname: str, python_version: tuple[int, int], quiet: bool = False) -> None:
options = Options()
options.python_version = python_version
- with open(fname, 'rb') as f:
+ with open(fname, "rb") as f:
s = f.read()
tree = parse(s, fname, None, errors=None, options=options)
if not quiet:
@@ -28,28 +27,22 @@ def dump(fname: str,
def main() -> None:
# Parse a file and dump the AST (or display errors).
parser = argparse.ArgumentParser(
- description="Parse source files and print the abstract syntax tree (AST).",
+ description="Parse source files and print the abstract syntax tree (AST)."
)
- parser.add_argument('--py2', action='store_true', help='parse FILEs as Python 2')
- parser.add_argument('--quiet', action='store_true', help='do not print AST')
- parser.add_argument('FILE', nargs='*', help='files to parse')
+ parser.add_argument("--quiet", action="store_true", help="do not print AST")
+ parser.add_argument("FILE", nargs="*", help="files to parse")
args = parser.parse_args()
- if args.py2:
- pyversion = defaults.PYTHON2_VERSION
- else:
- pyversion = defaults.PYTHON3_VERSION
-
status = 0
for fname in args.FILE:
try:
- dump(fname, pyversion, args.quiet)
+ dump(fname, defaults.PYTHON3_VERSION, args.quiet)
except CompileError as e:
for msg in e.messages:
- sys.stderr.write('%s\n' % msg)
+ sys.stderr.write("%s\n" % msg)
status = 1
sys.exit(status)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/scripts/find_type.py b/misc/find_type.py
similarity index 70%
rename from scripts/find_type.py
rename to misc/find_type.py
index f488fca9f0ee..0031c72aea9f 100755
--- a/scripts/find_type.py
+++ b/misc/find_type.py
@@ -17,60 +17,72 @@
# " Convert to 0-based column offsets
# let startcol = startcol - 1
# " Change this line to point to the find_type.py script.
-# execute '!python3 /path/to/mypy/scripts/find_type.py % ' . startline . ' ' . startcol . ' ' . endline . ' ' . endcol . ' ' . mypycmd
+# execute '!python3 /path/to/mypy/misc/find_type.py % ' . startline . ' ' . startcol . ' ' . endline . ' ' . endcol . ' ' . mypycmd
# endfunction
# vnoremap <Leader>t :<C-O>:call RevealType()<CR>
#
# For an Emacs example, see misc/macs.el.
-from typing import List, Tuple, Optional
+from __future__ import annotations
+
+import os.path
+import re
import subprocess
import sys
import tempfile
-import os.path
-import re
-REVEAL_TYPE_START = 'reveal_type('
-REVEAL_TYPE_END = ')'
+REVEAL_TYPE_START = "reveal_type("
+REVEAL_TYPE_END = ")"
+
def update_line(line: str, s: str, pos: int) -> str:
return line[:pos] + s + line[pos:]
-def run_mypy(mypy_and_args: List[str], filename: str, tmp_name: str) -> str:
- proc = subprocess.run(mypy_and_args + ['--shadow-file', filename, tmp_name], stdout=subprocess.PIPE)
- assert(isinstance(proc.stdout, bytes)) # Guaranteed to be true because we called run with universal_newlines=False
+
+def run_mypy(mypy_and_args: list[str], filename: str, tmp_name: str) -> str:
+ proc = subprocess.run(
+ mypy_and_args + ["--shadow-file", filename, tmp_name], stdout=subprocess.PIPE
+ )
+ assert isinstance(
+ proc.stdout, bytes
+ ) # Guaranteed to be true because we called run with universal_newlines=False
return proc.stdout.decode(encoding="utf-8")
-def get_revealed_type(line: str, relevant_file: str, relevant_line: int) -> Optional[str]:
+
+def get_revealed_type(line: str, relevant_file: str, relevant_line: int) -> str | None:
m = re.match(r'(.+?):(\d+): note: Revealed type is "(.*)"$', line)
- if (m and
- int(m.group(2)) == relevant_line and
- os.path.samefile(relevant_file, m.group(1))):
+ if m and int(m.group(2)) == relevant_line and os.path.samefile(relevant_file, m.group(1)):
return m.group(3)
else:
return None
-def process_output(output: str, filename: str, start_line: int) -> Tuple[Optional[str], bool]:
+
+def process_output(output: str, filename: str, start_line: int) -> tuple[str | None, bool]:
error_found = False
for line in output.splitlines():
t = get_revealed_type(line, filename, start_line)
if t:
return t, error_found
- elif 'error:' in line:
+ elif "error:" in line:
error_found = True
return None, True # finding no reveal_type is an error
-def main():
- filename, start_line_str, start_col_str, end_line_str, end_col_str, *mypy_and_args = sys.argv[1:]
+
+def main() -> None:
+ filename, start_line_str, start_col_str, end_line_str, end_col_str, *mypy_and_args = sys.argv[
+ 1:
+ ]
start_line = int(start_line_str)
start_col = int(start_col_str)
end_line = int(end_line_str)
end_col = int(end_col_str)
- with open(filename, 'r') as f:
+ with open(filename) as f:
lines = f.readlines()
- lines[end_line - 1] = update_line(lines[end_line - 1], REVEAL_TYPE_END, end_col) # insert after end_col
+ lines[end_line - 1] = update_line(
+ lines[end_line - 1], REVEAL_TYPE_END, end_col
+ ) # insert after end_col
lines[start_line - 1] = update_line(lines[start_line - 1], REVEAL_TYPE_START, start_col)
- with tempfile.NamedTemporaryFile(mode='w', prefix='mypy') as tmp_f:
+ with tempfile.NamedTemporaryFile(mode="w", prefix="mypy") as tmp_f:
tmp_f.writelines(lines)
tmp_f.flush()
diff --git a/misc/fix_annotate.py b/misc/fix_annotate.py
index 0b552bf51d7a..7fffba8a8507 100644
--- a/misc/fix_annotate.py
+++ b/misc/fix_annotate.py
@@ -27,15 +27,14 @@ def foo(self, bar, baz=12):
Finally, it knows that __init__() is supposed to return None.
"""
-from __future__ import print_function
+from __future__ import annotations
import os
import re
-
from lib2to3.fixer_base import BaseFix
+from lib2to3.fixer_util import syms, token, touch_import
from lib2to3.patcomp import compile_pattern
from lib2to3.pytree import Leaf, Node
-from lib2to3.fixer_util import token, syms, touch_import
class FixAnnotate(BaseFix):
@@ -51,13 +50,13 @@ class FixAnnotate(BaseFix):
funcdef< 'def' name=any parameters< '(' [args=any] ')' > ':' suite=any+ >
"""
- counter = None if not os.getenv('MAXFIXES') else int(os.getenv('MAXFIXES'))
+ counter = None if not os.getenv("MAXFIXES") else int(os.getenv("MAXFIXES"))
def transform(self, node, results):
if FixAnnotate.counter is not None:
if FixAnnotate.counter <= 0:
return
- suite = results['suite']
+ suite = results["suite"]
children = suite[0].children
# NOTE: I've reverse-engineered the structure of the parse tree.
@@ -73,15 +72,15 @@ def transform(self, node, results):
#
# "Compact" functions (e.g. "def foo(x, y): return max(x, y)")
# have a different structure that isn't matched by PATTERN.
-
- ## print('-'*60)
- ## print(node)
- ## for i, ch in enumerate(children):
- ## print(i, repr(ch.prefix), repr(ch))
-
+ #
+ # print('-'*60)
+ # print(node)
+ # for i, ch in enumerate(children):
+ # print(i, repr(ch.prefix), repr(ch))
+ #
# Check if there's already an annotation.
for ch in children:
- if ch.prefix.lstrip().startswith('# type:'):
+ if ch.prefix.lstrip().startswith("# type:"):
return # There's already a # type: comment here; don't change anything.
# Compute the annotation
@@ -90,26 +89,28 @@ def transform(self, node, results):
# Insert '# type: {annot}' comment.
# For reference, see lib2to3/fixes/fix_tuple_params.py in stdlib.
if len(children) >= 2 and children[1].type == token.INDENT:
- children[1].prefix = '%s# type: %s\n%s' % (children[1].value, annot, children[1].prefix)
+ children[1].prefix = "{}# type: {}\n{}".format(
+ children[1].value, annot, children[1].prefix
+ )
children[1].changed()
if FixAnnotate.counter is not None:
FixAnnotate.counter -= 1
# Also add 'from typing import Any' at the top.
- if 'Any' in annot:
- touch_import('typing', 'Any', node)
+ if "Any" in annot:
+ touch_import("typing", "Any", node)
def make_annotation(self, node, results):
- name = results['name']
+ name = results["name"]
assert isinstance(name, Leaf), repr(name)
assert name.type == token.NAME, repr(name)
decorators = self.get_decorators(node)
is_method = self.is_method(node)
- if name.value == '__init__' or not self.has_return_exprs(node):
- restype = 'None'
+ if name.value == "__init__" or not self.has_return_exprs(node):
+ restype = "None"
else:
- restype = 'Any'
- args = results.get('args')
+ restype = "Any"
+ args = results.get("args")
argtypes = []
if isinstance(args, Node):
children = args.children
@@ -119,48 +120,48 @@ def make_annotation(self, node, results):
children = []
# Interpret children according to the following grammar:
# (('*'|'**')? NAME ['=' expr] ','?)*
- stars = inferred_type = ''
+ stars = inferred_type = ""
in_default = False
at_start = True
for child in children:
if isinstance(child, Leaf):
- if child.value in ('*', '**'):
+ if child.value in ("*", "**"):
stars += child.value
elif child.type == token.NAME and not in_default:
- if not is_method or not at_start or 'staticmethod' in decorators:
- inferred_type = 'Any'
+ if not is_method or not at_start or "staticmethod" in decorators:
+ inferred_type = "Any"
else:
# Always skip the first argument if it's named 'self'.
# Always skip the first argument of a class method.
- if child.value == 'self' or 'classmethod' in decorators:
+ if child.value == "self" or "classmethod" in decorators:
pass
else:
- inferred_type = 'Any'
- elif child.value == '=':
+ inferred_type = "Any"
+ elif child.value == "=":
in_default = True
- elif in_default and child.value != ',':
+ elif in_default and child.value != ",":
if child.type == token.NUMBER:
- if re.match(r'\d+[lL]?$', child.value):
- inferred_type = 'int'
+ if re.match(r"\d+[lL]?$", child.value):
+ inferred_type = "int"
else:
- inferred_type = 'float' # TODO: complex?
+ inferred_type = "float" # TODO: complex?
elif child.type == token.STRING:
- if child.value.startswith(('u', 'U')):
- inferred_type = 'unicode'
+ if child.value.startswith(("u", "U")):
+ inferred_type = "unicode"
else:
- inferred_type = 'str'
- elif child.type == token.NAME and child.value in ('True', 'False'):
- inferred_type = 'bool'
- elif child.value == ',':
+ inferred_type = "str"
+ elif child.type == token.NAME and child.value in ("True", "False"):
+ inferred_type = "bool"
+ elif child.value == ",":
if inferred_type:
argtypes.append(stars + inferred_type)
# Reset
- stars = inferred_type = ''
+ stars = inferred_type = ""
in_default = False
at_start = False
if inferred_type:
argtypes.append(stars + inferred_type)
- return '(' + ', '.join(argtypes) + ') -> ' + restype
+ return "(" + ", ".join(argtypes) + ") -> " + restype
# The parse tree has a different shape when there is a single
# decorator vs. when there are multiple decorators.
@@ -181,7 +182,7 @@ def get_decorators(self, node):
results = {}
if not self.decorated.match(node.parent, results):
return []
- decorators = results.get('dd') or [results['d']]
+ decorators = results.get("dd") or [results["d"]]
decs = []
for d in decorators:
for child in d.children:
@@ -212,8 +213,7 @@ def has_return_exprs(self, node):
results = {}
if self.return_expr.match(node, results):
return True
- for child in node.children:
- if child.type not in (syms.funcdef, syms.classdef):
- if self.has_return_exprs(child):
- return True
- return False
+ return any(
+ child.type not in (syms.funcdef, syms.classdef) and self.has_return_exprs(child)
+ for child in node.children
+ )
diff --git a/misc/incremental_checker.py b/misc/incremental_checker.py
index 0c659bee7023..85239b6462b8 100755
--- a/misc/incremental_checker.py
+++ b/misc/incremental_checker.py
@@ -31,9 +31,8 @@
python3 misc/incremental_checker.py commit 2a432b
"""
-from typing import Any, Dict, List, Optional, Tuple
+from __future__ import annotations
-from argparse import ArgumentParser, RawDescriptionHelpFormatter, Namespace
import base64
import json
import os
@@ -44,19 +43,21 @@
import sys
import textwrap
import time
+from argparse import ArgumentParser, Namespace, RawDescriptionHelpFormatter
+from typing import Any, Dict
+from typing_extensions import Final, TypeAlias as _TypeAlias
+CACHE_PATH: Final = ".incremental_checker_cache.json"
+MYPY_REPO_URL: Final = "https://github.com/python/mypy.git"
+MYPY_TARGET_FILE: Final = "mypy"
+DAEMON_CMD: Final = ["python3", "-m", "mypy.dmypy"]
-CACHE_PATH = ".incremental_checker_cache.json"
-MYPY_REPO_URL = "https://github.com/python/mypy.git"
-MYPY_TARGET_FILE = "mypy"
-DAEMON_CMD = ["python3", "-m", "mypy.dmypy"]
-
-JsonDict = Dict[str, Any]
+JsonDict: _TypeAlias = Dict[str, Any]
def print_offset(text: str, indent_length: int = 4) -> None:
print()
- print(textwrap.indent(text, ' ' * indent_length))
+ print(textwrap.indent(text, " " * indent_length))
print()
@@ -65,23 +66,21 @@ def delete_folder(folder_path: str) -> None:
shutil.rmtree(folder_path)
-def execute(command: List[str], fail_on_error: bool = True) -> Tuple[str, str, int]:
+def execute(command: list[str], fail_on_error: bool = True) -> tuple[str, str, int]:
proc = subprocess.Popen(
- ' '.join(command),
- stderr=subprocess.PIPE,
- stdout=subprocess.PIPE,
- shell=True)
- stdout_bytes, stderr_bytes = proc.communicate() # type: Tuple[bytes, bytes]
- stdout, stderr = stdout_bytes.decode('utf-8'), stderr_bytes.decode('utf-8')
+ " ".join(command), stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True
+ )
+ stdout_bytes, stderr_bytes = proc.communicate()
+ stdout, stderr = stdout_bytes.decode("utf-8"), stderr_bytes.decode("utf-8")
if fail_on_error and proc.returncode != 0:
- print('EXECUTED COMMAND:', repr(command))
- print('RETURN CODE:', proc.returncode)
+ print("EXECUTED COMMAND:", repr(command))
+ print("RETURN CODE:", proc.returncode)
print()
- print('STDOUT:')
+ print("STDOUT:")
print_offset(stdout)
- print('STDERR:')
+ print("STDERR:")
print_offset(stderr)
- raise RuntimeError('Unexpected error from external tool.')
+ raise RuntimeError("Unexpected error from external tool.")
return stdout, stderr, proc.returncode
@@ -92,40 +91,43 @@ def ensure_environment_is_ready(mypy_path: str, temp_repo_path: str, mypy_cache_
def initialize_repo(repo_url: str, temp_repo_path: str, branch: str) -> None:
- print("Cloning repo {0} to {1}".format(repo_url, temp_repo_path))
+ print(f"Cloning repo {repo_url} to {temp_repo_path}")
execute(["git", "clone", repo_url, temp_repo_path])
if branch is not None:
- print("Checking out branch {}".format(branch))
+ print(f"Checking out branch {branch}")
execute(["git", "-C", temp_repo_path, "checkout", branch])
-def get_commits(repo_folder_path: str, commit_range: str) -> List[Tuple[str, str]]:
- raw_data, _stderr, _errcode = execute([
- "git", "-C", repo_folder_path, "log", "--reverse", "--oneline", commit_range])
+def get_commits(repo_folder_path: str, commit_range: str) -> list[tuple[str, str]]:
+ raw_data, _stderr, _errcode = execute(
+ ["git", "-C", repo_folder_path, "log", "--reverse", "--oneline", commit_range]
+ )
output = []
- for line in raw_data.strip().split('\n'):
- commit_id, _, message = line.partition(' ')
+ for line in raw_data.strip().split("\n"):
+ commit_id, _, message = line.partition(" ")
output.append((commit_id, message))
return output
-def get_commits_starting_at(repo_folder_path: str, start_commit: str) -> List[Tuple[str, str]]:
- print("Fetching commits starting at {0}".format(start_commit))
- return get_commits(repo_folder_path, '{0}^..HEAD'.format(start_commit))
+def get_commits_starting_at(repo_folder_path: str, start_commit: str) -> list[tuple[str, str]]:
+ print(f"Fetching commits starting at {start_commit}")
+ return get_commits(repo_folder_path, f"{start_commit}^..HEAD")
-def get_nth_commit(repo_folder_path: str, n: int) -> Tuple[str, str]:
- print("Fetching last {} commits (or all, if there are fewer commits than n)".format(n))
- return get_commits(repo_folder_path, '-{}'.format(n))[0]
+def get_nth_commit(repo_folder_path: str, n: int) -> tuple[str, str]:
+ print(f"Fetching last {n} commits (or all, if there are fewer commits than n)")
+ return get_commits(repo_folder_path, f"-{n}")[0]
-def run_mypy(target_file_path: Optional[str],
- mypy_cache_path: str,
- mypy_script: Optional[str],
- *,
- incremental: bool = False,
- daemon: bool = False,
- verbose: bool = False) -> Tuple[float, str, Dict[str, Any]]:
+def run_mypy(
+ target_file_path: str | None,
+ mypy_cache_path: str,
+ mypy_script: str | None,
+ *,
+ incremental: bool = False,
+ daemon: bool = False,
+ verbose: bool = False,
+) -> tuple[float, str, dict[str, Any]]:
"""Runs mypy against `target_file_path` and returns what mypy prints to stdout as a string.
If `incremental` is set to True, this function will use store and retrieve all caching data
@@ -134,7 +136,7 @@ def run_mypy(target_file_path: Optional[str],
If `daemon` is True, we use daemon mode; the daemon must be started and stopped by the caller.
"""
- stats = {} # type: Dict[str, Any]
+ stats: dict[str, Any] = {}
if daemon:
command = DAEMON_CMD + ["check", "-v"]
else:
@@ -160,24 +162,31 @@ def run_mypy(target_file_path: Optional[str],
return runtime, output, stats
-def filter_daemon_stats(output: str) -> Tuple[str, Dict[str, Any]]:
- stats = {} # type: Dict[str, Any]
+def filter_daemon_stats(output: str) -> tuple[str, dict[str, Any]]:
+ stats: dict[str, Any] = {}
lines = output.splitlines()
output_lines = []
for line in lines:
- m = re.match(r'(\w+)\s+:\s+(.*)', line)
+ m = re.match(r"(\w+)\s+:\s+(.*)", line)
if m:
key, value = m.groups()
stats[key] = value
else:
output_lines.append(line)
if output_lines:
- output_lines.append('\n')
- return '\n'.join(output_lines), stats
+ output_lines.append("\n")
+ return "\n".join(output_lines), stats
def start_daemon(mypy_cache_path: str) -> None:
- cmd = DAEMON_CMD + ["restart", "--log-file", "./@incr-chk-logs", "--", "--cache-dir", mypy_cache_path]
+ cmd = DAEMON_CMD + [
+ "restart",
+ "--log-file",
+ "./@incr-chk-logs",
+ "--",
+ "--cache-dir",
+ mypy_cache_path,
+ ]
execute(cmd)
@@ -187,23 +196,27 @@ def stop_daemon() -> None:
def load_cache(incremental_cache_path: str = CACHE_PATH) -> JsonDict:
if os.path.exists(incremental_cache_path):
- with open(incremental_cache_path, 'r') as stream:
- return json.load(stream)
+ with open(incremental_cache_path) as stream:
+ cache = json.load(stream)
+ assert isinstance(cache, dict)
+ return cache
else:
return {}
def save_cache(cache: JsonDict, incremental_cache_path: str = CACHE_PATH) -> None:
- with open(incremental_cache_path, 'w') as stream:
+ with open(incremental_cache_path, "w") as stream:
json.dump(cache, stream, indent=2)
-def set_expected(commits: List[Tuple[str, str]],
- cache: JsonDict,
- temp_repo_path: str,
- target_file_path: Optional[str],
- mypy_cache_path: str,
- mypy_script: Optional[str]) -> None:
+def set_expected(
+ commits: list[tuple[str, str]],
+ cache: JsonDict,
+ temp_repo_path: str,
+ target_file_path: str | None,
+ mypy_cache_path: str,
+ mypy_script: str | None,
+) -> None:
"""Populates the given `cache` with the expected results for all of the given `commits`.
This function runs mypy on the `target_file_path` inside the `temp_repo_path`, and stores
@@ -213,30 +226,33 @@ def set_expected(commits: List[Tuple[str, str]],
skip evaluating that commit and move on to the next."""
for commit_id, message in commits:
if commit_id in cache:
- print('Skipping commit (already cached): {0}: "{1}"'.format(commit_id, message))
+ print(f'Skipping commit (already cached): {commit_id}: "{message}"')
else:
- print('Caching expected output for commit {0}: "{1}"'.format(commit_id, message))
+ print(f'Caching expected output for commit {commit_id}: "{message}"')
execute(["git", "-C", temp_repo_path, "checkout", commit_id])
- runtime, output, stats = run_mypy(target_file_path, mypy_cache_path, mypy_script,
- incremental=False)
- cache[commit_id] = {'runtime': runtime, 'output': output}
+ runtime, output, stats = run_mypy(
+ target_file_path, mypy_cache_path, mypy_script, incremental=False
+ )
+ cache[commit_id] = {"runtime": runtime, "output": output}
if output == "":
- print(" Clean output ({:.3f} sec)".format(runtime))
+ print(f" Clean output ({runtime:.3f} sec)")
else:
- print(" Output ({:.3f} sec)".format(runtime))
+ print(f" Output ({runtime:.3f} sec)")
print_offset(output, 8)
print()
-def test_incremental(commits: List[Tuple[str, str]],
- cache: JsonDict,
- temp_repo_path: str,
- target_file_path: Optional[str],
- mypy_cache_path: str,
- *,
- mypy_script: Optional[str] = None,
- daemon: bool = False,
- exit_on_error: bool = False) -> None:
+def test_incremental(
+ commits: list[tuple[str, str]],
+ cache: JsonDict,
+ temp_repo_path: str,
+ target_file_path: str | None,
+ mypy_cache_path: str,
+ *,
+ mypy_script: str | None = None,
+ daemon: bool = False,
+ exit_on_error: bool = False,
+) -> None:
"""Runs incremental mode on all `commits` to verify the output matches the expected output.
This function runs mypy on the `target_file_path` inside the `temp_repo_path`. The
@@ -244,38 +260,38 @@ def test_incremental(commits: List[Tuple[str, str]],
"""
print("Note: first commit is evaluated twice to warm up cache")
commits = [commits[0]] + commits
- overall_stats = {} # type: Dict[str, float]
+ overall_stats: dict[str, float] = {}
for commit_id, message in commits:
- print('Now testing commit {0}: "{1}"'.format(commit_id, message))
+ print(f'Now testing commit {commit_id}: "{message}"')
execute(["git", "-C", temp_repo_path, "checkout", commit_id])
- runtime, output, stats = run_mypy(target_file_path, mypy_cache_path, mypy_script,
- incremental=True, daemon=daemon)
+ runtime, output, stats = run_mypy(
+ target_file_path, mypy_cache_path, mypy_script, incremental=True, daemon=daemon
+ )
relevant_stats = combine_stats(overall_stats, stats)
- expected_runtime = cache[commit_id]['runtime'] # type: float
- expected_output = cache[commit_id]['output'] # type: str
+ expected_runtime: float = cache[commit_id]["runtime"]
+ expected_output: str = cache[commit_id]["output"]
if output != expected_output:
print(" Output does not match expected result!")
- print(" Expected output ({:.3f} sec):".format(expected_runtime))
+ print(f" Expected output ({expected_runtime:.3f} sec):")
print_offset(expected_output, 8)
- print(" Actual output: ({:.3f} sec):".format(runtime))
+ print(f" Actual output: ({runtime:.3f} sec):")
print_offset(output, 8)
if exit_on_error:
break
else:
print(" Output matches expected result!")
- print(" Incremental: {:.3f} sec".format(runtime))
- print(" Original: {:.3f} sec".format(expected_runtime))
+ print(f" Incremental: {runtime:.3f} sec")
+ print(f" Original: {expected_runtime:.3f} sec")
if relevant_stats:
- print(" Stats: {}".format(relevant_stats))
+ print(f" Stats: {relevant_stats}")
if overall_stats:
print("Overall stats:", overall_stats)
-def combine_stats(overall_stats: Dict[str, float],
- new_stats: Dict[str, Any]) -> Dict[str, float]:
- INTERESTING_KEYS = ['build_time', 'gc_time']
+def combine_stats(overall_stats: dict[str, float], new_stats: dict[str, Any]) -> dict[str, float]:
+ INTERESTING_KEYS = ["build_time", "gc_time"]
# For now, we only support float keys
- relevant_stats = {} # type: Dict[str, float]
+ relevant_stats: dict[str, float] = {}
for key in INTERESTING_KEYS:
if key in new_stats:
value = float(new_stats[key])
@@ -289,11 +305,18 @@ def cleanup(temp_repo_path: str, mypy_cache_path: str) -> None:
delete_folder(mypy_cache_path)
-def test_repo(target_repo_url: str, temp_repo_path: str,
- target_file_path: Optional[str],
- mypy_path: str, incremental_cache_path: str, mypy_cache_path: str,
- range_type: str, range_start: str, branch: str,
- params: Namespace) -> None:
+def test_repo(
+ target_repo_url: str,
+ temp_repo_path: str,
+ target_file_path: str | None,
+ mypy_path: str,
+ incremental_cache_path: str,
+ mypy_cache_path: str,
+ range_type: str,
+ range_start: str,
+ branch: str,
+ params: Namespace,
+) -> None:
"""Tests incremental mode against the repo specified in `target_repo_url`.
This algorithm runs in five main stages:
@@ -324,70 +347,111 @@ def test_repo(target_repo_url: str, temp_repo_path: str,
elif range_type == "commit":
start_commit = range_start
else:
- raise RuntimeError("Invalid option: {}".format(range_type))
+ raise RuntimeError(f"Invalid option: {range_type}")
commits = get_commits_starting_at(temp_repo_path, start_commit)
if params.limit:
- commits = commits[:params.limit]
+ commits = commits[: params.limit]
if params.sample:
- seed = params.seed or base64.urlsafe_b64encode(os.urandom(15)).decode('ascii')
+ seed = params.seed or base64.urlsafe_b64encode(os.urandom(15)).decode("ascii")
random.seed(seed)
commits = random.sample(commits, params.sample)
print("Sampled down to %d commits using random seed %s" % (len(commits), seed))
# Stage 3: Find and cache expected results for each commit (without incremental mode)
cache = load_cache(incremental_cache_path)
- set_expected(commits, cache, temp_repo_path, target_file_path, mypy_cache_path,
- mypy_script=params.mypy_script)
+ set_expected(
+ commits,
+ cache,
+ temp_repo_path,
+ target_file_path,
+ mypy_cache_path,
+ mypy_script=params.mypy_script,
+ )
save_cache(cache, incremental_cache_path)
# Stage 4: Rewind and re-run mypy (with incremental mode enabled)
if params.daemon:
- print('Starting daemon')
+ print("Starting daemon")
start_daemon(mypy_cache_path)
- test_incremental(commits, cache, temp_repo_path, target_file_path, mypy_cache_path,
- mypy_script=params.mypy_script, daemon=params.daemon,
- exit_on_error=params.exit_on_error)
+ test_incremental(
+ commits,
+ cache,
+ temp_repo_path,
+ target_file_path,
+ mypy_cache_path,
+ mypy_script=params.mypy_script,
+ daemon=params.daemon,
+ exit_on_error=params.exit_on_error,
+ )
# Stage 5: Remove temp files, stop daemon
if not params.keep_temporary_files:
cleanup(temp_repo_path, mypy_cache_path)
if params.daemon:
- print('Stopping daemon')
+ print("Stopping daemon")
stop_daemon()
def main() -> None:
- help_factory = (lambda prog: RawDescriptionHelpFormatter(prog=prog, max_help_position=32)) # type: Any
+ help_factory: Any = lambda prog: RawDescriptionHelpFormatter(prog=prog, max_help_position=32)
parser = ArgumentParser(
- prog='incremental_checker',
- description=__doc__,
- formatter_class=help_factory)
-
- parser.add_argument("range_type", metavar="START_TYPE", choices=["last", "commit"],
- help="must be one of 'last' or 'commit'")
- parser.add_argument("range_start", metavar="COMMIT_ID_OR_NUMBER",
- help="the commit id to start from, or the number of "
- "commits to move back (see above)")
- parser.add_argument("-r", "--repo_url", default=MYPY_REPO_URL, metavar="URL",
- help="the repo to clone and run tests on")
- parser.add_argument("-f", "--file-path", default=MYPY_TARGET_FILE, metavar="FILE",
- help="the name of the file or directory to typecheck")
- parser.add_argument("-x", "--exit-on-error", action='store_true',
- help="Exits as soon as an error occurs")
- parser.add_argument("--keep-temporary-files", action='store_true',
- help="Keep temporary files on exit")
- parser.add_argument("--cache-path", default=CACHE_PATH, metavar="DIR",
- help="sets a custom location to store cache data")
- parser.add_argument("--branch", default=None, metavar="NAME",
- help="check out and test a custom branch"
- "uses the default if not specified")
+ prog="incremental_checker", description=__doc__, formatter_class=help_factory
+ )
+
+ parser.add_argument(
+ "range_type",
+ metavar="START_TYPE",
+ choices=["last", "commit"],
+ help="must be one of 'last' or 'commit'",
+ )
+ parser.add_argument(
+ "range_start",
+ metavar="COMMIT_ID_OR_NUMBER",
+ help="the commit id to start from, or the number of commits to move back (see above)",
+ )
+ parser.add_argument(
+ "-r",
+ "--repo_url",
+ default=MYPY_REPO_URL,
+ metavar="URL",
+ help="the repo to clone and run tests on",
+ )
+ parser.add_argument(
+ "-f",
+ "--file-path",
+ default=MYPY_TARGET_FILE,
+ metavar="FILE",
+ help="the name of the file or directory to typecheck",
+ )
+ parser.add_argument(
+ "-x", "--exit-on-error", action="store_true", help="Exits as soon as an error occurs"
+ )
+ parser.add_argument(
+ "--keep-temporary-files", action="store_true", help="Keep temporary files on exit"
+ )
+ parser.add_argument(
+ "--cache-path",
+ default=CACHE_PATH,
+ metavar="DIR",
+ help="sets a custom location to store cache data",
+ )
+ parser.add_argument(
+ "--branch",
+ default=None,
+ metavar="NAME",
+ help="check out and test a custom branch uses the default if not specified",
+ )
parser.add_argument("--sample", type=int, help="use a random sample of size SAMPLE")
parser.add_argument("--seed", type=str, help="random seed")
- parser.add_argument("--limit", type=int,
- help="maximum number of commits to use (default until end)")
+ parser.add_argument(
+ "--limit", type=int, help="maximum number of commits to use (default until end)"
+ )
parser.add_argument("--mypy-script", type=str, help="alternate mypy script to run")
- parser.add_argument("--daemon", action='store_true',
- help="use mypy daemon instead of incremental (highly experimental)")
+ parser.add_argument(
+ "--daemon",
+ action="store_true",
+ help="use mypy daemon instead of incremental (highly experimental)",
+ )
if len(sys.argv[1:]) == 0:
parser.print_help()
@@ -419,17 +483,25 @@ def main() -> None:
# The path to store the mypy incremental mode cache data
mypy_cache_path = os.path.abspath(os.path.join(mypy_path, "misc", ".mypy_cache"))
- print("Assuming mypy is located at {0}".format(mypy_path))
- print("Temp repo will be cloned at {0}".format(temp_repo_path))
- print("Testing file/dir located at {0}".format(target_file_path))
- print("Using cache data located at {0}".format(incremental_cache_path))
+ print(f"Assuming mypy is located at {mypy_path}")
+ print(f"Temp repo will be cloned at {temp_repo_path}")
+ print(f"Testing file/dir located at {target_file_path}")
+ print(f"Using cache data located at {incremental_cache_path}")
print()
- test_repo(params.repo_url, temp_repo_path, target_file_path,
- mypy_path, incremental_cache_path, mypy_cache_path,
- params.range_type, params.range_start, params.branch,
- params)
-
-
-if __name__ == '__main__':
+ test_repo(
+ params.repo_url,
+ temp_repo_path,
+ target_file_path,
+ mypy_path,
+ incremental_cache_path,
+ mypy_cache_path,
+ params.range_type,
+ params.range_start,
+ params.branch,
+ params,
+ )
+
+
+if __name__ == "__main__":
main()
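
For orientation, here is a minimal sketch of the JSON cache that `load_cache()` and `save_cache()` round-trip. The commit hashes and file name below are hypothetical, but the per-commit shape mirrors the `cache[commit_id] = {"runtime": ..., "output": ...}` assignment in `set_expected()`:

```python
import json

# Hypothetical cache contents: one entry per commit, storing the
# non-incremental baseline that test_incremental() compares against.
cache = {
    "1a2b3c4d": {"runtime": 12.345, "output": ""},  # clean run
    "5e6f7a8b": {"runtime": 13.001, "output": "foo.py:1: error: ..."},
}
with open("incremental_cache.json", "w") as stream:  # hypothetical path
    json.dump(cache, stream, indent=2)
```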
diff --git a/misc/macs.el b/misc/macs.el
index 67d80aa575b0..f4cf6702b989 100644
--- a/misc/macs.el
+++ b/misc/macs.el
@@ -11,7 +11,7 @@
(thereline (line-number-at-pos there))
(therecol (save-excursion (goto-char there) (current-column))))
(shell-command
- (format "cd ~/src/mypy; python3 ./scripts/find_type.py %s %s %s %s %s python3 -m mypy -i mypy"
+ (format "cd ~/src/mypy; python3 ./misc/find_type.py %s %s %s %s %s python3 -m mypy -i mypy"
filename hereline herecol thereline therecol)
)
)
diff --git a/misc/perf_checker.py b/misc/perf_checker.py
index e55f8ccd38fe..20c313e61af9 100644
--- a/misc/perf_checker.py
+++ b/misc/perf_checker.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
-from typing import Callable, List, Tuple
+from __future__ import annotations
import os
import shutil
@@ -8,6 +8,7 @@
import subprocess
import textwrap
import time
+from typing import Callable
class Command:
@@ -18,7 +19,7 @@ def __init__(self, setup: Callable[[], None], command: Callable[[], None]) -> No
def print_offset(text: str, indent_length: int = 4) -> None:
print()
- print(textwrap.indent(text, ' ' * indent_length))
+ print(textwrap.indent(text, " " * indent_length))
print()
@@ -27,26 +28,24 @@ def delete_folder(folder_path: str) -> None:
shutil.rmtree(folder_path)
-def execute(command: List[str]) -> None:
+def execute(command: list[str]) -> None:
proc = subprocess.Popen(
- ' '.join(command),
- stderr=subprocess.PIPE,
- stdout=subprocess.PIPE,
- shell=True)
- stdout_bytes, stderr_bytes = proc.communicate() # type: Tuple[bytes, bytes]
- stdout, stderr = stdout_bytes.decode('utf-8'), stderr_bytes.decode('utf-8')
+ " ".join(command), stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True
+ )
+ stdout_bytes, stderr_bytes = proc.communicate()
+ stdout, stderr = stdout_bytes.decode("utf-8"), stderr_bytes.decode("utf-8")
if proc.returncode != 0:
- print('EXECUTED COMMAND:', repr(command))
- print('RETURN CODE:', proc.returncode)
+ print("EXECUTED COMMAND:", repr(command))
+ print("RETURN CODE:", proc.returncode)
print()
- print('STDOUT:')
+ print("STDOUT:")
print_offset(stdout)
- print('STDERR:')
+ print("STDERR:")
print_offset(stderr)
- raise RuntimeError('Unexpected error from external tool.')
+ raise RuntimeError("Unexpected error from external tool.")
-def trial(num_trials: int, command: Command) -> List[float]:
+def trial(num_trials: int, command: Command) -> list[float]:
trials = []
for i in range(num_trials):
command.setup()
@@ -57,11 +56,11 @@ def trial(num_trials: int, command: Command) -> List[float]:
return trials
-def report(name: str, times: List[float]) -> None:
- print("{}:".format(name))
- print(" Times: {}".format(times))
- print(" Mean: {}".format(statistics.mean(times)))
- print(" Stdev: {}".format(statistics.stdev(times)))
+def report(name: str, times: list[float]) -> None:
+ print(f"{name}:")
+ print(f" Times: {times}")
+ print(f" Mean: {statistics.mean(times)}")
+ print(f" Stdev: {statistics.stdev(times)}")
print()
@@ -69,25 +68,28 @@ def main() -> None:
trials = 3
print("Testing baseline")
- baseline = trial(trials, Command(
- lambda: None,
- lambda: execute(["python3", "-m", "mypy", "mypy"])))
+ baseline = trial(
+ trials, Command(lambda: None, lambda: execute(["python3", "-m", "mypy", "mypy"]))
+ )
report("Baseline", baseline)
print("Testing cold cache")
- cold_cache = trial(trials, Command(
- lambda: delete_folder(".mypy_cache"),
- lambda: execute(["python3", "-m", "mypy", "-i", "mypy"])))
+ cold_cache = trial(
+ trials,
+ Command(
+ lambda: delete_folder(".mypy_cache"),
+ lambda: execute(["python3", "-m", "mypy", "-i", "mypy"]),
+ ),
+ )
report("Cold cache", cold_cache)
print("Testing warm cache")
execute(["python3", "-m", "mypy", "-i", "mypy"])
- warm_cache = trial(trials, Command(
- lambda: None,
- lambda: execute(["python3", "-m", "mypy", "-i", "mypy"])))
+ warm_cache = trial(
+ trials, Command(lambda: None, lambda: execute(["python3", "-m", "mypy", "-i", "mypy"]))
+ )
report("Warm cache", warm_cache)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
-
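
The `trial`/`report` helpers above generalize beyond mypy. A self-contained sketch of the same measure-then-summarize idea, with the command and run count chosen arbitrarily:

```python
from __future__ import annotations

import statistics
import subprocess
import time


def time_command(args: list[str], runs: int = 3) -> list[float]:
    """Wall-clock a command `runs` times, like trial() does for a Command."""
    samples = []
    for _ in range(runs):
        start = time.time()
        subprocess.run(args, capture_output=True)
        samples.append(time.time() - start)
    return samples


samples = time_command(["python3", "--version"])
print(f"Mean: {statistics.mean(samples)}  Stdev: {statistics.stdev(samples)}")
```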
diff --git a/misc/perf_compare.py b/misc/perf_compare.py
new file mode 100644
index 000000000000..be05bb6ddc32
--- /dev/null
+++ b/misc/perf_compare.py
@@ -0,0 +1,146 @@
+"""Compare performance of mypyc-compiled mypy between one or more commits/branches.
+
+Simple usage:
+
+ python misc/perf_compare.py my-branch master ...
+
+What this does:
+
+ * Create a temp clone of the mypy repo for each target commit to measure
+ * Checkout a target commit in each of the clones
+ * Compile mypyc in each of the clones *in parallel*
+ * Create another temp clone of the mypy repo as the code to check
+ * Self check with each of the compiled mypys N times
+ * Report the average runtimes and relative performance
+ * Remove the temp clones
+"""
+
+from __future__ import annotations
+
+import argparse
+import glob
+import os
+import random
+import shutil
+import statistics
+import subprocess
+import sys
+import threading
+import time
+
+
+def heading(s: str) -> None:
+ print()
+ print(f"=== {s} ===")
+ print()
+
+
+def build_mypy(target_dir: str) -> None:
+ env = os.environ.copy()
+ env["CC"] = "clang"
+ env["MYPYC_OPT_LEVEL"] = "2"
+ cmd = [sys.executable, "setup.py", "--use-mypyc", "build_ext", "--inplace"]
+ subprocess.run(cmd, env=env, check=True, cwd=target_dir)
+
+
+def clone(target_dir: str, commit: str | None) -> None:
+ heading(f"Cloning mypy to {target_dir}")
+ repo_dir = os.getcwd()
+ if os.path.isdir(target_dir):
+ print(f"{target_dir} exists: deleting")
+ shutil.rmtree(target_dir)
+ subprocess.run(["git", "clone", repo_dir, target_dir], check=True)
+ if commit:
+ subprocess.run(["git", "checkout", commit], check=True, cwd=target_dir)
+
+
+def run_benchmark(compiled_dir: str, check_dir: str) -> float:
+ cache_dir = os.path.join(compiled_dir, ".mypy_cache")
+ if os.path.isdir(cache_dir):
+ shutil.rmtree(cache_dir)
+ env = os.environ.copy()
+ env["PYTHONPATH"] = os.path.abspath(compiled_dir)
+ abschk = os.path.abspath(check_dir)
+ cmd = [
+ sys.executable,
+ "-m",
+ "mypy",
+ "--config-file",
+ os.path.join(abschk, "mypy_self_check.ini"),
+ ]
+ cmd += glob.glob(os.path.join(abschk, "mypy/*.py"))
+ cmd += glob.glob(os.path.join(abschk, "mypy/*/*.py"))
+ t0 = time.time()
+ # Ignore errors, since some commits being measured may generate additional errors.
+ subprocess.run(cmd, cwd=compiled_dir, env=env)
+ return time.time() - t0
+
+
+def main() -> None:
+ parser = argparse.ArgumentParser()
+ parser.add_argument("commit", nargs="+")
+ args = parser.parse_args()
+ commits = args.commit
+ num_runs = 16
+
+ if not (os.path.isdir(".git") and os.path.isdir("mypyc")):
+ sys.exit("error: Run this the mypy repo root")
+
+ build_threads = []
+ target_dirs = []
+ for i, commit in enumerate(commits):
+ target_dir = f"mypy.{i}.tmpdir"
+ target_dirs.append(target_dir)
+ clone(target_dir, commit)
+        # Pass target_dir via args so each thread binds its own clone dir
+        # (a lambda would read the loop variable at call time).
+        t = threading.Thread(target=build_mypy, args=(target_dir,))
+ t.start()
+ build_threads.append(t)
+
+ self_check_dir = "mypy.self.tmpdir"
+ clone(self_check_dir, commits[0])
+
+ heading("Compiling mypy")
+ print("(This will take a while...)")
+
+ for t in build_threads:
+ t.join()
+
+ print(f"Finished compiling mypy ({len(commits)} builds)")
+
+ heading("Performing measurements")
+
+ results: dict[str, list[float]] = {}
+ for n in range(num_runs):
+ if n == 0:
+ print("Warmup...")
+ else:
+ print(f"Run {n}/{num_runs - 1}...")
+ items = list(enumerate(commits))
+ random.shuffle(items)
+ for i, commit in items:
+ tt = run_benchmark(target_dirs[i], self_check_dir)
+ # Don't record the first warm-up run
+ if n > 0:
+ print(f"{commit}: t={tt:.3f}s")
+ results.setdefault(commit, []).append(tt)
+
+ print()
+ heading("Results")
+ first = -1.0
+ for commit in commits:
+ tt = statistics.mean(results[commit])
+ if first < 0:
+ delta = "0.0%"
+ first = tt
+ else:
+ d = (tt / first) - 1
+ delta = f"{d:+.1%}"
+ print(f"{commit:<25} {tt:.3f}s ({delta})")
+
+ shutil.rmtree(self_check_dir)
+ for target_dir in target_dirs:
+ shutil.rmtree(target_dir)
+
+
+if __name__ == "__main__":
+ main()
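
The final report treats the first commit as the baseline and prints each later commit's mean runtime with a signed percentage delta. A self-contained sketch of that computation with made-up timings:

```python
import statistics

# Made-up per-commit self-check timings (seconds); first commit is baseline.
results = {"master": [30.1, 29.8, 30.3], "my-branch": [27.9, 28.2, 28.0]}

first = -1.0
for commit, times in results.items():
    tt = statistics.mean(times)
    if first < 0:
        delta = "0.0%"  # the baseline compares against itself
        first = tt
    else:
        delta = f"{(tt / first) - 1:+.1%}"  # negative means faster than baseline
    print(f"{commit:<25} {tt:.3f}s ({delta})")
```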
diff --git a/misc/proper_plugin.py b/misc/proper_plugin.py
index acd77500cd5d..a8a8e80ef360 100644
--- a/misc/proper_plugin.py
+++ b/misc/proper_plugin.py
@@ -1,13 +1,24 @@
-from mypy.plugin import Plugin, FunctionContext
-from mypy.types import (
- FunctionLike, Type, Instance, CallableType, UnionType, get_proper_type, ProperType,
- get_proper_types, TupleType, NoneTyp, AnyType
-)
+from __future__ import annotations
+
+from typing import Callable
+
+from mypy.checker import TypeChecker
from mypy.nodes import TypeInfo
+from mypy.plugin import FunctionContext, Plugin
from mypy.subtypes import is_proper_subtype
-
-from typing_extensions import Type as typing_Type
-from typing import Optional, Callable
+from mypy.types import (
+ AnyType,
+ CallableType,
+ FunctionLike,
+ Instance,
+ NoneTyp,
+ ProperType,
+ TupleType,
+ Type,
+ UnionType,
+ get_proper_type,
+ get_proper_types,
+)
class ProperTypePlugin(Plugin):
@@ -22,13 +33,13 @@ class ProperTypePlugin(Plugin):
But after introducing a new type TypeAliasType (and removing immediate expansion)
all these became dangerous because typ may be e.g. an alias to union.
"""
- def get_function_hook(self, fullname: str
- ) -> Optional[Callable[[FunctionContext], Type]]:
- if fullname == 'builtins.isinstance':
+
+ def get_function_hook(self, fullname: str) -> Callable[[FunctionContext], Type] | None:
+ if fullname == "builtins.isinstance":
return isinstance_proper_hook
- if fullname == 'mypy.types.get_proper_type':
+ if fullname == "mypy.types.get_proper_type":
return proper_type_hook
- if fullname == 'mypy.types.get_proper_types':
+ if fullname == "mypy.types.get_proper_types":
return proper_types_hook
return None
@@ -39,41 +50,53 @@ def isinstance_proper_hook(ctx: FunctionContext) -> Type:
right = get_proper_type(ctx.arg_types[1][0])
for arg in ctx.arg_types[0]:
- if (is_improper_type(arg) or
- isinstance(get_proper_type(arg), AnyType) and is_dangerous_target(right)):
+ if (
+ is_improper_type(arg) or isinstance(get_proper_type(arg), AnyType)
+ ) and is_dangerous_target(right):
if is_special_target(right):
return ctx.default_return_type
- ctx.api.fail('Never apply isinstance() to unexpanded types;'
- ' use mypy.types.get_proper_type() first', ctx.context)
- ctx.api.note('If you pass on the original type' # type: ignore[attr-defined]
- ' after the check, always use its unexpanded version', ctx.context)
+ ctx.api.fail(
+ "Never apply isinstance() to unexpanded types;"
+ " use mypy.types.get_proper_type() first",
+ ctx.context,
+ )
+ ctx.api.note( # type: ignore[attr-defined]
+ "If you pass on the original type"
+ " after the check, always use its unexpanded version",
+ ctx.context,
+ )
return ctx.default_return_type
def is_special_target(right: ProperType) -> bool:
"""Whitelist some special cases for use in isinstance() with improper types."""
if isinstance(right, FunctionLike) and right.is_type_obj():
- if right.type_object().fullname == 'builtins.tuple':
+ if right.type_object().fullname == "builtins.tuple":
# Used with Union[Type, Tuple[Type, ...]].
return True
if right.type_object().fullname in (
- 'mypy.types.Type',
- 'mypy.types.ProperType',
- 'mypy.types.TypeAliasType'
+ "mypy.types.Type",
+ "mypy.types.ProperType",
+ "mypy.types.TypeAliasType",
):
# Special case: things like assert isinstance(typ, ProperType) are always OK.
return True
if right.type_object().fullname in (
- 'mypy.types.UnboundType',
- 'mypy.types.TypeVarType',
- 'mypy.types.ParamSpecType',
- 'mypy.types.RawExpressionType',
- 'mypy.types.EllipsisType',
- 'mypy.types.StarType',
- 'mypy.types.TypeList',
- 'mypy.types.CallableArgument',
- 'mypy.types.PartialType',
- 'mypy.types.ErasedType'
+ "mypy.types.UnboundType",
+ "mypy.types.TypeVarLikeType",
+ "mypy.types.TypeVarType",
+ "mypy.types.UnpackType",
+ "mypy.types.TypeVarTupleType",
+ "mypy.types.ParamSpecType",
+ "mypy.types.RawExpressionType",
+ "mypy.types.EllipsisType",
+ "mypy.types.StarType",
+ "mypy.types.TypeList",
+ "mypy.types.CallableArgument",
+ "mypy.types.PartialType",
+ "mypy.types.ErasedType",
+ "mypy.types.DeletedType",
+ "mypy.types.RequiredType",
):
# Special case: these are not valid targets for a type alias and thus safe.
# TODO: introduce a SyntheticType base to simplify this?
@@ -88,7 +111,7 @@ def is_improper_type(typ: Type) -> bool:
typ = get_proper_type(typ)
if isinstance(typ, Instance):
info = typ.type
- return info.has_base('mypy.types.Type') and not info.has_base('mypy.types.ProperType')
+ return info.has_base("mypy.types.Type") and not info.has_base("mypy.types.ProperType")
if isinstance(typ, UnionType):
return any(is_improper_type(t) for t in typ.items)
return False
@@ -99,7 +122,7 @@ def is_dangerous_target(typ: ProperType) -> bool:
if isinstance(typ, TupleType):
return any(is_dangerous_target(get_proper_type(t)) for t in typ.items)
if isinstance(typ, CallableType) and typ.is_type_obj():
- return typ.type_object().has_base('mypy.types.Type')
+ return typ.type_object().has_base("mypy.types.Type")
return False
@@ -113,7 +136,7 @@ def proper_type_hook(ctx: FunctionContext) -> Type:
# Minimize amount of spurious errors from overload machinery.
# TODO: call the hook on the overload as a whole?
if isinstance(arg_type, (UnionType, Instance)):
- ctx.api.fail('Redundant call to get_proper_type()', ctx.context)
+ ctx.api.fail("Redundant call to get_proper_type()", ctx.context)
return ctx.default_return_type
@@ -124,18 +147,20 @@ def proper_types_hook(ctx: FunctionContext) -> Type:
arg_type = arg_types[0]
proper_type = get_proper_type_instance(ctx)
item_type = UnionType.make_union([NoneTyp(), proper_type])
- ok_type = ctx.api.named_generic_type('typing.Iterable', [item_type])
+ ok_type = ctx.api.named_generic_type("typing.Iterable", [item_type])
if is_proper_subtype(arg_type, ok_type):
- ctx.api.fail('Redundant call to get_proper_types()', ctx.context)
+ ctx.api.fail("Redundant call to get_proper_types()", ctx.context)
return ctx.default_return_type
def get_proper_type_instance(ctx: FunctionContext) -> Instance:
- types = ctx.api.modules['mypy.types'] # type: ignore
- proper_type_info = types.names['ProperType']
+ checker = ctx.api
+ assert isinstance(checker, TypeChecker)
+ types = checker.modules["mypy.types"]
+ proper_type_info = types.names["ProperType"]
assert isinstance(proper_type_info.node, TypeInfo)
return Instance(proper_type_info.node, [])
-def plugin(version: str) -> typing_Type[ProperTypePlugin]:
+def plugin(version: str) -> type[ProperTypePlugin]:
return ProperTypePlugin
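
For illustration (not part of the diff), the coding pattern this plugin enforces inside mypy itself: expand a possibly-aliased type with `get_proper_type()` before any `isinstance()` check against a concrete type class.

```python
from mypy.types import Instance, Type, get_proper_type


def is_str_type(typ: Type) -> bool:
    # A bare `isinstance(typ, Instance)` would be flagged by ProperTypePlugin:
    # `typ` may be a TypeAliasType that merely aliases an Instance (or a union).
    p = get_proper_type(typ)
    return isinstance(p, Instance) and p.type.fullname == "builtins.str"
```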
diff --git a/misc/sync-typeshed.py b/misc/sync-typeshed.py
index 93cbd951e0f6..8eeb9be7f4f8 100644
--- a/misc/sync-typeshed.py
+++ b/misc/sync-typeshed.py
@@ -7,80 +7,164 @@
By default, sync to the latest typeshed commit.
"""
+from __future__ import annotations
+
import argparse
+import functools
import os
+import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
-from typing import Optional
+from collections.abc import Mapping
+
+import requests
def check_state() -> None:
- if not os.path.isfile('README.md'):
- sys.exit('error: The current working directory must be the mypy repository root')
- out = subprocess.check_output(['git', 'status', '-s', os.path.join('mypy', 'typeshed')])
+ if not os.path.isfile("README.md") and not os.path.isdir("mypy"):
+ sys.exit("error: The current working directory must be the mypy repository root")
+ out = subprocess.check_output(["git", "status", "-s", os.path.join("mypy", "typeshed")])
if out:
# If there are local changes under mypy/typeshed, they would be lost.
sys.exit('error: Output of "git status -s mypy/typeshed" must be empty')
-def update_typeshed(typeshed_dir: str, commit: Optional[str]) -> str:
+def update_typeshed(typeshed_dir: str, commit: str | None) -> str:
"""Update contents of local typeshed copy.
Return the normalized typeshed commit hash.
"""
- assert os.path.isdir(os.path.join(typeshed_dir, 'stdlib'))
- assert os.path.isdir(os.path.join(typeshed_dir, 'stubs'))
+ assert os.path.isdir(os.path.join(typeshed_dir, "stdlib"))
+ assert os.path.isdir(os.path.join(typeshed_dir, "stubs"))
if commit:
- subprocess.run(['git', 'checkout', commit], check=True, cwd=typeshed_dir)
+ subprocess.run(["git", "checkout", commit], check=True, cwd=typeshed_dir)
commit = git_head_commit(typeshed_dir)
- stdlib_dir = os.path.join('mypy', 'typeshed', 'stdlib')
+
+ stdlib_dir = os.path.join("mypy", "typeshed", "stdlib")
# Remove existing stubs.
shutil.rmtree(stdlib_dir)
# Copy new stdlib stubs.
- shutil.copytree(os.path.join(typeshed_dir, 'stdlib'), stdlib_dir)
+ shutil.copytree(os.path.join(typeshed_dir, "stdlib"), stdlib_dir)
# Copy mypy_extensions stubs. We don't want to use a stub package, since it's
# treated specially by mypy and we make assumptions about what's there.
- stubs_dir = os.path.join('mypy', 'typeshed', 'stubs')
+ stubs_dir = os.path.join("mypy", "typeshed", "stubs")
shutil.rmtree(stubs_dir)
os.makedirs(stubs_dir)
- shutil.copytree(os.path.join(typeshed_dir, 'stubs', 'mypy-extensions'),
- os.path.join(stubs_dir, 'mypy-extensions'))
- shutil.copy(os.path.join(typeshed_dir, 'LICENSE'), os.path.join('mypy', 'typeshed'))
+ shutil.copytree(
+ os.path.join(typeshed_dir, "stubs", "mypy-extensions"),
+ os.path.join(stubs_dir, "mypy-extensions"),
+ )
+ shutil.copy(os.path.join(typeshed_dir, "LICENSE"), os.path.join("mypy", "typeshed"))
return commit
def git_head_commit(repo: str) -> str:
- commit = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=repo).decode('ascii')
+ commit = subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=repo).decode("ascii")
return commit.strip()
+@functools.cache
+def get_github_api_headers() -> Mapping[str, str]:
+ headers = {"Accept": "application/vnd.github.v3+json"}
+ secret = os.environ.get("GITHUB_TOKEN")
+ if secret is not None:
+ headers["Authorization"] = (
+ f"token {secret}" if secret.startswith("ghp") else f"Bearer {secret}"
+ )
+ return headers
+
+
+@functools.cache
+def get_origin_owner() -> str:
+ output = subprocess.check_output(["git", "remote", "get-url", "origin"], text=True).strip()
+ match = re.match(
+ r"(git@github.com:|https://github.com/)(?P[^/]+)/(?P[^/\s]+)", output
+ )
+ assert match is not None, f"Couldn't identify origin's owner: {output!r}"
+ assert (
+ match.group("repo").removesuffix(".git") == "mypy"
+ ), f'Unexpected repo: {match.group("repo")!r}'
+ return match.group("owner")
+
+
+def create_or_update_pull_request(*, title: str, body: str, branch_name: str) -> None:
+ fork_owner = get_origin_owner()
+
+ with requests.post(
+ "https://api.github.com/repos/python/mypy/pulls",
+ json={
+ "title": title,
+ "body": body,
+ "head": f"{fork_owner}:{branch_name}",
+ "base": "master",
+ },
+ headers=get_github_api_headers(),
+ ) as response:
+ resp_json = response.json()
+ if response.status_code == 422 and any(
+ "A pull request already exists" in e.get("message", "")
+ for e in resp_json.get("errors", [])
+ ):
+ # Find the existing PR
+ with requests.get(
+ "https://api.github.com/repos/python/mypy/pulls",
+ params={"state": "open", "head": f"{fork_owner}:{branch_name}", "base": "master"},
+ headers=get_github_api_headers(),
+ ) as response:
+ response.raise_for_status()
+ resp_json = response.json()
+ assert len(resp_json) >= 1
+ pr_number = resp_json[0]["number"]
+ # Update the PR's title and body
+ with requests.patch(
+ f"https://api.github.com/repos/python/mypy/pulls/{pr_number}",
+ json={"title": title, "body": body},
+ headers=get_github_api_headers(),
+ ) as response:
+ response.raise_for_status()
+ return
+ response.raise_for_status()
+
+
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument(
- "--commit", default=None,
- help="Typeshed commit (default to latest master if using a repository clone)"
+ "--commit",
+ default=None,
+ help="Typeshed commit (default to latest main if using a repository clone)",
)
parser.add_argument(
- "--typeshed-dir", default=None,
- help="Location of typeshed (default to a temporary repository clone)"
+ "--typeshed-dir",
+ default=None,
+ help="Location of typeshed (default to a temporary repository clone)",
+ )
+ parser.add_argument(
+ "--make-pr",
+ action="store_true",
+ help="Whether to make a PR with the changes (default to no)",
)
args = parser.parse_args()
+
check_state()
- print('Update contents of mypy/typeshed from typeshed? [yN] ', end='')
- answer = input()
- if answer.lower() != 'y':
- sys.exit('Aborting')
+
+ if args.make_pr:
+ if os.environ.get("GITHUB_TOKEN") is None:
+ raise ValueError("GITHUB_TOKEN environment variable must be set")
+
+ branch_name = "mypybot/sync-typeshed"
+ subprocess.run(["git", "checkout", "-B", branch_name, "origin/master"], check=True)
if not args.typeshed_dir:
# Clone typeshed repo if no directory given.
with tempfile.TemporaryDirectory() as tempdir:
- print('Cloning typeshed in {}...'.format(tempdir))
- subprocess.run(['git', 'clone', 'https://github.com/python/typeshed.git'],
- check=True, cwd=tempdir)
- repo = os.path.join(tempdir, 'typeshed')
+ print(f"Cloning typeshed in {tempdir}...")
+ subprocess.run(
+ ["git", "clone", "https://github.com/python/typeshed.git"], check=True, cwd=tempdir
+ )
+ repo = os.path.join(tempdir, "typeshed")
commit = update_typeshed(repo, args.commit)
else:
commit = update_typeshed(args.typeshed_dir, args.commit)
@@ -88,16 +172,38 @@ def main() -> None:
assert commit
# Create a commit
- message = textwrap.dedent("""\
+ message = textwrap.dedent(
+ f"""\
Sync typeshed
Source commit:
https://github.com/python/typeshed/commit/{commit}
- """.format(commit=commit))
- subprocess.run(['git', 'add', '--all', os.path.join('mypy', 'typeshed')], check=True)
- subprocess.run(['git', 'commit', '-m', message], check=True)
- print('Created typeshed sync commit.')
+ """
+ )
+ subprocess.run(["git", "add", "--all", os.path.join("mypy", "typeshed")], check=True)
+ subprocess.run(["git", "commit", "-m", message], check=True)
+ print("Created typeshed sync commit.")
+
+ commits_to_cherry_pick = [
+ "780534b13722b7b0422178c049a1cbbf4ea4255b", # LiteralString reverts
+ "5319fa34a8004c1568bb6f032a07b8b14cc95bed", # sum reverts
+ "0062994228fb62975c6cef4d2c80d00c7aa1c545", # ctypes reverts
+ ]
+ for commit in commits_to_cherry_pick:
+ subprocess.run(["git", "cherry-pick", commit], check=True)
+ print(f"Cherry-picked {commit}.")
+
+ if args.make_pr:
+ subprocess.run(["git", "push", "--force", "origin", branch_name], check=True)
+ print("Pushed commit.")
+
+ warning = "Note that you will need to close and re-open the PR in order to trigger CI."
+
+ create_or_update_pull_request(
+ title="Sync typeshed", body=message + "\n" + warning, branch_name=branch_name
+ )
+ print("Created PR.")
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
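
A hypothetical invocation of the updated script, matching the argparse options added above; `--make-pr` requires `GITHUB_TOKEN` to be set, and the commit hash here is a placeholder:

```python
import subprocess

# Placeholder commit hash; drop --commit to sync to the latest typeshed main.
subprocess.run(
    ["python", "misc/sync-typeshed.py", "--commit", "0123abcd", "--make-pr"],
    check=True,
)
```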
diff --git a/misc/test-stubgenc.sh b/misc/test-stubgenc.sh
index 175c912e6712..7da135f0bf16 100755
--- a/misc/test-stubgenc.sh
+++ b/misc/test-stubgenc.sh
@@ -1,14 +1,19 @@
#!/bin/bash
-# This script is expected to be run from root of the mypy repo
+
+set -e
+set -x
+
+cd "$(dirname $0)/.."
# Install dependencies, demo project and mypy
python -m pip install -r test-requirements.txt
-python -m pip install pybind11-mypy-demo==0.0.1
+python -m pip install ./test-data/pybind11_mypy_demo
python -m pip install .
# Remove expected stubs and generate new inplace
-rm -rf test-data/stubgen/pybind11_mypy_demo
-stubgen -p pybind11_mypy_demo -o test-data/stubgen/
+STUBGEN_OUTPUT_FOLDER=./test-data/pybind11_mypy_demo/stubgen
+rm -rf $STUBGEN_OUTPUT_FOLDER/*
+stubgen -p pybind11_mypy_demo -o $STUBGEN_OUTPUT_FOLDER
# Compare generated stubs to expected ones
-git diff --exit-code test-data/stubgen/pybind11_mypy_demo
+git diff --exit-code $STUBGEN_OUTPUT_FOLDER
diff --git a/misc/test_case_to_actual.py b/misc/test_case_to_actual.py
deleted file mode 100644
index 9a91bb1fa07d..000000000000
--- a/misc/test_case_to_actual.py
+++ /dev/null
@@ -1,71 +0,0 @@
-from typing import Iterator, List
-import sys
-import os
-import os.path
-
-
-class Chunk:
- def __init__(self, header_type: str, args: str) -> None:
- self.header_type = header_type
- self.args = args
- self.lines = [] # type: List[str]
-
-
-def is_header(line: str) -> bool:
- return line.startswith('[') and line.endswith(']')
-
-
-def normalize(lines: Iterator[str]) -> Iterator[str]:
- return (line.rstrip() for line in lines)
-
-
-def produce_chunks(lines: Iterator[str]) -> Iterator[Chunk]:
- current_chunk = None # type: Chunk
- for line in normalize(lines):
- if is_header(line):
- if current_chunk is not None:
- yield current_chunk
- parts = line[1:-1].split(' ', 1)
- args = parts[1] if len(parts) > 1 else ''
- current_chunk = Chunk(parts[0], args)
- else:
- current_chunk.lines.append(line)
- if current_chunk is not None:
- yield current_chunk
-
-
-def write_out(filename: str, lines: List[str]) -> None:
- os.makedirs(os.path.dirname(filename), exist_ok=True)
- with open(filename, 'w') as stream:
- stream.write('\n'.join(lines))
-
-
-def write_tree(root: str, chunks: Iterator[Chunk]) -> None:
- init = next(chunks)
- assert init.header_type == 'case'
-
- root = os.path.join(root, init.args)
- write_out(os.path.join(root, 'main.py'), init.lines)
-
- for chunk in chunks:
- if chunk.header_type == 'file' and chunk.args.endswith('.py'):
- write_out(os.path.join(root, chunk.args), chunk.lines)
-
-
-def help() -> None:
- print("Usage: python misc/test_case_to_actual.py test_file.txt root_path")
-
-
-def main() -> None:
- if len(sys.argv) != 3:
- help()
- return
-
- test_file_path, root_path = sys.argv[1], sys.argv[2]
- with open(test_file_path, 'r') as stream:
- chunks = produce_chunks(iter(stream))
- write_tree(root_path, chunks)
-
-
-if __name__ == '__main__':
- main()
diff --git a/misc/touch_checker.py b/misc/touch_checker.py
deleted file mode 100644
index c44afe492255..000000000000
--- a/misc/touch_checker.py
+++ /dev/null
@@ -1,151 +0,0 @@
-#!/usr/bin/env python3
-
-from typing import Callable, List, Tuple, Optional
-
-import sys
-import glob
-import os
-import shutil
-import statistics
-import subprocess
-import textwrap
-import time
-
-
-def print_offset(text: str, indent_length: int = 4) -> None:
- print()
- print(textwrap.indent(text, ' ' * indent_length))
- print()
-
-
-def delete_folder(folder_path: str) -> None:
- if os.path.exists(folder_path):
- shutil.rmtree(folder_path)
-
-
-def execute(command: List[str]) -> None:
- proc = subprocess.Popen(
- ' '.join(command),
- stderr=subprocess.PIPE,
- stdout=subprocess.PIPE,
- shell=True)
- stdout_bytes, stderr_bytes = proc.communicate() # type: Tuple[bytes, bytes]
- stdout, stderr = stdout_bytes.decode('utf-8'), stderr_bytes.decode('utf-8')
- if proc.returncode != 0:
- print('EXECUTED COMMAND:', repr(command))
- print('RETURN CODE:', proc.returncode)
- print()
- print('STDOUT:')
- print_offset(stdout)
- print('STDERR:')
- print_offset(stderr)
- print()
-
-
-Command = Callable[[], None]
-
-
-def test(setup: Command, command: Command, teardown: Command) -> float:
- setup()
- start = time.time()
- command()
- end = time.time() - start
- teardown()
- return end
-
-
-def make_touch_wrappers(filename: str) -> Tuple[Command, Command]:
- def setup() -> None:
- execute(["touch", filename])
- def teardown() -> None:
- pass
- return setup, teardown
-
-
-def make_change_wrappers(filename: str) -> Tuple[Command, Command]:
- copy = None # type: Optional[str]
-
- def setup() -> None:
- nonlocal copy
- with open(filename, 'r') as stream:
- copy = stream.read()
- with open(filename, 'a') as stream:
- stream.write('\n\nfoo = 3')
-
- def teardown() -> None:
- assert copy is not None
- with open(filename, 'w') as stream:
- stream.write(copy)
-
- # Re-run to reset cache
- execute(["python3", "-m", "mypy", "-i", "mypy"]),
-
- return setup, teardown
-
-def main() -> None:
- if len(sys.argv) != 2 or sys.argv[1] not in {'touch', 'change'}:
- print("First argument should be 'touch' or 'change'")
- return
-
- if sys.argv[1] == 'touch':
- make_wrappers = make_touch_wrappers
- verb = "Touching"
- elif sys.argv[1] == 'change':
- make_wrappers = make_change_wrappers
- verb = "Changing"
- else:
- raise AssertionError()
-
- print("Setting up...")
-
- baseline = test(
- lambda: None,
- lambda: execute(["python3", "-m", "mypy", "mypy"]),
- lambda: None)
- print("Baseline: {}".format(baseline))
-
- cold = test(
- lambda: delete_folder(".mypy_cache"),
- lambda: execute(["python3", "-m", "mypy", "-i", "mypy"]),
- lambda: None)
- print("Cold cache: {}".format(cold))
-
- warm = test(
- lambda: None,
- lambda: execute(["python3", "-m", "mypy", "-i", "mypy"]),
- lambda: None)
- print("Warm cache: {}".format(warm))
-
- print()
-
- deltas = []
- for filename in glob.iglob("mypy/**/*.py", recursive=True):
- print("{} {}".format(verb, filename))
-
- setup, teardown = make_wrappers(filename)
- delta = test(
- setup,
- lambda: execute(["python3", "-m", "mypy", "-i", "mypy"]),
- teardown)
- print(" Time: {}".format(delta))
- deltas.append(delta)
- print()
-
- print("Initial:")
- print(" Baseline: {}".format(baseline))
- print(" Cold cache: {}".format(cold))
- print(" Warm cache: {}".format(warm))
- print()
- print("Aggregate:")
- print(" Times: {}".format(deltas))
- print(" Mean: {}".format(statistics.mean(deltas)))
- print(" Median: {}".format(statistics.median(deltas)))
- print(" Stdev: {}".format(statistics.stdev(deltas)))
- print(" Min: {}".format(min(deltas)))
- print(" Max: {}".format(max(deltas)))
- print(" Total: {}".format(sum(deltas)))
- print()
-
-if __name__ == '__main__':
- main()
-
diff --git a/misc/trigger_wheel_build.sh b/misc/trigger_wheel_build.sh
index b00b08a15c55..c914a6e7cf86 100755
--- a/misc/trigger_wheel_build.sh
+++ b/misc/trigger_wheel_build.sh
@@ -14,7 +14,7 @@ pip install -r mypy-requirements.txt
V=$(python3 -m mypy --version)
V=$(echo "$V" | cut -d" " -f2)
-git clone https://${WHEELS_PUSH_TOKEN}@github.com/mypyc/mypy_mypyc-wheels.git build
+git clone --depth 1 https://${WHEELS_PUSH_TOKEN}@github.com/mypyc/mypy_mypyc-wheels.git build
cd build
echo $COMMIT > mypy_commit
git commit -am "Build wheels for mypy $V"
diff --git a/misc/upload-pypi.py b/misc/upload-pypi.py
index ad244a547ddb..e60ec3cca207 100644
--- a/misc/upload-pypi.py
+++ b/misc/upload-pypi.py
@@ -5,6 +5,8 @@
"""
+from __future__ import annotations
+
import argparse
import contextlib
import json
@@ -16,7 +18,7 @@
import venv
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
-from typing import Any, Dict, Iterator, List
+from typing import Any, Iterator
from urllib.request import urlopen
BASE = "https://api.github.com/repos"
@@ -27,15 +29,32 @@ def is_whl_or_tar(name: str) -> bool:
return name.endswith(".tar.gz") or name.endswith(".whl")
-def get_release_for_tag(tag: str) -> Dict[str, Any]:
+def item_ok_for_pypi(name: str) -> bool:
+ if not is_whl_or_tar(name):
+ return False
+
+ if name.endswith(".tar.gz"):
+ name = name[:-7]
+ if name.endswith(".whl"):
+ name = name[:-4]
+
+ if name.endswith("wasm32"):
+ return False
+
+ return True
+
+
+def get_release_for_tag(tag: str) -> dict[str, Any]:
with urlopen(f"{BASE}/{REPO}/releases/tags/{tag}") as f:
data = json.load(f)
+ assert isinstance(data, dict)
assert data["tag_name"] == tag
return data
-def download_asset(asset: Dict[str, Any], dst: Path) -> Path:
+def download_asset(asset: dict[str, Any], dst: Path) -> Path:
name = asset["name"]
+ assert isinstance(name, str)
download_url = asset["browser_download_url"]
assert is_whl_or_tar(name)
with urlopen(download_url) as src_file:
@@ -44,8 +63,8 @@ def download_asset(asset: Dict[str, Any], dst: Path) -> Path:
return dst / name
-def download_all_release_assets(release: Dict[str, Any], dst: Path) -> None:
- print(f"Downloading assets...")
+def download_all_release_assets(release: dict[str, Any], dst: Path) -> None:
+ print("Downloading assets...")
with ThreadPoolExecutor() as e:
for asset in e.map(lambda asset: download_asset(asset, dst), release["assets"]):
print(f"Downloaded {asset}")
@@ -66,12 +85,12 @@ def check_sdist(dist: Path, version: str) -> None:
hashless_version = match.group(1) if match else version
assert (
- f"'{hashless_version}'" in version_py_contents
+ f'"{hashless_version}"' in version_py_contents
), "Version does not match version.py in sdist"
def spot_check_dist(dist: Path, version: str) -> None:
- items = [item for item in dist.iterdir() if is_whl_or_tar(item.name)]
+ items = [item for item in dist.iterdir() if item_ok_for_pypi(item.name)]
assert len(items) > 10
assert all(version in item.name for item in items)
assert any(item.name.endswith("py3-none-any.whl") for item in items)
@@ -89,8 +108,8 @@ def tmp_twine() -> Iterator[Path]:
def upload_dist(dist: Path, dry_run: bool = True) -> None:
with tmp_twine() as twine:
- files = [item for item in dist.iterdir() if is_whl_or_tar(item.name)]
- cmd: List[Any] = [twine, "upload"]
+ files = [item for item in dist.iterdir() if item_ok_for_pypi(item.name)]
+ cmd: list[Any] = [twine, "upload"]
cmd += files
if dry_run:
print("[dry run] " + " ".join(map(str, cmd)))
diff --git a/misc/variadics.py b/misc/variadics.py
deleted file mode 100644
index 920028853a4f..000000000000
--- a/misc/variadics.py
+++ /dev/null
@@ -1,54 +0,0 @@
-"""Example of code generation approach to variadics.
-
-See https://github.com/python/typing/issues/193#issuecomment-236383893
-"""
-
-LIMIT = 5
-BOUND = 'object'
-
-def prelude(limit: int, bound: str) -> None:
- print('from typing import Callable, Iterable, Iterator, Tuple, TypeVar, overload')
- print('Ts = TypeVar(\'Ts\', bound={bound})'.format(bound=bound))
- print('R = TypeVar(\'R\')')
- for i in range(LIMIT):
- print('T{i} = TypeVar(\'T{i}\', bound={bound})'.format(i=i+1, bound=bound))
-
-def expand_template(template: str,
- arg_template: str = 'arg{i}: {Ts}',
- lower: int = 0,
- limit: int = LIMIT) -> None:
- print()
- for i in range(lower, limit):
- tvs = ', '.join('T{i}'.format(i=j+1) for j in range(i))
- args = ', '.join(arg_template.format(i=j+1, Ts='T{}'.format(j+1))
- for j in range(i))
- print('@overload')
- s = template.format(Ts=tvs, argsTs=args)
- s = s.replace('Tuple[]', 'Tuple[()]')
- print(s)
- args_l = [arg_template.format(i=j+1, Ts='Ts') for j in range(limit)]
- args_l.append('*' + (arg_template.format(i='s', Ts='Ts')))
- args = ', '.join(args_l)
- s = template.format(Ts='Ts, ...', argsTs=args)
- s = s.replace('Callable[[Ts, ...]', 'Callable[...')
- print('@overload')
- print(s)
-
-def main():
- prelude(LIMIT, BOUND)
-
- # map()
- expand_template('def map(func: Callable[[{Ts}], R], {argsTs}) -> R: ...',
- lower=1)
- # zip()
- expand_template('def zip({argsTs}) -> Tuple[{Ts}]: ...')
-
- # Naomi's examples
- expand_template('def my_zip({argsTs}) -> Iterator[Tuple[{Ts}]]: ...',
- 'arg{i}: Iterable[{Ts}]')
- expand_template('def make_check({argsTs}) -> Callable[[{Ts}], bool]: ...')
- expand_template('def my_map(f: Callable[[{Ts}], R], {argsTs}) -> Iterator[R]: ...',
- 'arg{i}: Iterable[{Ts}]')
-
-
-main()
diff --git a/mypy-requirements.txt b/mypy-requirements.txt
index 1c372294383d..ee5fe5d295b8 100644
--- a/mypy-requirements.txt
+++ b/mypy-requirements.txt
@@ -1,3 +1,4 @@
+# NOTE: this needs to be kept in sync with the "requires" list in pyproject.toml
typing_extensions>=3.10
mypy_extensions>=0.4.3
typed_ast>=1.4.0,<2; python_version<'3.8'
diff --git a/mypy/__main__.py b/mypy/__main__.py
index aebeb4baedf8..049553cd1b44 100644
--- a/mypy/__main__.py
+++ b/mypy/__main__.py
@@ -1,4 +1,7 @@
"""Mypy type checker command line tool."""
+
+from __future__ import annotations
+
import os
import sys
import traceback
@@ -9,7 +12,7 @@
def console_entry() -> None:
try:
- main(None, sys.stdout, sys.stderr)
+ main()
sys.stdout.flush()
sys.stderr.flush()
except BrokenPipeError:
@@ -30,5 +33,5 @@ def console_entry() -> None:
sys.exit(2)
-if __name__ == '__main__':
+if __name__ == "__main__":
console_entry()
diff --git a/mypy/api.py b/mypy/api.py
index 28e8d835c7f8..589bfbbfa1a7 100644
--- a/mypy/api.py
+++ b/mypy/api.py
@@ -43,13 +43,14 @@
"""
-import sys
+from __future__ import annotations
+import sys
from io import StringIO
-from typing import List, Tuple, TextIO, Callable
+from typing import Callable, TextIO, cast
-def _run(main_wrapper: Callable[[TextIO, TextIO], None]) -> Tuple[str, str, int]:
+def _run(main_wrapper: Callable[[TextIO, TextIO], None]) -> tuple[str, str, int]:
stdout = StringIO()
stderr = StringIO()
@@ -58,19 +59,21 @@ def _run(main_wrapper: Callable[[TextIO, TextIO], None]) -> Tuple[str, str, int]
main_wrapper(stdout, stderr)
exit_status = 0
except SystemExit as system_exit:
- exit_status = system_exit.code
+ exit_status = cast(int, system_exit.code)
return stdout.getvalue(), stderr.getvalue(), exit_status
-def run(args: List[str]) -> Tuple[str, str, int]:
+def run(args: list[str]) -> tuple[str, str, int]:
# Lazy import to avoid needing to import all of mypy to call run_dmypy
from mypy.main import main
- return _run(lambda stdout, stderr: main(None, args=args,
- stdout=stdout, stderr=stderr, clean_exit=True))
+
+ return _run(
+ lambda stdout, stderr: main(args=args, stdout=stdout, stderr=stderr, clean_exit=True)
+ )
-def run_dmypy(args: List[str]) -> Tuple[str, str, int]:
+def run_dmypy(args: list[str]) -> tuple[str, str, int]:
from mypy.dmypy.client import main
# A bunch of effort has been put into threading stdout and stderr
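
For context, the entry points re-signatured here are typically used like this; the file name is a placeholder, and the exit-code convention (0 clean, 1 errors found, 2 fatal error) is mypy's documented behavior:

```python
from mypy.api import run

# Type-check a file programmatically: returns (stdout, stderr, exit_status).
stdout, stderr, exit_status = run(["--ignore-missing-imports", "some_file.py"])
print(stdout, end="")
print(f"exit status: {exit_status}")
```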
diff --git a/mypy/applytype.py b/mypy/applytype.py
index a967d834f1a2..a81ed3cd1f16 100644
--- a/mypy/applytype.py
+++ b/mypy/applytype.py
@@ -1,35 +1,50 @@
-from typing import Dict, Sequence, Optional, Callable
+from __future__ import annotations
+
+from typing import Callable, Sequence
import mypy.subtypes
-import mypy.sametypes
-from mypy.expandtype import expand_type
+from mypy.expandtype import expand_type, expand_unpack_with_variables
+from mypy.nodes import ARG_STAR, Context
from mypy.types import (
- Type, TypeVarId, TypeVarType, CallableType, AnyType, PartialType, get_proper_types,
- TypeVarLikeType, ProperType, ParamSpecType, Parameters, get_proper_type
+ AnyType,
+ CallableType,
+ Parameters,
+ ParamSpecType,
+ PartialType,
+ TupleType,
+ Type,
+ TypeVarId,
+ TypeVarLikeType,
+ TypeVarTupleType,
+ TypeVarType,
+ UnpackType,
+ get_proper_type,
)
-from mypy.nodes import Context
+from mypy.typevartuples import find_unpack_in_list, replace_starargs
def get_target_type(
tvar: TypeVarLikeType,
- type: ProperType,
+ type: Type,
callable: CallableType,
report_incompatible_typevar_value: Callable[[CallableType, Type, str, Context], None],
context: Context,
- skip_unsatisfied: bool
-) -> Optional[Type]:
+ skip_unsatisfied: bool,
+) -> Type | None:
if isinstance(tvar, ParamSpecType):
return type
+ if isinstance(tvar, TypeVarTupleType):
+ return type
assert isinstance(tvar, TypeVarType)
- values = get_proper_types(tvar.values)
+ values = tvar.values
+ p_type = get_proper_type(type)
if values:
- if isinstance(type, AnyType):
+ if isinstance(p_type, AnyType):
return type
- if isinstance(type, TypeVarType) and type.values:
+ if isinstance(p_type, TypeVarType) and p_type.values:
# Allow substituting T1 for T if every allowed value of T1
# is also a legal value of T.
- if all(any(mypy.sametypes.is_same_type(v, v1) for v in values)
- for v1 in type.values):
+ if all(any(mypy.subtypes.is_same_type(v, v1) for v in values) for v1 in p_type.values):
return type
matching = []
for value in values:
@@ -55,10 +70,13 @@ def get_target_type(
def apply_generic_arguments(
- callable: CallableType, orig_types: Sequence[Optional[Type]],
- report_incompatible_typevar_value: Callable[[CallableType, Type, str, Context], None],
- context: Context,
- skip_unsatisfied: bool = False) -> CallableType:
+ callable: CallableType,
+ orig_types: Sequence[Type | None],
+ report_incompatible_typevar_value: Callable[[CallableType, Type, str, Context], None],
+ context: Context,
+ skip_unsatisfied: bool = False,
+ allow_erased_callables: bool = False,
+) -> CallableType:
"""Apply generic type arguments to a callable type.
For example, applying [int] to 'def [T] (T) -> T' results in
@@ -73,12 +91,10 @@ def apply_generic_arguments(
assert len(tvars) == len(orig_types)
# Check that inferred type variable values are compatible with allowed
# values and bounds. Also, promote subtype values to allowed values.
- types = get_proper_types(orig_types)
-
# Create a map from type variable id to target type.
- id_to_type: Dict[TypeVarId, Type] = {}
+ id_to_type: dict[TypeVarId, Type] = {}
- for tvar, type in zip(tvars, types):
+ for tvar, type in zip(tvars, orig_types):
assert not isinstance(type, PartialType), "Internal error: must never apply partial type"
if type is None:
continue
@@ -98,13 +114,71 @@ def apply_generic_arguments(
callable = callable.expand_param_spec(nt)
# Apply arguments to argument types.
- arg_types = [expand_type(at, id_to_type) for at in callable.arg_types]
+ var_arg = callable.var_arg()
+ if var_arg is not None and isinstance(var_arg.typ, UnpackType):
+ star_index = callable.arg_kinds.index(ARG_STAR)
+ callable = callable.copy_modified(
+ arg_types=(
+ [
+ expand_type(at, id_to_type, allow_erased_callables)
+ for at in callable.arg_types[:star_index]
+ ]
+ + [callable.arg_types[star_index]]
+ + [
+ expand_type(at, id_to_type, allow_erased_callables)
+ for at in callable.arg_types[star_index + 1 :]
+ ]
+ )
+ )
+
+ unpacked_type = get_proper_type(var_arg.typ.type)
+ if isinstance(unpacked_type, TupleType):
+ # Assuming for now that because we convert prefixes to positional arguments,
+ # the first argument is always an unpack.
+ expanded_tuple = expand_type(unpacked_type, id_to_type)
+ if isinstance(expanded_tuple, TupleType):
+ # TODO: handle the case where the tuple has an unpack. This will
+ # hit an assert below.
+ expanded_unpack = find_unpack_in_list(expanded_tuple.items)
+ if expanded_unpack is not None:
+ callable = callable.copy_modified(
+ arg_types=(
+ callable.arg_types[:star_index]
+ + [expanded_tuple]
+ + callable.arg_types[star_index + 1 :]
+ )
+ )
+ else:
+ callable = replace_starargs(callable, expanded_tuple.items)
+ else:
+ # TODO: handle the case for if we get a variable length tuple.
+ assert False, f"mypy bug: unimplemented case, {expanded_tuple}"
+ elif isinstance(unpacked_type, TypeVarTupleType):
+ expanded_tvt = expand_unpack_with_variables(var_arg.typ, id_to_type)
+ assert isinstance(expanded_tvt, list)
+ for t in expanded_tvt:
+ assert not isinstance(t, UnpackType)
+ callable = replace_starargs(callable, expanded_tvt)
+ else:
+ assert False, "mypy bug: unhandled case applying unpack"
+ else:
+ callable = callable.copy_modified(
+ arg_types=[
+ expand_type(at, id_to_type, allow_erased_callables) for at in callable.arg_types
+ ]
+ )
+
+ # Apply arguments to TypeGuard if any.
+ if callable.type_guard is not None:
+ type_guard = expand_type(callable.type_guard, id_to_type, allow_erased_callables)
+ else:
+ type_guard = None
# The callable may retain some type vars if only some were applied.
remaining_tvars = [tv for tv in tvars if tv.id not in id_to_type]
return callable.copy_modified(
- arg_types=arg_types,
- ret_type=expand_type(callable.ret_type, id_to_type),
+ ret_type=expand_type(callable.ret_type, id_to_type, allow_erased_callables),
variables=remaining_tvars,
+ type_guard=type_guard,
)
diff --git a/mypy/argmap.py b/mypy/argmap.py
index bcb864472038..ec8463fd0625 100644
--- a/mypy/argmap.py
+++ b/mypy/argmap.py
@@ -1,23 +1,33 @@
"""Utilities for mapping between actual and formal arguments (and their types)."""
-from typing import TYPE_CHECKING, List, Optional, Sequence, Callable, Set
+from __future__ import annotations
+from typing import TYPE_CHECKING, Callable, Sequence
+
+from mypy import nodes
from mypy.maptype import map_instance_to_supertype
from mypy.types import (
- Type, Instance, TupleType, AnyType, TypeOfAny, TypedDictType, ParamSpecType, get_proper_type
+ AnyType,
+ Instance,
+ ParamSpecType,
+ TupleType,
+ Type,
+ TypedDictType,
+ TypeOfAny,
+ get_proper_type,
)
-from mypy import nodes
if TYPE_CHECKING:
from mypy.infer import ArgumentInferContext
-def map_actuals_to_formals(actual_kinds: List[nodes.ArgKind],
- actual_names: Optional[Sequence[Optional[str]]],
- formal_kinds: List[nodes.ArgKind],
- formal_names: Sequence[Optional[str]],
- actual_arg_type: Callable[[int],
- Type]) -> List[List[int]]:
+def map_actuals_to_formals(
+ actual_kinds: list[nodes.ArgKind],
+ actual_names: Sequence[str | None] | None,
+ formal_kinds: list[nodes.ArgKind],
+ formal_names: Sequence[str | None],
+ actual_arg_type: Callable[[int], Type],
+) -> list[list[int]]:
"""Calculate mapping between actual (caller) args and formals.
The result contains a list of caller argument indexes mapping to each
@@ -27,8 +37,8 @@ def map_actuals_to_formals(actual_kinds: List[nodes.ArgKind],
argument type with the given index.
"""
nformals = len(formal_kinds)
- formal_to_actual: List[List[int]] = [[] for i in range(nformals)]
- ambiguous_actual_kwargs: List[int] = []
+ formal_to_actual: list[list[int]] = [[] for i in range(nformals)]
+ ambiguous_actual_kwargs: list[int] = []
fi = 0
for ai, actual_kind in enumerate(actual_kinds):
if actual_kind == nodes.ARG_POS:
@@ -89,12 +99,19 @@ def map_actuals_to_formals(actual_kinds: List[nodes.ArgKind],
#
# TODO: If there are also tuple varargs, we might be missing some potential
# matches if the tuple was short enough to not match everything.
- unmatched_formals = [fi for fi in range(nformals)
- if (formal_names[fi]
- and (not formal_to_actual[fi]
- or actual_kinds[formal_to_actual[fi][0]] == nodes.ARG_STAR)
- and formal_kinds[fi] != nodes.ARG_STAR)
- or formal_kinds[fi] == nodes.ARG_STAR2]
+ unmatched_formals = [
+ fi
+ for fi in range(nformals)
+ if (
+ formal_names[fi]
+ and (
+ not formal_to_actual[fi]
+ or actual_kinds[formal_to_actual[fi][0]] == nodes.ARG_STAR
+ )
+ and formal_kinds[fi] != nodes.ARG_STAR
+ )
+ or formal_kinds[fi] == nodes.ARG_STAR2
+ ]
for ai in ambiguous_actual_kwargs:
for fi in unmatched_formals:
formal_to_actual[fi].append(ai)
@@ -102,20 +119,19 @@ def map_actuals_to_formals(actual_kinds: List[nodes.ArgKind],
return formal_to_actual
-def map_formals_to_actuals(actual_kinds: List[nodes.ArgKind],
- actual_names: Optional[Sequence[Optional[str]]],
- formal_kinds: List[nodes.ArgKind],
- formal_names: List[Optional[str]],
- actual_arg_type: Callable[[int],
- Type]) -> List[List[int]]:
+def map_formals_to_actuals(
+ actual_kinds: list[nodes.ArgKind],
+ actual_names: Sequence[str | None] | None,
+ formal_kinds: list[nodes.ArgKind],
+ formal_names: list[str | None],
+ actual_arg_type: Callable[[int], Type],
+) -> list[list[int]]:
"""Calculate the reverse mapping of map_actuals_to_formals."""
- formal_to_actual = map_actuals_to_formals(actual_kinds,
- actual_names,
- formal_kinds,
- formal_names,
- actual_arg_type)
+ formal_to_actual = map_actuals_to_formals(
+ actual_kinds, actual_names, formal_kinds, formal_names, actual_arg_type
+ )
# Now reverse the mapping.
- actual_to_formal: List[List[int]] = [[] for _ in actual_kinds]
+ actual_to_formal: list[list[int]] = [[] for _ in actual_kinds]
for formal, actuals in enumerate(formal_to_actual):
for actual in actuals:
actual_to_formal[actual].append(formal)
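
For orientation, a usage sketch of the mapping computed above, assuming mypy's internal modules are importable; the `AnyType` callback stands in for real inferred argument types:

```python
from mypy import nodes
from mypy.argmap import map_actuals_to_formals
from mypy.types import AnyType, TypeOfAny

# Call site f(1, 2, kw=3) checked against: def f(a, b, *, kw): ...
mapping = map_actuals_to_formals(
    actual_kinds=[nodes.ARG_POS, nodes.ARG_POS, nodes.ARG_NAMED],
    actual_names=[None, None, "kw"],
    formal_kinds=[nodes.ARG_POS, nodes.ARG_POS, nodes.ARG_NAMED],
    formal_names=["a", "b", "kw"],
    actual_arg_type=lambda i: AnyType(TypeOfAny.special_form),
)
assert mapping == [[0], [1], [2]]  # one actual, in order, for each formal
```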
@@ -144,19 +160,21 @@ def f(x: int, *args: str) -> None: ...
needs a separate instance since instances have per-call state.
"""
- def __init__(self, context: 'ArgumentInferContext') -> None:
+ def __init__(self, context: ArgumentInferContext) -> None:
# Next tuple *args index to use.
self.tuple_index = 0
# Keyword arguments in TypedDict **kwargs used.
- self.kwargs_used: Set[str] = set()
+ self.kwargs_used: set[str] = set()
# Type context for `*` and `**` arg kinds.
self.context = context
- def expand_actual_type(self,
- actual_type: Type,
- actual_kind: nodes.ArgKind,
- formal_name: Optional[str],
- formal_kind: nodes.ArgKind) -> Type:
+ def expand_actual_type(
+ self,
+ actual_type: Type,
+ actual_kind: nodes.ArgKind,
+ formal_name: str | None,
+ formal_kind: nodes.ArgKind,
+ ) -> Type:
"""Return the actual (caller) type(s) of a formal argument with the given kinds.
If the actual argument is a tuple *args, return the next individual tuple item that
@@ -168,14 +186,15 @@ def expand_actual_type(self,
This is supposed to be called for each formal, in order. Call multiple times per
formal if multiple actuals map to a formal.
"""
+ original_actual = actual_type
actual_type = get_proper_type(actual_type)
if actual_kind == nodes.ARG_STAR:
if isinstance(actual_type, Instance) and actual_type.args:
from mypy.subtypes import is_subtype
+
if is_subtype(actual_type, self.context.iterable_type):
return map_instance_to_supertype(
- actual_type,
- self.context.iterable_type.type,
+ actual_type, self.context.iterable_type.type
).args[0]
else:
# We cannot properly unpack anything other
@@ -198,6 +217,7 @@ def expand_actual_type(self,
return AnyType(TypeOfAny.from_error)
elif actual_kind == nodes.ARG_STAR2:
from mypy.subtypes import is_subtype
+
if isinstance(actual_type, TypedDictType):
if formal_kind != nodes.ARG_STAR2 and formal_name in actual_type.items:
# Lookup type based on keyword argument name.
@@ -208,16 +228,15 @@ def expand_actual_type(self,
self.kwargs_used.add(formal_name)
return actual_type.items[formal_name]
elif (
- isinstance(actual_type, Instance) and
- len(actual_type.args) > 1 and
- is_subtype(actual_type, self.context.mapping_type)
+ isinstance(actual_type, Instance)
+ and len(actual_type.args) > 1
+ and is_subtype(actual_type, self.context.mapping_type)
):
# Only `Mapping` type can be unpacked with `**`.
# Other types will produce an error somewhere else.
- return map_instance_to_supertype(
- actual_type,
- self.context.mapping_type.type,
- ).args[1]
+ return map_instance_to_supertype(actual_type, self.context.mapping_type.type).args[
+ 1
+ ]
elif isinstance(actual_type, ParamSpecType):
# ParamSpec is valid in **kwargs but it can't be unpacked.
return actual_type
@@ -225,4 +244,4 @@ def expand_actual_type(self,
return AnyType(TypeOfAny.from_error)
else:
# No translation for other kinds -- 1:1 mapping.
- return actual_type
+ return original_actual
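
The `*`/`**` expansion rules implemented by `expand_actual_type` can be restated as ordinary checkable code; a sketch with invented names, no mypy imports:

```python
from __future__ import annotations

from typing import Mapping

from typing_extensions import TypedDict


class Point(TypedDict):
    x: int
    y: int


def move(x: int, y: int) -> None: ...


def log_all(**kwargs: float) -> None: ...


p: Point = {"x": 1, "y": 2}
move(**p)  # TypedDict: each key is looked up by formal name

m: Mapping[str, float] = {"speed": 1.0}
log_all(**m)  # Mapping: its value type feeds every remaining formal
```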
diff --git a/mypy/backports.py b/mypy/backports.py
deleted file mode 100644
index df5afcb2416f..000000000000
--- a/mypy/backports.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import sys
-from contextlib import contextmanager
-from typing import Iterator
-
-if sys.version_info < (3, 6):
- from collections import OrderedDict as OrderedDict # noqa: F401
-else:
- # OrderedDict is kind of slow, so for most of our uses in Python 3.6
- # and later we'd rather just use dict
- OrderedDict = dict
-
-
-if sys.version_info < (3, 7):
- @contextmanager
- def nullcontext() -> Iterator[None]:
- yield
-else:
- from contextlib import nullcontext as nullcontext # noqa: F401
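
With Python 3.7 as the new floor, both shims collapse into the standard library. A small sketch of the direct replacements:

```python
from __future__ import annotations

from contextlib import nullcontext  # stdlib since 3.7; no shim required

# Plain dict preserves insertion order on all supported Pythons, so it
# replaces OrderedDict wherever only the ordering (not the extra API) mattered.
cache: dict[str, int] = {}

with nullcontext():
    cache["a"] = 1

assert list(cache) == ["a"]
```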
diff --git a/mypy/binder.py b/mypy/binder.py
index 2f83ffb095fc..d822aecec2f3 100644
--- a/mypy/binder.py
+++ b/mypy/binder.py
@@ -1,22 +1,28 @@
-from contextlib import contextmanager
-from collections import defaultdict
+from __future__ import annotations
-from typing import Dict, List, Set, Iterator, Union, Optional, Tuple, cast
-from typing_extensions import DefaultDict, TypeAlias as _TypeAlias
+from collections import defaultdict
+from contextlib import contextmanager
+from typing import DefaultDict, Iterator, List, Optional, Tuple, Union, cast
+from typing_extensions import TypeAlias as _TypeAlias
-from mypy.types import (
- Type, AnyType, PartialType, UnionType, TypeOfAny, NoneType, get_proper_type
-)
-from mypy.subtypes import is_subtype
-from mypy.join import join_simple
-from mypy.sametypes import is_same_type
from mypy.erasetype import remove_instance_last_known_values
-from mypy.nodes import Expression, Var, RefExpr
+from mypy.join import join_simple
from mypy.literals import Key, literal, literal_hash, subkeys
-from mypy.nodes import IndexExpr, MemberExpr, AssignmentExpr, NameExpr
-
+from mypy.nodes import Expression, IndexExpr, MemberExpr, NameExpr, RefExpr, TypeInfo, Var
+from mypy.subtypes import is_same_type, is_subtype
+from mypy.types import (
+ AnyType,
+ NoneType,
+ PartialType,
+ Type,
+ TypeOfAny,
+ TypeType,
+ UnionType,
+ get_proper_type,
+)
+from mypy.typevars import fill_typevars_with_any
-BindableExpression: _TypeAlias = Union[IndexExpr, MemberExpr, AssignmentExpr, NameExpr]
+BindableExpression: _TypeAlias = Union[IndexExpr, MemberExpr, NameExpr]
class Frame:
@@ -33,7 +39,7 @@ class Frame:
def __init__(self, id: int, conditional_frame: bool = False) -> None:
self.id = id
- self.types: Dict[Key, Type] = {}
+ self.types: dict[Key, Type] = {}
self.unreachable = False
self.conditional_frame = conditional_frame
@@ -69,9 +75,10 @@ class A:
reveal_type(lst[0].a) # str
```
"""
+
# Stored assignments for situations with tuple/list lvalue and rvalue of union type.
# This maps an expression to a list of bound types for every item in the union type.
- type_assignments: Optional[Assigns] = None
+ type_assignments: Assigns | None = None
def __init__(self) -> None:
self.next_id = 1
@@ -89,27 +96,27 @@ def __init__(self) -> None:
# the end of the frame or by a loop control construct
# or raised exception. The last element of self.frames
# has no corresponding element in this list.
- self.options_on_return: List[List[Frame]] = []
+ self.options_on_return: list[list[Frame]] = []
# Maps literal_hash(expr) to get_declaration(expr)
# for every expr stored in the binder
- self.declarations: Dict[Key, Optional[Type]] = {}
+ self.declarations: dict[Key, Type | None] = {}
# Set of other keys to invalidate if a key is changed, e.g. x -> {x.a, x[0]}
# Whenever a new key (e.g. x.a.b) is added, we update this
- self.dependencies: Dict[Key, Set[Key]] = {}
+ self.dependencies: dict[Key, set[Key]] = {}
# Whether the last pop changed the newly top frame on exit
self.last_pop_changed = False
- self.try_frames: Set[int] = set()
- self.break_frames: List[int] = []
- self.continue_frames: List[int] = []
+ self.try_frames: set[int] = set()
+ self.break_frames: list[int] = []
+ self.continue_frames: list[int] = []
def _get_id(self) -> int:
self.next_id += 1
return self.next_id
- def _add_dependencies(self, key: Key, value: Optional[Key] = None) -> None:
+ def _add_dependencies(self, key: Key, value: Key | None = None) -> None:
if value is None:
value = key
else:
@@ -127,7 +134,7 @@ def push_frame(self, conditional_frame: bool = False) -> Frame:
def _put(self, key: Key, type: Type, index: int = -1) -> None:
self.frames[index].types[key] = type
- def _get(self, key: Key, index: int = -1) -> Optional[Type]:
+ def _get(self, key: Key, index: int = -1) -> Type | None:
if index < 0:
index += len(self.frames)
for i in range(index, -1, -1):
@@ -136,12 +143,12 @@ def _get(self, key: Key, index: int = -1) -> Optional[Type]:
return None
def put(self, expr: Expression, typ: Type) -> None:
- if not isinstance(expr, (IndexExpr, MemberExpr, AssignmentExpr, NameExpr)):
+ if not isinstance(expr, (IndexExpr, MemberExpr, NameExpr)):
return
if not literal(expr):
return
key = literal_hash(expr)
- assert key is not None, 'Internal error: binder tried to put non-literal'
+ assert key is not None, "Internal error: binder tried to put non-literal"
if key not in self.declarations:
self.declarations[key] = get_declaration(expr)
self._add_dependencies(key)
@@ -153,9 +160,9 @@ def unreachable(self) -> None:
def suppress_unreachable_warnings(self) -> None:
self.frames[-1].suppress_unreachable_warnings = True
- def get(self, expr: Expression) -> Optional[Type]:
+ def get(self, expr: Expression) -> Type | None:
key = literal_hash(expr)
- assert key is not None, 'Internal error: binder tried to get non-literal'
+ assert key is not None, "Internal error: binder tried to get non-literal"
return self._get(key)
def is_unreachable(self) -> bool:
@@ -170,7 +177,7 @@ def is_unreachable_warning_suppressed(self) -> bool:
def cleanse(self, expr: Expression) -> None:
"""Remove all references to a Node from the binder."""
key = literal_hash(expr)
- assert key is not None, 'Internal error: binder tried cleanse non-literal'
+ assert key is not None, "Internal error: binder tried cleanse non-literal"
self._cleanse_key(key)
def _cleanse_key(self, key: Key) -> None:
@@ -179,7 +186,7 @@ def _cleanse_key(self, key: Key) -> None:
if key in frame.types:
del frame.types[key]
- def update_from_options(self, frames: List[Frame]) -> bool:
+ def update_from_options(self, frames: list[Frame]) -> bool:
"""Update the frame to reflect that each key will be updated
as in one of the frames. Return whether any item changes.
@@ -189,7 +196,7 @@ def update_from_options(self, frames: List[Frame]) -> bool:
frames = [f for f in frames if not f.unreachable]
changed = False
- keys = set(key for f in frames for key in f.types)
+ keys = {key for f in frames for key in f.types}
for key in keys:
current_value = self._get(key)
@@ -239,7 +246,7 @@ def pop_frame(self, can_skip: bool, fall_through: int) -> Frame:
return result
@contextmanager
- def accumulate_type_assignments(self) -> 'Iterator[Assigns]':
+ def accumulate_type_assignments(self) -> Iterator[Assigns]:
"""Push a new map to collect assigned types in multiassign from union.
If this map is not None, actual binding is deferred until all items in
@@ -253,17 +260,13 @@ def accumulate_type_assignments(self) -> 'Iterator[Assigns]':
yield self.type_assignments
self.type_assignments = old_assignments
- def assign_type(self, expr: Expression,
- type: Type,
- declared_type: Optional[Type],
- restrict_any: bool = False) -> None:
+ def assign_type(
+ self, expr: Expression, type: Type, declared_type: Type | None, restrict_any: bool = False
+ ) -> None:
# We should erase last known value in binder, because if we are using it,
# it means that the target is not final, and therefore can't hold a literal.
type = remove_instance_last_known_values(type)
- type = get_proper_type(type)
- declared_type = get_proper_type(declared_type)
-
if self.type_assignments is not None:
# We are in a multiassign from union, defer the actual binding,
# just collect the types.
@@ -288,6 +291,8 @@ def assign_type(self, expr: Expression,
# times?
return
+ p_declared = get_proper_type(declared_type)
+ p_type = get_proper_type(type)
enclosing_type = get_proper_type(self.most_recent_enclosing_type(expr, type))
if isinstance(enclosing_type, AnyType) and not restrict_any:
# If x is Any and y is int, after x = y we do not infer that x is int.
@@ -302,19 +307,24 @@ def assign_type(self, expr: Expression,
# This overrides the normal behavior of ignoring Any assignments to variables
# in order to prevent false positives.
# (See discussion in #3526)
- elif (isinstance(type, AnyType)
- and isinstance(declared_type, UnionType)
- and any(isinstance(get_proper_type(item), NoneType) for item in declared_type.items)
- and isinstance(get_proper_type(self.most_recent_enclosing_type(expr, NoneType())),
- NoneType)):
+ elif (
+ isinstance(p_type, AnyType)
+ and isinstance(p_declared, UnionType)
+ and any(isinstance(get_proper_type(item), NoneType) for item in p_declared.items)
+ and isinstance(
+ get_proper_type(self.most_recent_enclosing_type(expr, NoneType())), NoneType
+ )
+ ):
# Replace any Nones in the union type with Any
- new_items = [type if isinstance(get_proper_type(item), NoneType) else item
- for item in declared_type.items]
+ new_items = [
+ type if isinstance(get_proper_type(item), NoneType) else item
+ for item in p_declared.items
+ ]
self.put(expr, UnionType(new_items))
- elif (isinstance(type, AnyType)
- and not (isinstance(declared_type, UnionType)
- and any(isinstance(get_proper_type(item), AnyType)
- for item in declared_type.items))):
+ elif isinstance(p_type, AnyType) and not (
+ isinstance(p_declared, UnionType)
+ and any(isinstance(get_proper_type(item), AnyType) for item in p_declared.items)
+ ):
# Assigning an Any value doesn't affect the type to avoid false negatives, unless
# there is an Any item in a declared union type.
self.put(expr, declared_type)
@@ -339,15 +349,15 @@ def invalidate_dependencies(self, expr: BindableExpression) -> None:
for dep in self.dependencies.get(key, set()):
self._cleanse_key(dep)
- def most_recent_enclosing_type(self, expr: BindableExpression, type: Type) -> Optional[Type]:
+ def most_recent_enclosing_type(self, expr: BindableExpression, type: Type) -> Type | None:
type = get_proper_type(type)
if isinstance(type, AnyType):
return get_declaration(expr)
key = literal_hash(expr)
assert key is not None
- enclosers = ([get_declaration(expr)] +
- [f.types[key] for f in self.frames
- if key in f.types and is_subtype(type, f.types[key])])
+ enclosers = [get_declaration(expr)] + [
+ f.types[key] for f in self.frames if key in f.types and is_subtype(type, f.types[key])
+ ]
return enclosers[-1]
def allow_jump(self, index: int) -> None:
@@ -356,7 +366,7 @@ def allow_jump(self, index: int) -> None:
if index < 0:
index += len(self.options_on_return)
frame = Frame(self._get_id())
- for f in self.frames[index + 1:]:
+ for f in self.frames[index + 1 :]:
frame.types.update(f.types)
if f.unreachable:
frame.unreachable = True
@@ -371,10 +381,16 @@ def handle_continue(self) -> None:
self.unreachable()
@contextmanager
- def frame_context(self, *, can_skip: bool, fall_through: int = 1,
- break_frame: int = 0, continue_frame: int = 0,
- conditional_frame: bool = False,
- try_frame: bool = False) -> Iterator[Frame]:
+ def frame_context(
+ self,
+ *,
+ can_skip: bool,
+ fall_through: int = 1,
+ break_frame: int = 0,
+ continue_frame: int = 0,
+ conditional_frame: bool = False,
+ try_frame: bool = False,
+ ) -> Iterator[Frame]:
"""Return a context manager that pushes/pops frames on enter/exit.
If can_skip is True, control flow is allowed to bypass the
@@ -432,9 +448,12 @@ def top_frame_context(self) -> Iterator[Frame]:
self.pop_frame(True, 0)
-def get_declaration(expr: BindableExpression) -> Optional[Type]:
- if isinstance(expr, RefExpr) and isinstance(expr.node, Var):
- type = get_proper_type(expr.node.type)
- if not isinstance(type, PartialType):
- return type
+def get_declaration(expr: BindableExpression) -> Type | None:
+ if isinstance(expr, RefExpr):
+ if isinstance(expr.node, Var):
+ type = expr.node.type
+ if not isinstance(get_proper_type(type), PartialType):
+ return type
+ elif isinstance(expr.node, TypeInfo):
+ return TypeType(fill_typevars_with_any(expr.node))
return None
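
The Any-assignment special case reworked above (now comparing proper types via `p_type`/`p_declared`) is easiest to see from the user side; a small checkable sketch:

```python
from typing import Any, Optional


def f(untyped: Any) -> None:
    x: Optional[str] = None
    x = untyped  # binder: the None item of the declared union becomes Any
    x.upper()  # accepted; x is Union[str, Any] here, no longer Optional
```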
diff --git a/mypy/bogus_type.py b/mypy/bogus_type.py
index eb19e9c5db48..1a61abac9732 100644
--- a/mypy/bogus_type.py
+++ b/mypy/bogus_type.py
@@ -10,10 +10,13 @@
For those cases some other technique should be used.
"""
+from __future__ import annotations
+
+from typing import Any, TypeVar
+
from mypy_extensions import FlexibleAlias
-from typing import TypeVar, Any
-T = TypeVar('T')
+T = TypeVar("T")
# This won't ever be true at runtime, but we consider it true during
# mypyc compilations.
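
For context, a sketch of how the `Bogus` alias defined in this module is typically used (mirroring, not quoting, usage elsewhere in the codebase; the class here is invented):

```python
from mypy.bogus_type import Bogus


class Node:
    # Checked as str by mypy, but degrades to Any under mypyc so the
    # compiler's inserted runtime checks don't fire while the field is unset.
    fullname: Bogus[str]
```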
diff --git a/mypy/build.py b/mypy/build.py
index f7a9e9e05e1d..a4817d1866c7 100644
--- a/mypy/build.py
+++ b/mypy/build.py
@@ -10,6 +10,9 @@
"""
# TODO: More consistent terminology, e.g. path/fnam, module/id, state/file
+from __future__ import annotations
+
+import collections
import contextlib
import errno
import gc
@@ -21,47 +24,84 @@
import sys
import time
import types
+from typing import (
+ TYPE_CHECKING,
+ AbstractSet,
+ Any,
+ Callable,
+ ClassVar,
+ Dict,
+ Iterable,
+ Iterator,
+ Mapping,
+ NamedTuple,
+ NoReturn,
+ Sequence,
+ TextIO,
+ TypeVar,
+)
+from typing_extensions import Final, TypeAlias as _TypeAlias
-from typing import (AbstractSet, Any, Dict, Iterable, Iterator, List, Sequence,
- Mapping, NamedTuple, Optional, Set, Tuple, TypeVar, Union, Callable, TextIO)
-from typing_extensions import ClassVar, Final, TYPE_CHECKING, TypeAlias as _TypeAlias
from mypy_extensions import TypedDict
-from mypy.nodes import MypyFile, ImportBase, Import, ImportFrom, ImportAll, SymbolTable
-from mypy.semanal_pass1 import SemanticAnalyzerPreAnalysis
-from mypy.semanal import SemanticAnalyzer
import mypy.semanal_main
from mypy.checker import TypeChecker
+from mypy.errors import CompileError, ErrorInfo, Errors, report_internal_error
from mypy.indirection import TypeIndirectionVisitor
-from mypy.errors import Errors, CompileError, ErrorInfo, report_internal_error
+from mypy.messages import MessageBuilder
+from mypy.nodes import Import, ImportAll, ImportBase, ImportFrom, MypyFile, SymbolTable, TypeInfo
+from mypy.partially_defined import PossiblyUndefinedVariableVisitor
+from mypy.semanal import SemanticAnalyzer
+from mypy.semanal_pass1 import SemanticAnalyzerPreAnalysis
from mypy.util import (
- DecodeError, decode_python_encoding, is_sub_path, get_mypy_comments, module_prefix,
- read_py_file, hash_digest, is_typeshed_file, is_stub_package_file, get_top_two_prefixes
+ DecodeError,
+ decode_python_encoding,
+ get_mypy_comments,
+ get_top_two_prefixes,
+ hash_digest,
+ is_stub_package_file,
+ is_sub_path,
+ is_typeshed_file,
+ module_prefix,
+ read_py_file,
+ time_ref,
+ time_spent_us,
)
+
if TYPE_CHECKING:
from mypy.report import Reports # Avoid unconditional slow import
+
+from mypy import errorcodes as codes
+from mypy.config_parser import parse_mypy_comments
from mypy.fixup import fixup_module
+from mypy.freetree import free_tree
+from mypy.fscache import FileSystemCache
+from mypy.metastore import FilesystemMetadataStore, MetadataStore, SqliteMetadataStore
from mypy.modulefinder import (
- BuildSource, compute_search_paths, FindModuleCache, SearchPaths, ModuleSearchResult,
- ModuleNotFoundReason
+ BuildSource as BuildSource,
+ BuildSourceSet as BuildSourceSet,
+ FindModuleCache,
+ ModuleNotFoundReason,
+ ModuleSearchResult,
+ SearchPaths,
+ compute_search_paths,
)
from mypy.nodes import Expression
from mypy.options import Options
from mypy.parse import parse
+from mypy.plugin import ChainedPlugin, Plugin, ReportConfigContext
+from mypy.plugins.default import DefaultPlugin
+from mypy.renaming import LimitedVariableRenameVisitor, VariableRenameVisitor
from mypy.stats import dump_type_stats
+from mypy.stubinfo import (
+ is_legacy_bundled_package,
+ legacy_bundled_packages,
+ non_bundled_packages,
+ stub_package_name,
+)
from mypy.types import Type
+from mypy.typestate import reset_global_state, type_state
from mypy.version import __version__
-from mypy.plugin import Plugin, ChainedPlugin, ReportConfigContext
-from mypy.plugins.default import DefaultPlugin
-from mypy.fscache import FileSystemCache
-from mypy.metastore import MetadataStore, FilesystemMetadataStore, SqliteMetadataStore
-from mypy.typestate import TypeState, reset_global_state
-from mypy.renaming import VariableRenameVisitor, LimitedVariableRenameVisitor
-from mypy.config_parser import parse_mypy_comments
-from mypy.freetree import free_tree
-from mypy.stubinfo import legacy_bundled_packages, is_legacy_bundled_package
-from mypy import errorcodes as codes
-
# Switch to True to produce debug output related to fine-grained incremental
# mode only that is useful during development. This produces only a subset of
@@ -71,18 +111,18 @@
# These modules are special and should always come from typeshed.
CORE_BUILTIN_MODULES: Final = {
- 'builtins',
- 'typing',
- 'types',
- 'typing_extensions',
- 'mypy_extensions',
- '_importlib_modulespec',
- 'sys',
- 'abc',
+ "builtins",
+ "typing",
+ "types",
+ "typing_extensions",
+ "mypy_extensions",
+ "_importlib_modulespec",
+ "sys",
+ "abc",
}
-Graph: _TypeAlias = Dict[str, 'State']
+Graph: _TypeAlias = Dict[str, "State"]
# TODO: Get rid of BuildResult. We might as well return a BuildManager.
@@ -97,51 +137,25 @@ class BuildResult:
errors: List of error messages.
"""
- def __init__(self, manager: 'BuildManager', graph: Graph) -> None:
+ def __init__(self, manager: BuildManager, graph: Graph) -> None:
self.manager = manager
self.graph = graph
self.files = manager.modules
self.types = manager.all_types # Non-empty if export_types True in options
self.used_cache = manager.cache_enabled
- self.errors: List[str] = [] # Filled in by build if desired
-
-
-class BuildSourceSet:
- """Efficiently test a file's membership in the set of build sources."""
-
- def __init__(self, sources: List[BuildSource]) -> None:
- self.source_text_present = False
- self.source_modules: Set[str] = set()
- self.source_paths: Set[str] = set()
-
- for source in sources:
- if source.text is not None:
- self.source_text_present = True
- elif source.path:
- self.source_paths.add(source.path)
- else:
- self.source_modules.add(source.module)
-
- def is_source(self, file: MypyFile) -> bool:
- if file.path and file.path in self.source_paths:
- return True
- elif file._fullname in self.source_modules:
- return True
- elif self.source_text_present:
- return True
- else:
- return False
-
-
-def build(sources: List[BuildSource],
- options: Options,
- alt_lib_path: Optional[str] = None,
- flush_errors: Optional[Callable[[List[str], bool], None]] = None,
- fscache: Optional[FileSystemCache] = None,
- stdout: Optional[TextIO] = None,
- stderr: Optional[TextIO] = None,
- extra_plugins: Optional[Sequence[Plugin]] = None,
- ) -> BuildResult:
+ self.errors: list[str] = [] # Filled in by build if desired
+
+
+def build(
+ sources: list[BuildSource],
+ options: Options,
+ alt_lib_path: str | None = None,
+ flush_errors: Callable[[list[str], bool], None] | None = None,
+ fscache: FileSystemCache | None = None,
+ stdout: TextIO | None = None,
+ stderr: TextIO | None = None,
+ extra_plugins: Sequence[Plugin] | None = None,
+) -> BuildResult:
"""Analyze a program.
A single call to build performs parsing, semantic analysis and optionally
@@ -168,7 +182,7 @@ def build(sources: List[BuildSource],
# fields for callers that want the traditional API.
messages = []
- def default_flush_errors(new_messages: List[str], is_serious: bool) -> None:
+ def default_flush_errors(new_messages: list[str], is_serious: bool) -> None:
messages.extend(new_messages)
flush_errors = flush_errors or default_flush_errors
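
A hypothetical driver for the entry point with its new signature; the file name and in-memory source are invented, everything else comes from the diff above:

```python
from mypy.build import BuildSource, build  # BuildSource is re-exported above
from mypy.options import Options

result = build([BuildSource("demo.py", None, "x: int = 'oops'\n")], Options())
for message in result.errors:  # populated via default_flush_errors
    print(message)
```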
@@ -193,16 +207,17 @@ def default_flush_errors(new_messages: List[str], is_serious: bool) -> None:
raise
-def _build(sources: List[BuildSource],
- options: Options,
- alt_lib_path: Optional[str],
- flush_errors: Callable[[List[str], bool], None],
- fscache: Optional[FileSystemCache],
- stdout: TextIO,
- stderr: TextIO,
- extra_plugins: Sequence[Plugin],
- ) -> BuildResult:
- if platform.python_implementation() == 'CPython':
+def _build(
+ sources: list[BuildSource],
+ options: Options,
+ alt_lib_path: str | None,
+ flush_errors: Callable[[list[str], bool], None],
+ fscache: FileSystemCache | None,
+ stdout: TextIO,
+ stderr: TextIO,
+ extra_plugins: Sequence[Plugin],
+) -> BuildResult:
+ if platform.python_implementation() == "CPython":
# This seems the most reasonable place to tune garbage collection.
gc.set_threshold(150 * 1000)
@@ -214,20 +229,23 @@ def _build(sources: List[BuildSource],
reports = None
if options.report_dirs:
# Import lazily to avoid slowing down startup.
- from mypy.report import Reports # noqa
+ from mypy.report import Reports
+
reports = Reports(data_dir, options.report_dirs)
source_set = BuildSourceSet(sources)
cached_read = fscache.read
- errors = Errors(options.show_error_context,
- options.show_column_numbers,
- options.show_error_codes,
- options.pretty,
- lambda path: read_py_file(path, cached_read, options.python_version),
- options.show_absolute_path,
- options.enabled_error_codes,
- options.disabled_error_codes,
- options.many_errors_threshold)
+ errors = Errors(
+ options.show_error_context,
+ options.show_column_numbers,
+ options.hide_error_codes,
+ options.pretty,
+ options.show_error_end,
+ lambda path: read_py_file(path, cached_read),
+ options.show_absolute_path,
+ options.many_errors_threshold,
+ options,
+ )
plugin, snapshot = load_plugins(options, errors, stdout, extra_plugins)
# Add catch-all .gitignore to cache dir if we created it
@@ -236,35 +254,46 @@ def _build(sources: List[BuildSource],
# Construct a build manager object to hold state during the build.
#
# Ignore current directory prefix in error messages.
- manager = BuildManager(data_dir, search_paths,
- ignore_prefix=os.getcwd(),
- source_set=source_set,
- reports=reports,
- options=options,
- version_id=__version__,
- plugin=plugin,
- plugins_snapshot=snapshot,
- errors=errors,
- flush_errors=flush_errors,
- fscache=fscache,
- stdout=stdout,
- stderr=stderr)
+ manager = BuildManager(
+ data_dir,
+ search_paths,
+ ignore_prefix=os.getcwd(),
+ source_set=source_set,
+ reports=reports,
+ options=options,
+ version_id=__version__,
+ plugin=plugin,
+ plugins_snapshot=snapshot,
+ errors=errors,
+ flush_errors=flush_errors,
+ fscache=fscache,
+ stdout=stdout,
+ stderr=stderr,
+ )
manager.trace(repr(options))
reset_global_state()
try:
graph = dispatch(sources, manager, stdout)
if not options.fine_grained_incremental:
- TypeState.reset_all_subtype_caches()
+ type_state.reset_all_subtype_caches()
+ if options.timing_stats is not None:
+ dump_timing_stats(options.timing_stats, graph)
+ if options.line_checking_stats is not None:
+ dump_line_checking_stats(options.line_checking_stats, graph)
return BuildResult(manager, graph)
finally:
t0 = time.time()
manager.metastore.commit()
manager.add_stats(cache_commit_time=time.time() - t0)
- manager.log("Build finished in %.3f seconds with %d modules, and %d errors" %
- (time.time() - manager.start_time,
- len(manager.modules),
- manager.errors.num_messages()))
+ manager.log(
+ "Build finished in %.3f seconds with %d modules, and %d errors"
+ % (
+ time.time() - manager.start_time,
+ len(manager.modules),
+ manager.errors.num_messages(),
+ )
+ )
manager.dump_stats()
if reports is not None:
# Finish the HTML or XML reports even if CompileError was raised.
@@ -296,35 +325,36 @@ def normpath(path: str, options: Options) -> str:
return os.path.abspath(path)
-CacheMeta = NamedTuple('CacheMeta',
- [('id', str),
- ('path', str),
- ('mtime', int),
- ('size', int),
- ('hash', str),
- ('dependencies', List[str]), # names of imported modules
- ('data_mtime', int), # mtime of data_json
- ('data_json', str), # path of .data.json
- ('suppressed', List[str]), # dependencies that weren't imported
- ('options', Optional[Dict[str, object]]), # build options
- # dep_prios and dep_lines are in parallel with
- # dependencies + suppressed.
- ('dep_prios', List[int]),
- ('dep_lines', List[int]),
- ('interface_hash', str), # hash representing the public interface
- ('version_id', str), # mypy version for cache invalidation
- ('ignore_all', bool), # if errors were ignored
- ('plugin_data', Any), # config data from plugins
- ])
+class CacheMeta(NamedTuple):
+ id: str
+ path: str
+ mtime: int
+ size: int
+ hash: str
+ dependencies: list[str] # names of imported modules
+ data_mtime: int # mtime of data_json
+ data_json: str # path of .data.json
+ suppressed: list[str] # dependencies that weren't imported
+ options: dict[str, object] | None # build options
+ # dep_prios and dep_lines are in parallel with dependencies + suppressed
+ dep_prios: list[int]
+ dep_lines: list[int]
+ interface_hash: str # hash representing the public interface
+ version_id: str # mypy version for cache invalidation
+ ignore_all: bool # if errors were ignored
+ plugin_data: Any # config data from plugins
+
+
# NOTE: dependencies + suppressed == all reachable imports;
# suppressed contains those reachable imports that were prevented by
# silent mode or simply not found.
+
# Metadata for the fine-grained dependencies file associated with a module.
-FgDepMeta = TypedDict('FgDepMeta', {'path': str, 'mtime': int})
+FgDepMeta = TypedDict("FgDepMeta", {"path": str, "mtime": int})
-def cache_meta_from_dict(meta: Dict[str, Any], data_json: str) -> CacheMeta:
+def cache_meta_from_dict(meta: dict[str, Any], data_json: str) -> CacheMeta:
"""Build a CacheMeta object from a json metadata dictionary
Args:
@@ -333,22 +363,22 @@ def cache_meta_from_dict(meta: Dict[str, Any], data_json: str) -> CacheMeta:
"""
sentinel: Any = None # Values to be validated by the caller
return CacheMeta(
- meta.get('id', sentinel),
- meta.get('path', sentinel),
- int(meta['mtime']) if 'mtime' in meta else sentinel,
- meta.get('size', sentinel),
- meta.get('hash', sentinel),
- meta.get('dependencies', []),
- int(meta['data_mtime']) if 'data_mtime' in meta else sentinel,
+ meta.get("id", sentinel),
+ meta.get("path", sentinel),
+ int(meta["mtime"]) if "mtime" in meta else sentinel,
+ meta.get("size", sentinel),
+ meta.get("hash", sentinel),
+ meta.get("dependencies", []),
+ int(meta["data_mtime"]) if "data_mtime" in meta else sentinel,
data_json,
- meta.get('suppressed', []),
- meta.get('options'),
- meta.get('dep_prios', []),
- meta.get('dep_lines', []),
- meta.get('interface_hash', ''),
- meta.get('version_id', sentinel),
- meta.get('ignore_all', True),
- meta.get('plugin_data', None),
+ meta.get("suppressed", []),
+ meta.get("options"),
+ meta.get("dep_prios", []),
+ meta.get("dep_lines", []),
+ meta.get("interface_hash", ""),
+ meta.get("version_id", sentinel),
+ meta.get("ignore_all", True),
+ meta.get("plugin_data", None),
)
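
The `CacheMeta` rewrite above is purely syntactic. The two `NamedTuple` spellings are equivalent, as this self-contained sketch (with invented fields) shows:

```python
from typing import List, NamedTuple

PointA = NamedTuple("PointA", [("x", int), ("ys", List[int])])


class PointB(NamedTuple):
    x: int
    ys: List[int]  # the class form allows per-field comments, as CacheMeta now has


assert PointA(1, [2])._asdict() == PointB(1, [2])._asdict()
```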
@@ -377,7 +407,7 @@ def import_priority(imp: ImportBase, toplevel_priority: int) -> int:
def load_plugins_from_config(
options: Options, errors: Errors, stdout: TextIO
-) -> Tuple[List[Plugin], Dict[str, str]]:
+) -> tuple[list[Plugin], dict[str, str]]:
"""Load all configured plugins.
Return a list of all the loaded plugins from the config file.
@@ -386,31 +416,31 @@ def load_plugins_from_config(
"""
import importlib
- snapshot: Dict[str, str] = {}
+ snapshot: dict[str, str] = {}
if not options.config_file:
return [], snapshot
- line = find_config_file_line_number(options.config_file, 'mypy', 'plugins')
+ line = find_config_file_line_number(options.config_file, "mypy", "plugins")
if line == -1:
line = 1 # We need to pick some line number that doesn't look too confusing
- def plugin_error(message: str) -> None:
+ def plugin_error(message: str) -> NoReturn:
errors.report(line, 0, message)
errors.raise_error(use_stdout=False)
- custom_plugins: List[Plugin] = []
- errors.set_file(options.config_file, None)
+ custom_plugins: list[Plugin] = []
+ errors.set_file(options.config_file, None, options)
for plugin_path in options.plugins:
- func_name = 'plugin'
- plugin_dir: Optional[str] = None
- if ':' in os.path.basename(plugin_path):
- plugin_path, func_name = plugin_path.rsplit(':', 1)
- if plugin_path.endswith('.py'):
+ func_name = "plugin"
+ plugin_dir: str | None = None
+ if ":" in os.path.basename(plugin_path):
+ plugin_path, func_name = plugin_path.rsplit(":", 1)
+ if plugin_path.endswith(".py"):
# Plugin paths can be relative to the config file location.
plugin_path = os.path.join(os.path.dirname(options.config_file), plugin_path)
if not os.path.isfile(plugin_path):
- plugin_error('Can\'t find plugin "{}"'.format(plugin_path))
+ plugin_error(f'Can\'t find plugin "{plugin_path}"')
# Use an absolute path to avoid populating the cache entry
# for 'tmp' during tests, since it will be different in
# different tests.
@@ -418,56 +448,58 @@ def plugin_error(message: str) -> None:
fnam = os.path.basename(plugin_path)
module_name = fnam[:-3]
sys.path.insert(0, plugin_dir)
- elif re.search(r'[\\/]', plugin_path):
+ elif re.search(r"[\\/]", plugin_path):
fnam = os.path.basename(plugin_path)
- plugin_error('Plugin "{}" does not have a .py extension'.format(fnam))
+ plugin_error(f'Plugin "{fnam}" does not have a .py extension')
else:
module_name = plugin_path
try:
module = importlib.import_module(module_name)
except Exception as exc:
- plugin_error('Error importing plugin "{}": {}'.format(plugin_path, exc))
+ plugin_error(f'Error importing plugin "{plugin_path}": {exc}')
finally:
if plugin_dir is not None:
assert sys.path[0] == plugin_dir
del sys.path[0]
if not hasattr(module, func_name):
- plugin_error('Plugin "{}" does not define entry point function "{}"'.format(
- plugin_path, func_name))
+ plugin_error(
+ 'Plugin "{}" does not define entry point function "{}"'.format(
+ plugin_path, func_name
+ )
+ )
try:
plugin_type = getattr(module, func_name)(__version__)
except Exception:
- print('Error calling the plugin(version) entry point of {}\n'.format(plugin_path),
- file=stdout)
+ print(f"Error calling the plugin(version) entry point of {plugin_path}\n", file=stdout)
raise # Propagate to display traceback
if not isinstance(plugin_type, type):
plugin_error(
'Type object expected as the return value of "plugin"; got {!r} (in {})'.format(
- plugin_type, plugin_path))
+ plugin_type, plugin_path
+ )
+ )
if not issubclass(plugin_type, Plugin):
plugin_error(
'Return value of "plugin" must be a subclass of "mypy.plugin.Plugin" '
- '(in {})'.format(plugin_path))
+ "(in {})".format(plugin_path)
+ )
try:
custom_plugins.append(plugin_type(options))
snapshot[module_name] = take_module_snapshot(module)
except Exception:
- print('Error constructing plugin instance of {}\n'.format(plugin_type.__name__),
- file=stdout)
+ print(f"Error constructing plugin instance of {plugin_type.__name__}\n", file=stdout)
raise # Propagate to display traceback
return custom_plugins, snapshot
-def load_plugins(options: Options,
- errors: Errors,
- stdout: TextIO,
- extra_plugins: Sequence[Plugin],
- ) -> Tuple[Plugin, Dict[str, str]]:
+def load_plugins(
+ options: Options, errors: Errors, stdout: TextIO, extra_plugins: Sequence[Plugin]
+) -> tuple[Plugin, dict[str, str]]:
"""Load all configured plugins.
Return a plugin that encapsulates all plugins chained together. Always
@@ -493,14 +525,14 @@ def take_module_snapshot(module: types.ModuleType) -> str:
We record _both_ hash and the version to detect more possible changes
(e.g. if there is a change in modules imported by a plugin).
"""
- if hasattr(module, '__file__'):
+ if hasattr(module, "__file__"):
assert module.__file__ is not None
- with open(module.__file__, 'rb') as f:
+ with open(module.__file__, "rb") as f:
digest = hash_digest(f.read())
else:
- digest = 'unknown'
- ver = getattr(module, '__version__', 'none')
- return '{}:{}'.format(ver, digest)
+ digest = "unknown"
+ ver = getattr(module, "__version__", "none")
+ return f"{ver}:{digest}"
def find_config_file_line_number(path: str, section: str, setting_name: str) -> int:
@@ -514,10 +546,10 @@ def find_config_file_line_number(path: str, section: str, setting_name: str) ->
with open(path, encoding="UTF-8") as f:
for i, line in enumerate(f):
line = line.strip()
- if line.startswith('[') and line.endswith(']'):
+ if line.startswith("[") and line.endswith("]"):
current_section = line[1:-1].strip()
- in_desired_section = (current_section == section)
- elif in_desired_section and re.match(r'{}\s*='.format(setting_name), line):
+ in_desired_section = current_section == section
+ elif in_desired_section and re.match(rf"{setting_name}\s*=", line):
results.append(i + 1)
if len(results) == 1:
return results[0]
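
What the `rf`-string match above does, in isolation; the config content is invented:

```python
import re

config_lines = ["[mypy]", "plugins = my_plugin.py", "[mypy-foo.*]", "strict = True"]
setting_name = "plugins"
in_desired_section = False
for i, line in enumerate(config_lines):
    line = line.strip()
    if line.startswith("[") and line.endswith("]"):
        in_desired_section = line[1:-1].strip() == "mypy"
    elif in_desired_section and re.match(rf"{setting_name}\s*=", line):
        print(i + 1)  # prints 2: the 1-based line assigning "plugins"
```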
@@ -568,22 +600,24 @@ class BuildManager:
ast_cache: AST cache to speed up mypy daemon
"""
- def __init__(self, data_dir: str,
- search_paths: SearchPaths,
- ignore_prefix: str,
- source_set: BuildSourceSet,
- reports: 'Optional[Reports]',
- options: Options,
- version_id: str,
- plugin: Plugin,
- plugins_snapshot: Dict[str, str],
- errors: Errors,
- flush_errors: Callable[[List[str], bool], None],
- fscache: FileSystemCache,
- stdout: TextIO,
- stderr: TextIO,
- ) -> None:
- self.stats: Dict[str, Any] = {} # Values are ints or floats
+ def __init__(
+ self,
+ data_dir: str,
+ search_paths: SearchPaths,
+ ignore_prefix: str,
+ source_set: BuildSourceSet,
+ reports: Reports | None,
+ options: Options,
+ version_id: str,
+ plugin: Plugin,
+ plugins_snapshot: dict[str, str],
+ errors: Errors,
+ flush_errors: Callable[[list[str], bool], None],
+ fscache: FileSystemCache,
+ stdout: TextIO,
+ stderr: TextIO,
+ ) -> None:
+ self.stats: dict[str, Any] = {} # Values are ints or floats
self.stdout = stdout
self.stderr = stderr
self.start_time = time.time()
@@ -595,50 +629,76 @@ def __init__(self, data_dir: str,
self.reports = reports
self.options = options
self.version_id = version_id
- self.modules: Dict[str, MypyFile] = {}
- self.missing_modules: Set[str] = set()
- self.fg_deps_meta: Dict[str, FgDepMeta] = {}
+ self.modules: dict[str, MypyFile] = {}
+ self.missing_modules: set[str] = set()
+ self.fg_deps_meta: dict[str, FgDepMeta] = {}
# fg_deps holds the dependencies of every module that has been
# processed. We store this in BuildManager so that we can compute
# dependencies as we go, which allows us to free ASTs and type information,
# saving a ton of memory on net.
- self.fg_deps: Dict[str, Set[str]] = {}
+ self.fg_deps: dict[str, set[str]] = {}
# Always convert the plugin to a ChainedPlugin so that it can be manipulated if needed
if not isinstance(plugin, ChainedPlugin):
plugin = ChainedPlugin(options, [plugin])
self.plugin = plugin
# Set of namespaces (module or class) that are being populated during semantic
# analysis and may have missing definitions.
- self.incomplete_namespaces: Set[str] = set()
+ self.incomplete_namespaces: set[str] = set()
self.semantic_analyzer = SemanticAnalyzer(
self.modules,
self.missing_modules,
self.incomplete_namespaces,
self.errors,
- self.plugin)
- self.all_types: Dict[Expression, Type] = {} # Enabled by export_types
+ self.plugin,
+ )
+ self.all_types: dict[Expression, Type] = {} # Enabled by export_types
self.indirection_detector = TypeIndirectionVisitor()
- self.stale_modules: Set[str] = set()
- self.rechecked_modules: Set[str] = set()
+ self.stale_modules: set[str] = set()
+ self.rechecked_modules: set[str] = set()
self.flush_errors = flush_errors
has_reporters = reports is not None and reports.reporters
- self.cache_enabled = (options.incremental
- and (not options.fine_grained_incremental
- or options.use_fine_grained_cache)
- and not has_reporters)
+ self.cache_enabled = (
+ options.incremental
+ and (not options.fine_grained_incremental or options.use_fine_grained_cache)
+ and not has_reporters
+ )
self.fscache = fscache
- self.find_module_cache = FindModuleCache(self.search_paths, self.fscache, self.options)
+ self.find_module_cache = FindModuleCache(
+ self.search_paths, self.fscache, self.options, source_set=self.source_set
+ )
+ for module in CORE_BUILTIN_MODULES:
+ if options.use_builtins_fixtures:
+ continue
+ if module == "_importlib_modulespec":
+ continue
+ path = self.find_module_cache.find_module(module)
+ if not isinstance(path, str):
+ raise CompileError(
+ [f"Failed to find builtin module {module}, perhaps typeshed is broken?"]
+ )
+ if is_typeshed_file(options.abs_custom_typeshed_dir, path) or is_stub_package_file(
+ path
+ ):
+ continue
+
+ raise CompileError(
+ [
+ f'mypy: "{os.path.relpath(path)}" shadows library module "{module}"',
+ f'note: A user-defined top-level module with name "{module}" is not supported',
+ ]
+ )
+
self.metastore = create_metastore(options)
# a mapping from source files to their corresponding shadow files
# for efficient lookup
- self.shadow_map: Dict[str, str] = {}
+ self.shadow_map: dict[str, str] = {}
if self.options.shadow_file is not None:
- self.shadow_map = {source_file: shadow_file
- for (source_file, shadow_file)
- in self.options.shadow_file}
+ self.shadow_map = {
+ source_file: shadow_file for (source_file, shadow_file) in self.options.shadow_file
+ }
# a mapping from each file being typechecked to its possible shadow file
- self.shadow_equivalence_map: Dict[str, Optional[str]] = {}
+ self.shadow_equivalence_map: dict[str, str | None] = {}
self.plugin = plugin
self.plugins_snapshot = plugins_snapshot
self.old_plugins_snapshot = read_plugins_snapshot(self)
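
The new startup check above, boiled down to its essence; this is a sketch, not the real lookup, which goes through `FindModuleCache` and the typeshed/stub-package tests:

```python
from __future__ import annotations

import os

CORE = {"builtins", "typing", "types", "sys", "abc"}  # subset, for illustration


def first_shadowed(search_dir: str) -> str | None:
    """Return a core module that a user-level .py file would shadow, if any."""
    for module in sorted(CORE):
        candidate = os.path.join(search_dir, module + ".py")
        if os.path.isfile(candidate):  # a user file, not typeshed: reject
            return module
    return None


# e.g. first_shadowed(".") == "typing" if a stray typing.py sits in the cwd
```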
@@ -646,9 +706,9 @@ def __init__(self, data_dir: str,
# Fine grained targets (module top levels and top level functions) processed by
# the semantic analyzer, used only for testing. Currently used only by the new
# semantic analyzer.
- self.processed_targets: List[str] = []
+ self.processed_targets: list[str] = []
# Missing stub packages encountered.
- self.missing_stub_packages: Set[str] = set()
+ self.missing_stub_packages: set[str] = set()
# Cache for mypy ASTs that have completed semantic analysis
# pass 1. When multiple files are added to the build in a
# single daemon increment, only one of the files gets added
@@ -656,13 +716,13 @@ def __init__(self, data_dir: str,
# until all the files have been added. This means that a
# new file can be processed O(n**2) times. This cache
# avoids most of this redundant work.
- self.ast_cache: Dict[str, Tuple[MypyFile, List[ErrorInfo]]] = {}
+ self.ast_cache: dict[str, tuple[MypyFile, list[ErrorInfo]]] = {}
def dump_stats(self) -> None:
if self.options.dump_build_stats:
print("Stats:")
for key, value in sorted(self.stats_summary().items()):
- print("{:24}{}".format(key + ":", value))
+ print(f"{key + ':':24}{value}")
def use_fine_grained_cache(self) -> bool:
return self.cache_enabled and self.options.use_fine_grained_cache
@@ -699,8 +759,7 @@ def getmtime(self, path: str) -> int:
else:
return int(self.metastore.getmtime(path))
- def all_imported_modules_in_file(self,
- file: MypyFile) -> List[Tuple[int, str, int]]:
+ def all_imported_modules_in_file(self, file: MypyFile) -> list[tuple[int, str, int]]:
"""Find all reachable import statements in a file.
Return list of tuples (priority, module id, import line number)
@@ -709,40 +768,33 @@ def all_imported_modules_in_file(self,
Can generate blocking errors on bogus relative imports.
"""
- def correct_rel_imp(imp: Union[ImportFrom, ImportAll]) -> str:
+ def correct_rel_imp(imp: ImportFrom | ImportAll) -> str:
"""Function to correct for relative imports."""
file_id = file.fullname
rel = imp.relative
if rel == 0:
return imp.id
- if os.path.basename(file.path).startswith('__init__.'):
+ if os.path.basename(file.path).startswith("__init__."):
rel -= 1
if rel != 0:
file_id = ".".join(file_id.split(".")[:-rel])
new_id = file_id + "." + imp.id if imp.id else file_id
if not new_id:
- self.errors.set_file(file.path, file.name)
- self.errors.report(imp.line, 0,
- "No parent module -- cannot perform relative import",
- blocker=True)
+ self.errors.set_file(file.path, file.name, self.options)
+ self.errors.report(
+ imp.line, 0, "No parent module -- cannot perform relative import", blocker=True
+ )
return new_id
- res: List[Tuple[int, str, int]] = []
- delayed_res: List[Tuple[int, str, int]] = []
+ res: list[tuple[int, str, int]] = []
for imp in file.imports:
if not imp.is_unreachable:
if isinstance(imp, Import):
pri = import_priority(imp, PRI_MED)
ancestor_pri = import_priority(imp, PRI_LOW)
for id, _ in imp.ids:
- # We append the target (e.g. foo.bar.baz) before the ancestors (e.g. foo
- # and foo.bar) so that, if FindModuleCache finds the target module in a
- # package marked with py.typed underneath a namespace package installed in
- # site-packages, (gasp), that cache's knowledge of the ancestors
- # (aka FindModuleCache.ns_ancestors) can be primed when it is asked to find
- # the parent.
res.append((pri, id, imp.line))
ancestor_parts = id.split(".")[:-1]
ancestors = []
@@ -751,15 +803,13 @@ def correct_rel_imp(imp: Union[ImportFrom, ImportAll]) -> str:
res.append((ancestor_pri, ".".join(ancestors), imp.line))
elif isinstance(imp, ImportFrom):
cur_id = correct_rel_imp(imp)
- any_are_submodules = False
all_are_submodules = True
# Also add any imported names that are submodules.
pri = import_priority(imp, PRI_MED)
for name, __ in imp.names:
- sub_id = cur_id + '.' + name
+ sub_id = cur_id + "." + name
if self.is_module(sub_id):
res.append((pri, sub_id, imp.line))
- any_are_submodules = True
else:
all_are_submodules = False
# Add cur_id as a dependency, even if all of the
@@ -769,27 +819,27 @@ def correct_rel_imp(imp: Union[ImportFrom, ImportAll]) -> str:
# if all of the imports are submodules, do the import at a lower
# priority.
pri = import_priority(imp, PRI_HIGH if not all_are_submodules else PRI_LOW)
- # The imported module goes in after the submodules, for the same namespace
- # related reasons discussed in the Import case.
- # There is an additional twist: if none of the submodules exist,
- # we delay the import in case other imports of other submodules succeed.
- if any_are_submodules:
- res.append((pri, cur_id, imp.line))
- else:
- delayed_res.append((pri, cur_id, imp.line))
+ res.append((pri, cur_id, imp.line))
elif isinstance(imp, ImportAll):
pri = import_priority(imp, PRI_HIGH)
res.append((pri, correct_rel_imp(imp), imp.line))
- res.extend(delayed_res)
+ # Sort such that module (e.g. foo.bar.baz) comes before its ancestors (e.g. foo
+ # and foo.bar) so that, if FindModuleCache finds the target module in a
+ # package marked with py.typed underneath a namespace package installed in
+ # site-packages, (gasp), that cache's knowledge of the ancestors
+ # (aka FindModuleCache.ns_ancestors) can be primed when it is asked to find
+ # the parent.
+ res.sort(key=lambda x: -x[1].count("."))
return res
def is_module(self, id: str) -> bool:
"""Is there a file in the file system corresponding to module id?"""
return find_module_simple(id, self) is not None
- def parse_file(self, id: str, path: str, source: str, ignore_errors: bool,
- options: Options) -> MypyFile:
+ def parse_file(
+ self, id: str, path: str, source: str, ignore_errors: bool, options: Options
+ ) -> MypyFile:
"""Parse the source of a file with the given name.
Raise CompileError if there is a parse error.
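
The `res.sort` a few lines up, in miniature: deeper modules sort ahead of their ancestors, and the stable sort preserves the original order among modules of equal depth (the tuples are invented `(priority, id, line)` triples):

```python
res = [(10, "foo", 1), (10, "foo.bar.baz", 1), (10, "foo.bar", 1)]
res.sort(key=lambda x: -x[1].count("."))
assert [m for _, m, _ in res] == ["foo.bar.baz", "foo.bar", "foo"]
```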
@@ -797,10 +847,12 @@ def parse_file(self, id: str, path: str, source: str, ignore_errors: bool,
t0 = time.time()
tree = parse(source, path, id, self.errors, options=options)
tree._fullname = id
- self.add_stats(files_parsed=1,
- modules_parsed=int(not tree.is_stub),
- stubs_parsed=int(tree.is_stub),
- parse_time=time.time() - t0)
+ self.add_stats(
+ files_parsed=1,
+ modules_parsed=int(not tree.is_stub),
+ stubs_parsed=int(tree.is_stub),
+ parse_time=time.time() - t0,
+ )
if self.errors.is_blockers():
self.log("Bailing due to parse errors")
@@ -809,21 +861,20 @@ def parse_file(self, id: str, path: str, source: str, ignore_errors: bool,
self.errors.set_file_ignored_lines(path, tree.ignored_lines, ignore_errors)
return tree
- def load_fine_grained_deps(self, id: str) -> Dict[str, Set[str]]:
+ def load_fine_grained_deps(self, id: str) -> dict[str, set[str]]:
t0 = time.time()
if id in self.fg_deps_meta:
# TODO: Assert deps file wasn't changed.
- deps = json.loads(self.metastore.read(self.fg_deps_meta[id]['path']))
+ deps = json.loads(self.metastore.read(self.fg_deps_meta[id]["path"]))
else:
deps = {}
val = {k: set(v) for k, v in deps.items()}
self.add_stats(load_fg_deps_time=time.time() - t0)
return val
- def report_file(self,
- file: MypyFile,
- type_map: Dict[Expression, Type],
- options: Options) -> None:
+ def report_file(
+ self, file: MypyFile, type_map: dict[Expression, Type], options: Options
+ ) -> None:
if self.reports is not None and self.source_set.is_source(file):
self.reports.file(file, self.modules, type_map, options)
@@ -833,15 +884,16 @@ def verbosity(self) -> int:
def log(self, *message: str) -> None:
if self.verbosity() >= 1:
if message:
- print('LOG: ', *message, file=self.stderr)
+ print("LOG: ", *message, file=self.stderr)
else:
print(file=self.stderr)
self.stderr.flush()
def log_fine_grained(self, *message: str) -> None:
import mypy.build
+
if self.verbosity() >= 1:
- self.log('fine-grained:', *message)
+ self.log("fine-grained:", *message)
elif mypy.build.DEBUG_FINE_GRAINED:
# Output log in a simplified format that is quick to browse.
if message:
@@ -852,7 +904,7 @@ def log_fine_grained(self, *message: str) -> None:
def trace(self, *message: str) -> None:
if self.verbosity() >= 2:
- print('TRACE:', *message, file=self.stderr)
+ print("TRACE:", *message, file=self.stderr)
self.stderr.flush()
def add_stats(self, **kwds: Any) -> None:
@@ -866,7 +918,7 @@ def stats_summary(self) -> Mapping[str, object]:
return self.stats
-def deps_to_json(x: Dict[str, Set[str]]) -> str:
+def deps_to_json(x: dict[str, set[str]]) -> str:
return json.dumps({k: list(v) for k, v in x.items()})
@@ -880,8 +932,9 @@ def deps_to_json(x: Dict[str, Set[str]]) -> str:
FAKE_ROOT_MODULE: Final = "@root"
-def write_deps_cache(rdeps: Dict[str, Dict[str, Set[str]]],
- manager: BuildManager, graph: Graph) -> None:
+def write_deps_cache(
+ rdeps: dict[str, dict[str, set[str]]], manager: BuildManager, graph: Graph
+) -> None:
"""Write cache files for fine-grained dependencies.
Serialize fine-grained dependencies map for fine grained mode.
@@ -915,12 +968,12 @@ def write_deps_cache(rdeps: Dict[str, Dict[str, Set[str]]],
assert deps_json
manager.log("Writing deps cache", deps_json)
if not manager.metastore.write(deps_json, deps_to_json(rdeps[id])):
- manager.log("Error writing fine-grained deps JSON file {}".format(deps_json))
+ manager.log(f"Error writing fine-grained deps JSON file {deps_json}")
error = True
else:
- fg_deps_meta[id] = {'path': deps_json, 'mtime': manager.getmtime(deps_json)}
+ fg_deps_meta[id] = {"path": deps_json, "mtime": manager.getmtime(deps_json)}
- meta_snapshot: Dict[str, str] = {}
+ meta_snapshot: dict[str, str] = {}
for id, st in graph.items():
# If we didn't parse a file (so it doesn't have a
# source_hash), then it must be a module with a fresh cache,
@@ -932,20 +985,18 @@ def write_deps_cache(rdeps: Dict[str, Dict[str, Set[str]]],
hash = st.meta.hash
meta_snapshot[id] = hash
- meta = {'snapshot': meta_snapshot, 'deps_meta': fg_deps_meta}
+ meta = {"snapshot": meta_snapshot, "deps_meta": fg_deps_meta}
if not metastore.write(DEPS_META_FILE, json.dumps(meta)):
- manager.log("Error writing fine-grained deps meta JSON file {}".format(DEPS_META_FILE))
+ manager.log(f"Error writing fine-grained deps meta JSON file {DEPS_META_FILE}")
error = True
if error:
- manager.errors.set_file(_cache_dir_prefix(manager.options), None)
- manager.errors.report(0, 0, "Error writing fine-grained dependencies cache",
- blocker=True)
+ manager.errors.set_file(_cache_dir_prefix(manager.options), None, manager.options)
+ manager.errors.report(0, 0, "Error writing fine-grained dependencies cache", blocker=True)
-def invert_deps(deps: Dict[str, Set[str]],
- graph: Graph) -> Dict[str, Dict[str, Set[str]]]:
+def invert_deps(deps: dict[str, set[str]], graph: Graph) -> dict[str, dict[str, set[str]]]:
"""Splits fine-grained dependencies based on the module of the trigger.
Returns a dictionary from module ids to all dependencies on that
@@ -959,7 +1010,7 @@ def invert_deps(deps: Dict[str, Set[str]],
# Prepopulate the map for all the modules that have been processed,
# so that we always generate files for processed modules (even if
    # there aren't any dependencies on them.)
- rdeps: Dict[str, Dict[str, Set[str]]] = {id: {} for id, st in graph.items() if st.tree}
+ rdeps: dict[str, dict[str, set[str]]] = {id: {} for id, st in graph.items() if st.tree}
for trigger, targets in deps.items():
module = module_prefix(graph, trigger_to_target(trigger))
if not module or not graph[module].tree:
@@ -971,8 +1022,7 @@ def invert_deps(deps: Dict[str, Set[str]],
return rdeps
-def generate_deps_for_cache(manager: BuildManager,
- graph: Graph) -> Dict[str, Dict[str, Set[str]]]:
+def generate_deps_for_cache(manager: BuildManager, graph: Graph) -> dict[str, dict[str, set[str]]]:
"""Generate fine-grained dependencies into a form suitable for serializing.
This does a couple things:
@@ -1006,47 +1056,47 @@ def generate_deps_for_cache(manager: BuildManager,
def write_plugins_snapshot(manager: BuildManager) -> None:
"""Write snapshot of versions and hashes of currently active plugins."""
if not manager.metastore.write(PLUGIN_SNAPSHOT_FILE, json.dumps(manager.plugins_snapshot)):
- manager.errors.set_file(_cache_dir_prefix(manager.options), None)
- manager.errors.report(0, 0, "Error writing plugins snapshot",
- blocker=True)
+ manager.errors.set_file(_cache_dir_prefix(manager.options), None, manager.options)
+ manager.errors.report(0, 0, "Error writing plugins snapshot", blocker=True)
-def read_plugins_snapshot(manager: BuildManager) -> Optional[Dict[str, str]]:
+def read_plugins_snapshot(manager: BuildManager) -> dict[str, str] | None:
"""Read cached snapshot of versions and hashes of plugins from previous run."""
- snapshot = _load_json_file(PLUGIN_SNAPSHOT_FILE, manager,
- log_success='Plugins snapshot ',
- log_error='Could not load plugins snapshot: ')
+ snapshot = _load_json_file(
+ PLUGIN_SNAPSHOT_FILE,
+ manager,
+ log_success="Plugins snapshot ",
+ log_error="Could not load plugins snapshot: ",
+ )
if snapshot is None:
return None
if not isinstance(snapshot, dict):
- manager.log('Could not load plugins snapshot: cache is not a dict: {}'
- .format(type(snapshot)))
+ manager.log(f"Could not load plugins snapshot: cache is not a dict: {type(snapshot)}")
return None
return snapshot
-def read_quickstart_file(options: Options,
- stdout: TextIO,
- ) -> Optional[Dict[str, Tuple[float, int, str]]]:
- quickstart: Optional[Dict[str, Tuple[float, int, str]]] = None
+def read_quickstart_file(
+ options: Options, stdout: TextIO
+) -> dict[str, tuple[float, int, str]] | None:
+ quickstart: dict[str, tuple[float, int, str]] | None = None
if options.quickstart_file:
# This is very "best effort". If the file is missing or malformed,
# just ignore it.
- raw_quickstart: Dict[str, Any] = {}
+ raw_quickstart: dict[str, Any] = {}
try:
- with open(options.quickstart_file, "r") as f:
+ with open(options.quickstart_file) as f:
raw_quickstart = json.load(f)
quickstart = {}
for file, (x, y, z) in raw_quickstart.items():
quickstart[file] = (x, y, z)
except Exception as e:
- print("Warning: Failed to load quickstart file: {}\n".format(str(e)), file=stdout)
+ print(f"Warning: Failed to load quickstart file: {str(e)}\n", file=stdout)
return quickstart
-def read_deps_cache(manager: BuildManager,
- graph: Graph) -> Optional[Dict[str, FgDepMeta]]:
+def read_deps_cache(manager: BuildManager, graph: Graph) -> dict[str, FgDepMeta] | None:
"""Read and validate the fine-grained dependencies cache.
See the write_deps_cache documentation for more information on
@@ -1054,45 +1104,51 @@ def read_deps_cache(manager: BuildManager,
Returns None if the cache was invalid in some way.
"""
- deps_meta = _load_json_file(DEPS_META_FILE, manager,
- log_success='Deps meta ',
- log_error='Could not load fine-grained dependency metadata: ')
+ deps_meta = _load_json_file(
+ DEPS_META_FILE,
+ manager,
+ log_success="Deps meta ",
+ log_error="Could not load fine-grained dependency metadata: ",
+ )
if deps_meta is None:
return None
- meta_snapshot = deps_meta['snapshot']
+ meta_snapshot = deps_meta["snapshot"]
# Take a snapshot of the source hashes from all of the metas we found.
# (Including the ones we rejected because they were out of date.)
# We use this to verify that they match up with the proto_deps.
- current_meta_snapshot = {id: st.meta_source_hash for id, st in graph.items()
- if st.meta_source_hash is not None}
+ current_meta_snapshot = {
+ id: st.meta_source_hash for id, st in graph.items() if st.meta_source_hash is not None
+ }
common = set(meta_snapshot.keys()) & set(current_meta_snapshot.keys())
if any(meta_snapshot[id] != current_meta_snapshot[id] for id in common):
# TODO: invalidate also if options changed (like --strict-optional)?
- manager.log('Fine-grained dependencies cache inconsistent, ignoring')
+ manager.log("Fine-grained dependencies cache inconsistent, ignoring")
return None
- module_deps_metas = deps_meta['deps_meta']
+ module_deps_metas = deps_meta["deps_meta"]
+ assert isinstance(module_deps_metas, dict)
if not manager.options.skip_cache_mtime_checks:
for id, meta in module_deps_metas.items():
try:
- matched = manager.getmtime(meta['path']) == meta['mtime']
+ matched = manager.getmtime(meta["path"]) == meta["mtime"]
except FileNotFoundError:
matched = False
if not matched:
- manager.log('Invalid or missing fine-grained deps cache: {}'.format(meta['path']))
+ manager.log(f"Invalid or missing fine-grained deps cache: {meta['path']}")
return None
return module_deps_metas
-def _load_json_file(file: str, manager: BuildManager,
- log_success: str, log_error: str) -> Optional[Dict[str, Any]]:
+def _load_json_file(
+ file: str, manager: BuildManager, log_success: str, log_error: str
+) -> dict[str, Any] | None:
"""A simple helper to read a JSON file with logging."""
t0 = time.time()
try:
data = manager.metastore.read(file)
- except IOError:
+ except OSError:
manager.log(log_error + file)
return None
manager.add_stats(metastore_read_time=time.time() - t0)
@@ -1104,17 +1160,19 @@ def _load_json_file(file: str, manager: BuildManager,
result = json.loads(data)
manager.add_stats(data_json_load_time=time.time() - t1)
except json.JSONDecodeError:
- manager.errors.set_file(file, None)
- manager.errors.report(-1, -1,
- "Error reading JSON file;"
- " you likely have a bad cache.\n"
- "Try removing the {cache_dir} directory"
- " and run mypy again.".format(
- cache_dir=manager.options.cache_dir
- ),
- blocker=True)
+ manager.errors.set_file(file, None, manager.options)
+ manager.errors.report(
+ -1,
+ -1,
+ "Error reading JSON file;"
+ " you likely have a bad cache.\n"
+ "Try removing the {cache_dir} directory"
+ " and run mypy again.".format(cache_dir=manager.options.cache_dir),
+ blocker=True,
+ )
return None
else:
+ assert isinstance(result, dict)
return result
@@ -1125,7 +1183,7 @@ def _cache_dir_prefix(options: Options) -> str:
return os.curdir
cache_dir = options.cache_dir
pyversion = options.python_version
- base = os.path.join(cache_dir, '%d.%d' % pyversion)
+ base = os.path.join(cache_dir, "%d.%d" % pyversion)
return base
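`_cache_dir_prefix()` nests everything under a `major.minor` directory, so caches for different Python versions never collide. A sketch with stand-in values for the two `Options` attributes:

```python
import os

cache_dir = ".mypy_cache"  # stand-in for options.cache_dir
python_version = (3, 11)   # stand-in for options.python_version

base = os.path.join(cache_dir, "%d.%d" % python_version)
assert base == os.path.join(".mypy_cache", "3.11")
```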
@@ -1151,10 +1209,12 @@ def exclude_from_backups(target_dir: str) -> None:
cachedir_tag = os.path.join(target_dir, "CACHEDIR.TAG")
try:
with open(cachedir_tag, "x") as f:
- f.write("""Signature: 8a477f597d28d172789f06886806bc55
+ f.write(
+ """Signature: 8a477f597d28d172789f06886806bc55
# This file is a cache directory tag automatically created by mypy.
# For information about cache directory tags see https://bford.info/cachedir/
-""")
+"""
+ )
except FileExistsError:
pass
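The signature written above follows the cachedir.org convention, so backup tools can skip the directory. A sketch of how such a tool might detect the tag (the detection helper is ours; only the signature line comes from the spec):

```python
import os

CACHEDIR_SIGNATURE = "Signature: 8a477f597d28d172789f06886806bc55"

def is_tagged_cache_dir(path: str) -> bool:
    # Per https://bford.info/cachedir/, a directory is a cache if its
    # CACHEDIR.TAG starts with the signature line.
    try:
        with open(os.path.join(path, "CACHEDIR.TAG")) as f:
            return f.readline().rstrip("\n") == CACHEDIR_SIGNATURE
    except OSError:
        return False
```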
@@ -1168,7 +1228,7 @@ def create_metastore(options: Options) -> MetadataStore:
return mds
-def get_cache_names(id: str, path: str, options: Options) -> Tuple[str, str, Optional[str]]:
+def get_cache_names(id: str, path: str, options: Options) -> tuple[str, str, str | None]:
"""Return the file names for the cache files.
Args:
@@ -1193,18 +1253,18 @@ def get_cache_names(id: str, path: str, options: Options) -> Tuple[str, str, Opt
# This only makes sense when using the filesystem backed cache.
root = _cache_dir_prefix(options)
return (os.path.relpath(pair[0], root), os.path.relpath(pair[1], root), None)
- prefix = os.path.join(*id.split('.'))
- is_package = os.path.basename(path).startswith('__init__.py')
+ prefix = os.path.join(*id.split("."))
+ is_package = os.path.basename(path).startswith("__init__.py")
if is_package:
- prefix = os.path.join(prefix, '__init__')
+ prefix = os.path.join(prefix, "__init__")
deps_json = None
if options.cache_fine_grained:
- deps_json = prefix + '.deps.json'
- return (prefix + '.meta.json', prefix + '.data.json', deps_json)
+ deps_json = prefix + ".deps.json"
+ return (prefix + ".meta.json", prefix + ".data.json", deps_json)
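The mapping from module id to cache paths is mechanical for the filesystem backend; a condensed restatement of that branch (names here are ours, and the sqlite and bazel cases handled earlier in the function are ignored):

```python
from __future__ import annotations

import os

def cache_names(id: str, path: str, fine_grained: bool) -> tuple[str, str, str | None]:
    # pkg.mod -> pkg/mod.{meta,data}.json; package __init__ files pin onto __init__.
    prefix = os.path.join(*id.split("."))
    if os.path.basename(path).startswith("__init__.py"):
        prefix = os.path.join(prefix, "__init__")
    deps = prefix + ".deps.json" if fine_grained else None
    return prefix + ".meta.json", prefix + ".data.json", deps

assert cache_names("pkg.mod", "pkg/mod.py", False) == (
    os.path.join("pkg", "mod.meta.json"),
    os.path.join("pkg", "mod.data.json"),
    None,
)
```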
-def find_cache_meta(id: str, path: str, manager: BuildManager) -> Optional[CacheMeta]:
+def find_cache_meta(id: str, path: str, manager: BuildManager) -> CacheMeta | None:
"""Find cache data for a module.
Args:
@@ -1218,37 +1278,46 @@ def find_cache_meta(id: str, path: str, manager: BuildManager) -> Optional[Cache
"""
# TODO: May need to take more build options into account
meta_json, data_json, _ = get_cache_names(id, path, manager.options)
- manager.trace('Looking for {} at {}'.format(id, meta_json))
+ manager.trace(f"Looking for {id} at {meta_json}")
t0 = time.time()
- meta = _load_json_file(meta_json, manager,
- log_success='Meta {} '.format(id),
- log_error='Could not load cache for {}: '.format(id))
+ meta = _load_json_file(
+ meta_json, manager, log_success=f"Meta {id} ", log_error=f"Could not load cache for {id}: "
+ )
t1 = time.time()
if meta is None:
return None
if not isinstance(meta, dict):
- manager.log('Could not load cache for {}: meta cache is not a dict: {}'
- .format(id, repr(meta)))
+ manager.log(f"Could not load cache for {id}: meta cache is not a dict: {repr(meta)}")
return None
m = cache_meta_from_dict(meta, data_json)
t2 = time.time()
- manager.add_stats(load_meta_time=t2 - t0,
- load_meta_load_time=t1 - t0,
- load_meta_from_dict_time=t2 - t1)
+ manager.add_stats(
+ load_meta_time=t2 - t0, load_meta_load_time=t1 - t0, load_meta_from_dict_time=t2 - t1
+ )
# Don't check for a path match; that is dealt with in validate_meta().
- if (m.id != id or
- m.mtime is None or m.size is None or
- m.dependencies is None or m.data_mtime is None):
- manager.log('Metadata abandoned for {}: attributes are missing'.format(id))
+ #
+ # TODO: these `type: ignore`s wouldn't be necessary
+ # if the type annotations for CacheMeta were more accurate
+ # (all of these attributes can be `None`)
+ if (
+ m.id != id
+ or m.mtime is None # type: ignore[redundant-expr]
+ or m.size is None # type: ignore[redundant-expr]
+ or m.dependencies is None # type: ignore[redundant-expr]
+ or m.data_mtime is None
+ ):
+ manager.log(f"Metadata abandoned for {id}: attributes are missing")
return None
# Ignore cache if generated by an older mypy version.
- if ((m.version_id != manager.version_id and not manager.options.skip_version_check)
- or m.options is None
- or len(m.dependencies) + len(m.suppressed) != len(m.dep_prios)
- or len(m.dependencies) + len(m.suppressed) != len(m.dep_lines)):
- manager.log('Metadata abandoned for {}: new attributes are missing'.format(id))
+ if (
+ (m.version_id != manager.version_id and not manager.options.skip_version_check)
+ or m.options is None
+ or len(m.dependencies) + len(m.suppressed) != len(m.dep_prios)
+ or len(m.dependencies) + len(m.suppressed) != len(m.dep_lines)
+ ):
+ manager.log(f"Metadata abandoned for {id}: new attributes are missing")
return None
# Ignore cache if (relevant) options aren't the same.
@@ -1257,57 +1326,61 @@ def find_cache_meta(id: str, path: str, manager: BuildManager) -> Optional[Cache
current_options = manager.options.clone_for_module(id).select_options_affecting_cache()
if manager.options.skip_version_check:
# When we're lax about version we're also lax about platform.
- cached_options['platform'] = current_options['platform']
- if 'debug_cache' in cached_options:
+ cached_options["platform"] = current_options["platform"]
+ if "debug_cache" in cached_options:
# Older versions included debug_cache, but it's silly to compare it.
- del cached_options['debug_cache']
+ del cached_options["debug_cache"]
if cached_options != current_options:
- manager.log('Metadata abandoned for {}: options differ'.format(id))
+ manager.log(f"Metadata abandoned for {id}: options differ")
if manager.options.verbosity >= 2:
for key in sorted(set(cached_options) | set(current_options)):
if cached_options.get(key) != current_options.get(key):
- manager.trace(' {}: {} != {}'
- .format(key, cached_options.get(key), current_options.get(key)))
+ manager.trace(
+ " {}: {} != {}".format(
+ key, cached_options.get(key), current_options.get(key)
+ )
+ )
return None
if manager.old_plugins_snapshot and manager.plugins_snapshot:
# Check if plugins are still the same.
if manager.plugins_snapshot != manager.old_plugins_snapshot:
- manager.log('Metadata abandoned for {}: plugins differ'.format(id))
+ manager.log(f"Metadata abandoned for {id}: plugins differ")
return None
# So that plugins can return data with tuples in it without
# things silently always invalidating modules, we round-trip
# the config data. This isn't beautiful.
- plugin_data = json.loads(json.dumps(
- manager.plugin.report_config_data(ReportConfigContext(id, path, is_check=True))
- ))
+ plugin_data = json.loads(
+ json.dumps(manager.plugin.report_config_data(ReportConfigContext(id, path, is_check=True)))
+ )
if m.plugin_data != plugin_data:
- manager.log('Metadata abandoned for {}: plugin configuration differs'.format(id))
+ manager.log(f"Metadata abandoned for {id}: plugin configuration differs")
return None
manager.add_stats(fresh_metas=1)
return m
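The `json.loads(json.dumps(...))` round-trip above exists because plugins may return tuples, which JSON turns into lists; without normalization the freshly computed data would never compare equal to the cached copy. A tiny demonstration:

```python
import json

fresh = {"include": ("a", "b")}   # a plugin may hand back tuples...
cached = {"include": ["a", "b"]}  # ...but the cache round-trips through JSON.

assert fresh != cached                          # naive comparison: spurious miss
assert json.loads(json.dumps(fresh)) == cached  # normalized: comparison holds
```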
-def validate_meta(meta: Optional[CacheMeta], id: str, path: Optional[str],
- ignore_all: bool, manager: BuildManager) -> Optional[CacheMeta]:
- '''Checks whether the cached AST of this module can be used.
+def validate_meta(
+ meta: CacheMeta | None, id: str, path: str | None, ignore_all: bool, manager: BuildManager
+) -> CacheMeta | None:
+ """Checks whether the cached AST of this module can be used.
Returns:
None, if the cached AST is unusable.
Original meta, if mtime/size matched.
Meta with mtime updated to match source file, if hash/size matched but mtime/path didn't.
- '''
+ """
# This requires two steps. The first one is obvious: we check that the module source file
# contents are the same as they were when the cache data file was created. The second one is
# less obvious: we check that the cache data file mtime has not changed; this is needed
# because we use the cache data file mtime to propagate information about changes in the
# dependencies.
if meta is None:
- manager.log('Metadata not found for {}'.format(id))
+ manager.log(f"Metadata not found for {id}")
return None
if meta.ignore_all and not ignore_all:
- manager.log('Metadata abandoned for {}: errors were previously ignored'.format(id))
+ manager.log(f"Metadata abandoned for {id}: errors were previously ignored")
return None
t0 = time.time()
@@ -1318,10 +1391,10 @@ def validate_meta(meta: Optional[CacheMeta], id: str, path: Optional[str],
try:
data_mtime = manager.getmtime(meta.data_json)
except OSError:
- manager.log('Metadata abandoned for {}: failed to stat data_json'.format(id))
+ manager.log(f"Metadata abandoned for {id}: failed to stat data_json")
return None
if data_mtime != meta.data_mtime:
- manager.log('Metadata abandoned for {}: data cache is modified'.format(id))
+ manager.log(f"Metadata abandoned for {id}: data cache is modified")
return None
if bazel:
@@ -1331,8 +1404,8 @@ def validate_meta(meta: Optional[CacheMeta], id: str, path: Optional[str],
st = manager.get_stat(path)
except OSError:
return None
- if not (stat.S_ISREG(st.st_mode) or stat.S_ISDIR(st.st_mode)):
- manager.log('Metadata abandoned for {}: file {} does not exist'.format(id, path))
+ if not stat.S_ISDIR(st.st_mode) and not stat.S_ISREG(st.st_mode):
+ manager.log(f"Metadata abandoned for {id}: file or directory {path} does not exist")
return None
manager.add_stats(validate_stat_time=time.time() - t0)
@@ -1355,7 +1428,7 @@ def validate_meta(meta: Optional[CacheMeta], id: str, path: Optional[str],
size = st.st_size
# Bazel ensures the cache is valid.
if size != meta.size and not bazel and not fine_grained_cache:
- manager.log('Metadata abandoned for {}: file {} has different size'.format(id, path))
+ manager.log(f"Metadata abandoned for {id}: file {path} has different size")
return None
# Bazel ensures the cache is valid.
@@ -1368,7 +1441,7 @@ def validate_meta(meta: Optional[CacheMeta], id: str, path: Optional[str],
# the file is up to date even though the mtime is wrong, without needing to hash it.
qmtime, qsize, qhash = manager.quickstart_state[path]
if int(qmtime) == mtime and qsize == size and qhash == meta.hash:
- manager.log('Metadata fresh (by quickstart) for {}: file {}'.format(id, path))
+ manager.log(f"Metadata fresh (by quickstart) for {id}: file {path}")
meta = meta._replace(mtime=mtime, path=path)
return meta
@@ -1376,7 +1449,7 @@ def validate_meta(meta: Optional[CacheMeta], id: str, path: Optional[str],
try:
# dir means it is a namespace package
if stat.S_ISDIR(st.st_mode):
- source_hash = ''
+ source_hash = ""
else:
source_hash = manager.fscache.hash_digest(path)
except (OSError, UnicodeDecodeError, DecodeError):
@@ -1384,11 +1457,10 @@ def validate_meta(meta: Optional[CacheMeta], id: str, path: Optional[str],
manager.add_stats(validate_hash_time=time.time() - t0)
if source_hash != meta.hash:
if fine_grained_cache:
- manager.log('Using stale metadata for {}: file {}'.format(id, path))
+ manager.log(f"Using stale metadata for {id}: file {path}")
return meta
else:
- manager.log('Metadata abandoned for {}: file {} has different hash'.format(
- id, path))
+ manager.log(f"Metadata abandoned for {id}: file {path} has different hash")
return None
else:
t0 = time.time()
@@ -1396,38 +1468,39 @@ def validate_meta(meta: Optional[CacheMeta], id: str, path: Optional[str],
meta = meta._replace(mtime=mtime, path=path)
# Construct a dict we can pass to json.dumps() (compare to write_cache()).
meta_dict = {
- 'id': id,
- 'path': path,
- 'mtime': mtime,
- 'size': size,
- 'hash': source_hash,
- 'data_mtime': meta.data_mtime,
- 'dependencies': meta.dependencies,
- 'suppressed': meta.suppressed,
- 'options': (manager.options.clone_for_module(id)
- .select_options_affecting_cache()),
- 'dep_prios': meta.dep_prios,
- 'dep_lines': meta.dep_lines,
- 'interface_hash': meta.interface_hash,
- 'version_id': manager.version_id,
- 'ignore_all': meta.ignore_all,
- 'plugin_data': meta.plugin_data,
+ "id": id,
+ "path": path,
+ "mtime": mtime,
+ "size": size,
+ "hash": source_hash,
+ "data_mtime": meta.data_mtime,
+ "dependencies": meta.dependencies,
+ "suppressed": meta.suppressed,
+ "options": (manager.options.clone_for_module(id).select_options_affecting_cache()),
+ "dep_prios": meta.dep_prios,
+ "dep_lines": meta.dep_lines,
+ "interface_hash": meta.interface_hash,
+ "version_id": manager.version_id,
+ "ignore_all": meta.ignore_all,
+ "plugin_data": meta.plugin_data,
}
if manager.options.debug_cache:
meta_str = json.dumps(meta_dict, indent=2, sort_keys=True)
else:
meta_str = json.dumps(meta_dict)
meta_json, _, _ = get_cache_names(id, path, manager.options)
- manager.log('Updating mtime for {}: file {}, meta {}, mtime {}'
- .format(id, path, meta_json, meta.mtime))
+ manager.log(
+ "Updating mtime for {}: file {}, meta {}, mtime {}".format(
+ id, path, meta_json, meta.mtime
+ )
+ )
t1 = time.time()
manager.metastore.write(meta_json, meta_str) # Ignore errors, just an optimization.
- manager.add_stats(validate_update_time=time.time() - t1,
- validate_munging_time=t1 - t0)
+ manager.add_stats(validate_update_time=time.time() - t1, validate_munging_time=t1 - t0)
return meta
# It's a match on (id, path, size, hash, mtime).
- manager.log('Metadata fresh for {}: file {}'.format(id, path))
+ manager.log(f"Metadata fresh for {id}: file {path}")
return meta
@@ -1437,7 +1510,7 @@ def compute_hash(text: str) -> str:
# hash randomization (enabled by default in Python 3.3). See the
# note in
# https://docs.python.org/3/reference/datamodel.html#object.__hash__.
- return hash_digest(text.encode('utf-8'))
+ return hash_digest(text.encode("utf-8"))
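As the comment above notes, the builtin `hash()` is salted per process, so the source hash must come from a stable digest. A sketch of a stand-in for `hash_digest` (the choice of sha256 here is an assumption; only the run-to-run stability matters):

```python
import hashlib

def compute_hash(text: str) -> str:
    # Stable across interpreter runs, unlike hash() under hash randomization.
    return hashlib.sha256(text.encode("utf-8")).hexdigest()

# The digest can therefore be stored in the cache meta and compared
# against a re-read source file in a later run.
assert compute_hash("x = 1\n") == compute_hash("x = 1\n")
```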
def json_dumps(obj: Any, debug_cache: bool) -> str:
@@ -1447,11 +1520,19 @@ def json_dumps(obj: Any, debug_cache: bool) -> str:
return json.dumps(obj, sort_keys=True)
-def write_cache(id: str, path: str, tree: MypyFile,
- dependencies: List[str], suppressed: List[str],
- dep_prios: List[int], dep_lines: List[int],
- old_interface_hash: str, source_hash: str,
- ignore_all: bool, manager: BuildManager) -> Tuple[str, Optional[CacheMeta]]:
+def write_cache(
+ id: str,
+ path: str,
+ tree: MypyFile,
+ dependencies: list[str],
+ suppressed: list[str],
+ dep_prios: list[int],
+ dep_lines: list[int],
+ old_interface_hash: str,
+ source_hash: str,
+ ignore_all: bool,
+ manager: BuildManager,
+) -> tuple[str, CacheMeta | None]:
"""Write cache files for a module.
Note that mypy's behavior is still correct when any given
@@ -1482,8 +1563,7 @@ def write_cache(id: str, path: str, tree: MypyFile,
# Obtain file paths.
meta_json, data_json, _ = get_cache_names(id, path, manager.options)
- manager.log('Writing {} {} {} {}'.format(
- id, path, meta_json, data_json))
+ manager.log(f"Writing {id} {path} {meta_json} {data_json}")
# Update tree.path so that in bazel mode it's made relative (since
# sometimes paths leak out).
@@ -1501,7 +1581,7 @@ def write_cache(id: str, path: str, tree: MypyFile,
try:
st = manager.get_stat(path)
except OSError as err:
- manager.log("Cannot get stat for {}: {}".format(path, err))
+ manager.log(f"Cannot get stat for {path}: {err}")
# Remove apparently-invalid cache files.
# (This is purely an optimization.)
for filename in [data_json, meta_json]:
@@ -1515,13 +1595,13 @@ def write_cache(id: str, path: str, tree: MypyFile,
# Write data cache file, if applicable
# Note that for Bazel we don't record the data file's mtime.
if old_interface_hash == interface_hash:
- manager.trace("Interface for {} is unchanged".format(id))
+ manager.trace(f"Interface for {id} is unchanged")
else:
- manager.trace("Interface for {} has changed".format(id))
+ manager.trace(f"Interface for {id} has changed")
if not metastore.write(data_json, data_str):
# Most likely the error is the replace() call
# (see https://github.com/python/mypy/issues/3215).
- manager.log("Error writing data JSON file {}".format(data_json))
+ manager.log(f"Error writing data JSON file {data_json}")
# Let's continue without writing the meta file. Analysis:
# If the replace failed, we've changed nothing except left
# behind an extraneous temporary file; if the replace
@@ -1535,7 +1615,7 @@ def write_cache(id: str, path: str, tree: MypyFile,
try:
data_mtime = manager.getmtime(data_json)
except OSError:
- manager.log("Error in os.stat({!r}), skipping cache write".format(data_json))
+ manager.log(f"Error in os.stat({data_json!r}), skipping cache write")
return interface_hash, None
mtime = 0 if bazel else int(st.st_mtime)
@@ -1547,22 +1627,23 @@ def write_cache(id: str, path: str, tree: MypyFile,
# verifying the cache.
options = manager.options.clone_for_module(id)
assert source_hash is not None
- meta = {'id': id,
- 'path': path,
- 'mtime': mtime,
- 'size': size,
- 'hash': source_hash,
- 'data_mtime': data_mtime,
- 'dependencies': dependencies,
- 'suppressed': suppressed,
- 'options': options.select_options_affecting_cache(),
- 'dep_prios': dep_prios,
- 'dep_lines': dep_lines,
- 'interface_hash': interface_hash,
- 'version_id': manager.version_id,
- 'ignore_all': ignore_all,
- 'plugin_data': plugin_data,
- }
+ meta = {
+ "id": id,
+ "path": path,
+ "mtime": mtime,
+ "size": size,
+ "hash": source_hash,
+ "data_mtime": data_mtime,
+ "dependencies": dependencies,
+ "suppressed": suppressed,
+ "options": options.select_options_affecting_cache(),
+ "dep_prios": dep_prios,
+ "dep_lines": dep_lines,
+ "interface_hash": interface_hash,
+ "version_id": manager.version_id,
+ "ignore_all": ignore_all,
+ "plugin_data": plugin_data,
+ }
# Write meta cache file
meta_str = json_dumps(meta, manager.options.debug_cache)
@@ -1570,7 +1651,7 @@ def write_cache(id: str, path: str, tree: MypyFile,
# Most likely the error is the replace() call
# (see https://github.com/python/mypy/issues/3215).
# The next run will simply find the cache entry out of date.
- manager.log("Error writing meta JSON file {}".format(meta_json))
+ manager.log(f"Error writing meta JSON file {meta_json}")
return interface_hash, cache_meta_from_dict(meta, data_json)
@@ -1587,14 +1668,14 @@ def delete_cache(id: str, path: str, manager: BuildManager) -> None:
# tracked separately.
meta_path, data_path, _ = get_cache_names(id, path, manager.options)
cache_paths = [meta_path, data_path]
- manager.log('Deleting {} {} {}'.format(id, path, " ".join(x for x in cache_paths if x)))
+ manager.log(f"Deleting {id} {path} {' '.join(x for x in cache_paths if x)}")
for filename in cache_paths:
try:
manager.metastore.remove(filename)
except OSError as e:
if e.errno != errno.ENOENT:
- manager.log("Error deleting cache file {}: {}".format(filename, e.strerror))
+ manager.log(f"Error deleting cache file {filename}: {e.strerror}")
"""Dependency manager.
@@ -1749,36 +1830,36 @@ class State:
order_counter: ClassVar[int] = 0
order: int # Order in which modules were encountered
id: str # Fully qualified module name
- path: Optional[str] = None # Path to module source
- abspath: Optional[str] = None # Absolute path to module source
+ path: str | None = None # Path to module source
+ abspath: str | None = None # Absolute path to module source
xpath: str # Path or ''
- source: Optional[str] = None # Module source code
- source_hash: Optional[str] = None # Hash calculated based on the source code
- meta_source_hash: Optional[str] = None # Hash of the source given in the meta, if any
- meta: Optional[CacheMeta] = None
- data: Optional[str] = None
- tree: Optional[MypyFile] = None
+ source: str | None = None # Module source code
+ source_hash: str | None = None # Hash calculated based on the source code
+ meta_source_hash: str | None = None # Hash of the source given in the meta, if any
+ meta: CacheMeta | None = None
+ data: str | None = None
+ tree: MypyFile | None = None
# We keep both a list and a set of dependencies: a set because it makes duplicate
# prevention efficient, and a list to keep the iteration order over dependencies
# stable.
# They should be managed with add_dependency and suppress_dependency.
- dependencies: List[str] # Modules directly imported by the module
- dependencies_set: Set[str] # The same but as a set for deduplication purposes
- suppressed: List[str] # Suppressed/missing dependencies
- suppressed_set: Set[str] # Suppressed/missing dependencies
- priorities: Dict[str, int]
+ dependencies: list[str] # Modules directly imported by the module
+ dependencies_set: set[str] # The same but as a set for deduplication purposes
+ suppressed: list[str] # Suppressed/missing dependencies
+ suppressed_set: set[str] # Suppressed/missing dependencies
+ priorities: dict[str, int]
# Map each dependency to the line number where it is first imported
- dep_line_map: Dict[str, int]
+ dep_line_map: dict[str, int]
# Parent package, its parent, etc.
- ancestors: Optional[List[str]] = None
+ ancestors: list[str] | None = None
# List of (path, line number) tuples giving context for import
- import_context: List[Tuple[str, int]]
+ import_context: list[tuple[str, int]]
# The State from which this module was imported, if any
- caller_state: Optional["State"] = None
+ caller_state: State | None = None
# If caller_state is set, the line number in the caller where the import occurred
caller_line = 0
@@ -1800,29 +1881,37 @@ class State:
# Errors reported before semantic analysis, to allow fine-grained
# mode to keep reporting them.
- early_errors: List[ErrorInfo]
+ early_errors: list[ErrorInfo]
# Type checker used for checking this file. Use type_checker() for
# access and to construct this on demand.
- _type_checker: Optional[TypeChecker] = None
+ _type_checker: TypeChecker | None = None
fine_grained_deps_loaded = False
- def __init__(self,
- id: Optional[str],
- path: Optional[str],
- source: Optional[str],
- manager: BuildManager,
- caller_state: 'Optional[State]' = None,
- caller_line: int = 0,
- ancestor_for: 'Optional[State]' = None,
- root_source: bool = False,
- # If `temporary` is True, this State is being created to just
- # quickly parse/load the tree, without an intention to further
- # process it. With this flag, any changes to external state as well
- # as error reporting should be avoided.
- temporary: bool = False,
- ) -> None:
+ # Cumulative time spent on this file, in microseconds (for profiling stats)
+ time_spent_us: int = 0
+
+ # Per-line type-checking time (cumulative time spent type-checking expressions
+ # on a given source code line).
+ per_line_checking_time_ns: dict[int, int]
+
+ def __init__(
+ self,
+ id: str | None,
+ path: str | None,
+ source: str | None,
+ manager: BuildManager,
+ caller_state: State | None = None,
+ caller_line: int = 0,
+ ancestor_for: State | None = None,
+ root_source: bool = False,
+ # If `temporary` is True, this State is being created to just
+ # quickly parse/load the tree, without an intention to further
+ # process it. With this flag, any changes to external state as well
+ # as error reporting should be avoided.
+ temporary: bool = False,
+ ) -> None:
if not temporary:
assert id or path or source is not None, "Neither id, path nor source given"
self.manager = manager
@@ -1835,7 +1924,7 @@ def __init__(self,
self.import_context.append((caller_state.xpath, caller_line))
else:
self.import_context = []
- self.id = id or '__main__'
+ self.id = id or "__main__"
self.options = manager.options.clone_for_module(self.id)
self.early_errors = []
self._type_checker = None
@@ -1843,18 +1932,27 @@ def __init__(self,
assert id is not None
try:
path, follow_imports = find_module_and_diagnose(
- manager, id, self.options, caller_state, caller_line,
- ancestor_for, root_source, skip_diagnose=temporary)
+ manager,
+ id,
+ self.options,
+ caller_state,
+ caller_line,
+ ancestor_for,
+ root_source,
+ skip_diagnose=temporary,
+ )
except ModuleNotFound:
if not temporary:
manager.missing_modules.add(id)
raise
- if follow_imports == 'silent':
+ if follow_imports == "silent":
self.ignore_all = True
+ elif path and is_silent_import_module(manager, path) and not root_source:
+ self.ignore_all = True
self.path = path
if path:
self.abspath = os.path.abspath(path)
- self.xpath = path or ''
+ self.xpath = path or ""
if path and source is None and self.manager.cache_enabled:
self.meta = find_cache_meta(self.id, path, manager)
# TODO: Get mtime if not cached.
@@ -1862,9 +1960,10 @@ def __init__(self,
self.interface_hash = self.meta.interface_hash
self.meta_source_hash = self.meta.hash
if path and source is None and self.manager.fscache.isdir(path):
- source = ''
+ source = ""
self.source = source
self.add_ancestors()
+ self.per_line_checking_time_ns = collections.defaultdict(int)
t0 = time.time()
self.meta = validate_meta(self.meta, self.id, self.path, self.ignore_all, manager)
self.manager.add_stats(validate_meta_time=time.time() - t0)
@@ -1877,11 +1976,9 @@ def __init__(self,
self.suppressed_set = set(self.suppressed)
all_deps = self.dependencies + self.suppressed
assert len(all_deps) == len(self.meta.dep_prios)
- self.priorities = {id: pri
- for id, pri in zip(all_deps, self.meta.dep_prios)}
+ self.priorities = {id: pri for id, pri in zip(all_deps, self.meta.dep_prios)}
assert len(all_deps) == len(self.meta.dep_lines)
- self.dep_line_map = {id: line
- for id, line in zip(all_deps, self.meta.dep_lines)}
+ self.dep_line_map = {id: line for id, line in zip(all_deps, self.meta.dep_lines)}
if temporary:
self.load_tree(temporary=True)
if not manager.use_fine_grained_cache():
@@ -1902,7 +1999,7 @@ def __init__(self,
# know about modules that have cache information and defer
# handling new modules until the fine-grained update.
if manager.use_fine_grained_cache():
- manager.log("Deferring module to fine-grained update %s (%s)" % (path, id))
+ manager.log(f"Deferring module to fine-grained update {path} ({id})")
raise ModuleNotFound
# Parse the file (and then some) to get the dependencies.
@@ -1918,15 +2015,15 @@ def add_ancestors(self) -> None:
if self.path is not None:
_, name = os.path.split(self.path)
base, _ = os.path.splitext(name)
- if '.' in base:
+ if "." in base:
# This is just a weird filename; don't add anything
self.ancestors = []
return
# All parent packages are new ancestors.
ancestors = []
parent = self.id
- while '.' in parent:
- parent, _ = parent.rsplit('.', 1)
+ while "." in parent:
+ parent, _ = parent.rsplit(".", 1)
ancestors.append(parent)
self.ancestors = ancestors
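`add_ancestors()` walks the dotted module id outward, so ancestors are ordered from the nearest parent package to the top-level one; the same `rsplit` walk in isolation:

```python
def ancestors_of(id: str) -> list[str]:
    # Same walk as State.add_ancestors(), extracted for illustration.
    ancestors = []
    parent = id
    while "." in parent:
        parent, _ = parent.rsplit(".", 1)
        ancestors.append(parent)
    return ancestors

assert ancestors_of("a.b.c") == ["a.b", "a"]
```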
@@ -1936,9 +2033,11 @@ def is_fresh(self) -> bool:
# self.meta.dependencies when a dependency is dropped due to
# suppression by silent mode. However when a suppressed
# dependency is added back we find out later in the process.
- return (self.meta is not None
- and self.is_interface_fresh()
- and self.dependencies == self.meta.dependencies)
+ return (
+ self.meta is not None
+ and self.is_interface_fresh()
+ and self.dependencies == self.meta.dependencies
+ )
def is_interface_fresh(self) -> bool:
return self.externally_same
@@ -1977,22 +2076,31 @@ def wrap_context(self, check_blockers: bool = True) -> Iterator[None]:
except CompileError:
raise
except Exception as err:
- report_internal_error(err, self.path, 0, self.manager.errors,
- self.options, self.manager.stdout, self.manager.stderr)
+ report_internal_error(
+ err,
+ self.path,
+ 0,
+ self.manager.errors,
+ self.options,
+ self.manager.stdout,
+ self.manager.stderr,
+ )
self.manager.errors.set_import_context(save_import_context)
# TODO: Move this away once we've removed the old semantic analyzer?
if check_blockers:
self.check_blockers()
- def load_fine_grained_deps(self) -> Dict[str, Set[str]]:
+ def load_fine_grained_deps(self) -> dict[str, set[str]]:
return self.manager.load_fine_grained_deps(self.id)
def load_tree(self, temporary: bool = False) -> None:
- assert self.meta is not None, "Internal error: this method must be called only" \
- " for cached modules"
+ assert (
+ self.meta is not None
+ ), "Internal error: this method must be called only for cached modules"
- data = _load_json_file(self.meta.data_json, self.manager, "Load tree ",
- "Could not load tree: ")
+ data = _load_json_file(
+ self.meta.data_json, self.manager, "Load tree ", "Could not load tree: "
+ )
if data is None:
return None
@@ -2009,8 +2117,7 @@ def fix_cross_refs(self) -> None:
assert self.tree is not None, "Internal error: method must be called on parsed file only"
# We need to set allow_missing when doing a fine grained cache
# load because we need to gracefully handle missing modules.
- fixup_module(self.tree, self.manager.modules,
- self.options.use_fine_grained_cache)
+ fixup_module(self.tree, self.manager.modules, self.options.use_fine_grained_cache)
# Methods for processing modules from source code.
@@ -2030,9 +2137,11 @@ def parse_file(self) -> None:
cached = self.id in manager.ast_cache
modules = manager.modules
if not cached:
- manager.log("Parsing %s (%s)" % (self.xpath, self.id))
+ manager.log(f"Parsing {self.xpath} ({self.id})")
else:
- manager.log("Using cached AST for %s (%s)" % (self.xpath, self.id))
+ manager.log(f"Using cached AST for {self.xpath} ({self.id})")
+
+ t0 = time_ref()
with self.wrap_context():
source = self.source
@@ -2040,36 +2149,43 @@ def parse_file(self) -> None:
if self.path and source is None:
try:
path = manager.maybe_swap_for_shadow_path(self.path)
- source = decode_python_encoding(manager.fscache.read(path),
- manager.options.python_version)
+ source = decode_python_encoding(manager.fscache.read(path))
self.source_hash = manager.fscache.hash_digest(path)
- except IOError as ioerr:
+ except OSError as ioerr:
# ioerr.strerror differs for os.stat failures between Windows and
# other systems, but os.strerror(ioerr.errno) does not, so we use that.
# (We want the error messages to be platform-independent so that the
# tests have predictable output.)
- raise CompileError([
- "mypy: can't read file '{}': {}".format(
- self.path, os.strerror(ioerr.errno))],
- module_with_blocker=self.id) from ioerr
+ raise CompileError(
+ [
+ "mypy: can't read file '{}': {}".format(
+ self.path, os.strerror(ioerr.errno)
+ )
+ ],
+ module_with_blocker=self.id,
+ ) from ioerr
except (UnicodeDecodeError, DecodeError) as decodeerr:
- if self.path.endswith('.pyd'):
- err = "mypy: stubgen does not support .pyd files: '{}'".format(self.path)
+ if self.path.endswith(".pyd"):
+ err = f"mypy: stubgen does not support .pyd files: '{self.path}'"
else:
- err = "mypy: can't decode file '{}': {}".format(self.path, str(decodeerr))
+ err = f"mypy: can't decode file '{self.path}': {str(decodeerr)}"
raise CompileError([err], module_with_blocker=self.id) from decodeerr
elif self.path and self.manager.fscache.isdir(self.path):
- source = ''
- self.source_hash = ''
+ source = ""
+ self.source_hash = ""
else:
assert source is not None
self.source_hash = compute_hash(source)
self.parse_inline_configuration(source)
if not cached:
- self.tree = manager.parse_file(self.id, self.xpath, source,
- self.ignore_all or self.options.ignore_errors,
- self.options)
+ self.tree = manager.parse_file(
+ self.id,
+ self.xpath,
+ source,
+ self.ignore_all or self.options.ignore_errors,
+ self.options,
+ )
else:
# Reuse a cached AST
@@ -2077,7 +2193,10 @@ def parse_file(self) -> None:
manager.errors.set_file_ignored_lines(
self.xpath,
self.tree.ignored_lines,
- self.ignore_all or self.options.ignore_errors)
+ self.ignore_all or self.options.ignore_errors,
+ )
+
+ self.time_spent_us += time_spent_us(t0)
if not cached:
# Make a copy of any errors produced during parse time so that
@@ -2102,7 +2221,7 @@ def parse_inline_configuration(self, source: str) -> None:
if flags:
changes, config_errors = parse_mypy_comments(flags, self.options)
self.options = self.options.apply_changes(changes)
- self.manager.errors.set_file(self.xpath, self.id)
+ self.manager.errors.set_file(self.xpath, self.id, self.options)
for lineno, error in config_errors:
self.manager.errors.report(lineno, 0, error)
@@ -2113,6 +2232,9 @@ def semantic_analysis_pass1(self) -> None:
"""
options = self.options
assert self.tree is not None
+
+ t0 = time_ref()
+
# Do the first pass of semantic analysis: analyze the reachability
# of blocks and import statements. We must do this before
# processing imports, since this may mark some import statements as
@@ -2131,6 +2253,7 @@ def semantic_analysis_pass1(self) -> None:
if options.allow_redefinition:
# Perform more renaming across the AST to allow variable redefinitions
self.tree.accept(VariableRenameVisitor())
+ self.time_spent_us += time_spent_us(t0)
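The `t0 = time_ref()` / `time_spent_us(t0)` bracketing added in these hunks accumulates per-phase wall time into `State.time_spent_us`. The helpers are imported from elsewhere in mypy; a plausible sketch of their contract, assuming a monotonic nanosecond clock:

```python
import time

def time_ref() -> int:
    # Monotonic reference point; nanoseconds avoid losing very short phases.
    return time.perf_counter_ns()

def time_spent_us(t0: int) -> int:
    return (time.perf_counter_ns() - t0) // 1000

t0 = time_ref()
sum(range(100_000))            # stand-in for a semantic analysis pass
assert time_spent_us(t0) >= 0  # accumulated into State.time_spent_us
```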
def add_dependency(self, dep: str) -> None:
if dep not in self.dependencies_set:
@@ -2170,8 +2293,9 @@ def compute_dependencies(self) -> None:
self.suppressed_set = set()
self.priorities = {} # id -> priority
self.dep_line_map = {} # id -> line
- dep_entries = (manager.all_imported_modules_in_file(self.tree) +
- self.manager.plugin.get_additional_deps(self.tree))
+ dep_entries = manager.all_imported_modules_in_file(
+ self.tree
+ ) + self.manager.plugin.get_additional_deps(self.tree)
for pri, id, line in dep_entries:
self.priorities[id] = min(pri, self.priorities.get(id, PRI_ALL))
if id == self.id:
@@ -2180,41 +2304,74 @@ def compute_dependencies(self) -> None:
if id not in self.dep_line_map:
self.dep_line_map[id] = line
# Every module implicitly depends on builtins.
- if self.id != 'builtins':
- self.add_dependency('builtins')
+ if self.id != "builtins":
+ self.add_dependency("builtins")
self.check_blockers() # Can fail due to bogus relative imports
def type_check_first_pass(self) -> None:
if self.options.semantic_analysis_only:
return
+ t0 = time_ref()
with self.wrap_context():
self.type_checker().check_first_pass()
+ self.time_spent_us += time_spent_us(t0)
def type_checker(self) -> TypeChecker:
if not self._type_checker:
assert self.tree is not None, "Internal error: must be called on parsed file only"
manager = self.manager
self._type_checker = TypeChecker(
- manager.errors, manager.modules, self.options,
- self.tree, self.xpath, manager.plugin,
+ manager.errors,
+ manager.modules,
+ self.options,
+ self.tree,
+ self.xpath,
+ manager.plugin,
+ self.per_line_checking_time_ns,
)
return self._type_checker
- def type_map(self) -> Dict[Expression, Type]:
- return self.type_checker().type_map
+ def type_map(self) -> dict[Expression, Type]:
+ # We can extract the master type map directly since at this
+ # point no temporary type maps can be active.
+ assert len(self.type_checker()._type_maps) == 1
+ return self.type_checker()._type_maps[0]
def type_check_second_pass(self) -> bool:
if self.options.semantic_analysis_only:
return False
+ t0 = time_ref()
with self.wrap_context():
- return self.type_checker().check_second_pass()
+ result = self.type_checker().check_second_pass()
+ self.time_spent_us += time_spent_us(t0)
+ return result
+
+ def detect_possibly_undefined_vars(self) -> None:
+ assert self.tree is not None, "Internal error: method must be called on parsed file only"
+ if self.tree.is_stub:
+ # We skip stub files because they aren't actually executed.
+ return
+ manager = self.manager
+ manager.errors.set_file(self.xpath, self.tree.fullname, options=self.options)
+ if manager.errors.is_error_code_enabled(
+ codes.POSSIBLY_UNDEFINED
+ ) or manager.errors.is_error_code_enabled(codes.USED_BEFORE_DEF):
+ self.tree.accept(
+ PossiblyUndefinedVariableVisitor(
+ MessageBuilder(manager.errors, manager.modules),
+ self.type_map(),
+ self.options,
+ self.tree.names,
+ )
+ )
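Illustrative input that the new `detect_possibly_undefined_vars()` pass flags, assuming the two error codes are enabled (e.g. via `--enable-error-code possibly-undefined,used-before-def`):

```python
def f(flag: bool) -> int:
    if flag:
        x = 1
    return x  # possibly-undefined: "x" may be undefined here

def g() -> None:
    print(y)  # used-before-def: "y" is defined below
    y = 2
```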
def finish_passes(self) -> None:
assert self.tree is not None, "Internal error: method must be called on parsed file only"
manager = self.manager
if self.options.semantic_analysis_only:
return
+ t0 = time_ref()
with self.wrap_context():
# Some tests (and tools) want to look at the set of all types.
options = manager.options
@@ -2223,30 +2380,47 @@ def finish_passes(self) -> None:
# We should always patch indirect dependencies, even in full (non-incremental) builds,
# because the cache may still be written, and it must be correct.
- self._patch_indirect_dependencies(self.type_checker().module_refs, self.type_map())
+ # TODO: find a more robust way to traverse *all* relevant types?
+ expr_types = set(self.type_map().values())
+ symbol_types = set()
+ for _, sym, _ in self.tree.local_definitions():
+ if sym.type is not None:
+ symbol_types.add(sym.type)
+ if isinstance(sym.node, TypeInfo):
+ # TypeInfo symbols have some extra relevant types.
+ symbol_types.update(sym.node.bases)
+ if sym.node.metaclass_type:
+ symbol_types.add(sym.node.metaclass_type)
+ if sym.node.typeddict_type:
+ symbol_types.add(sym.node.typeddict_type)
+ if sym.node.tuple_type:
+ symbol_types.add(sym.node.tuple_type)
+ self._patch_indirect_dependencies(
+ self.type_checker().module_refs, expr_types | symbol_types
+ )
if self.options.dump_inference_stats:
- dump_type_stats(self.tree,
- self.xpath,
- modules=self.manager.modules,
- inferred=True,
- typemap=self.type_map())
+ dump_type_stats(
+ self.tree,
+ self.xpath,
+ modules=self.manager.modules,
+ inferred=True,
+ typemap=self.type_map(),
+ )
manager.report_file(self.tree, self.type_map(), self.options)
self.update_fine_grained_deps(self.manager.fg_deps)
self.free_state()
if not manager.options.fine_grained_incremental and not manager.options.preserve_asts:
free_tree(self.tree)
+ self.time_spent_us += time_spent_us(t0)
def free_state(self) -> None:
if self._type_checker:
self._type_checker.reset()
self._type_checker = None
- def _patch_indirect_dependencies(self,
- module_refs: Set[str],
- type_map: Dict[Expression, Type]) -> None:
- types = set(type_map.values())
+ def _patch_indirect_dependencies(self, module_refs: set[str], types: set[Type]) -> None:
assert None not in types
valid = self.valid_references()
@@ -2262,9 +2436,9 @@ def _patch_indirect_dependencies(self,
elif dep not in self.suppressed_set and dep in self.manager.missing_modules:
self.suppress_dependency(dep)
- def compute_fine_grained_deps(self) -> Dict[str, Set[str]]:
+ def compute_fine_grained_deps(self) -> dict[str, set[str]]:
assert self.tree is not None
- if self.id in ('builtins', 'typing', 'types', 'sys', '_typeshed'):
+ if self.id in ("builtins", "typing", "types", "sys", "_typeshed"):
# We don't track changes to core parts of typeshed -- the
# assumption is that they are only changed as part of mypy
# updates, which will invalidate everything anyway. These
@@ -2274,19 +2448,23 @@ def compute_fine_grained_deps(self) -> Dict[str, Set[str]]:
# dependencies then to handle cyclic imports.
return {}
from mypy.server.deps import get_dependencies # Lazy import to speed up startup
- return get_dependencies(target=self.tree,
- type_map=self.type_map(),
- python_version=self.options.python_version,
- options=self.manager.options)
- def update_fine_grained_deps(self, deps: Dict[str, Set[str]]) -> None:
+ return get_dependencies(
+ target=self.tree,
+ type_map=self.type_map(),
+ python_version=self.options.python_version,
+ options=self.manager.options,
+ )
+
+ def update_fine_grained_deps(self, deps: dict[str, set[str]]) -> None:
options = self.manager.options
if options.cache_fine_grained or options.fine_grained_incremental:
from mypy.server.deps import merge_dependencies # Lazy import to speed up startup
+
merge_dependencies(self.compute_fine_grained_deps(), deps)
- TypeState.update_protocol_deps(deps)
+ type_state.update_protocol_deps(deps)
- def valid_references(self) -> Set[str]:
+ def valid_references(self) -> set[str]:
assert self.ancestors is not None
valid_refs = set(self.dependencies + self.suppressed + self.ancestors)
valid_refs.add(self.id)
@@ -2299,9 +2477,17 @@ def valid_references(self) -> Set[str]:
def write_cache(self) -> None:
assert self.tree is not None, "Internal error: method must be called on parsed file only"
# We don't support writing cache files in fine-grained incremental mode.
- if (not self.path
- or self.options.cache_dir == os.devnull
- or self.options.fine_grained_incremental):
+ if (
+ not self.path
+ or self.options.cache_dir == os.devnull
+ or self.options.fine_grained_incremental
+ ):
+ if self.options.debug_serialize:
+ try:
+ self.tree.serialize()
+ except Exception:
+ print(f"Error serializing {self.id}", file=self.manager.stdout)
+ raise # Propagate to display traceback
return
is_errors = self.transitive_error
if is_errors:
@@ -2312,17 +2498,26 @@ def write_cache(self) -> None:
dep_prios = self.dependency_priorities()
dep_lines = self.dependency_lines()
assert self.source_hash is not None
- assert len(set(self.dependencies)) == len(self.dependencies), (
- "Duplicates in dependencies list for {} ({})".format(self.id, self.dependencies))
+ assert len(set(self.dependencies)) == len(
+ self.dependencies
+ ), f"Duplicates in dependencies list for {self.id} ({self.dependencies})"
new_interface_hash, self.meta = write_cache(
- self.id, self.path, self.tree,
- list(self.dependencies), list(self.suppressed),
- dep_prios, dep_lines, self.interface_hash, self.source_hash, self.ignore_all,
- self.manager)
+ self.id,
+ self.path,
+ self.tree,
+ list(self.dependencies),
+ list(self.suppressed),
+ dep_prios,
+ dep_lines,
+ self.interface_hash,
+ self.source_hash,
+ self.ignore_all,
+ self.manager,
+ )
if new_interface_hash == self.interface_hash:
- self.manager.log("Cached module {} has same interface".format(self.id))
+ self.manager.log(f"Cached module {self.id} has same interface")
else:
- self.manager.log("Cached module {} has changed interface".format(self.id))
+ self.manager.log(f"Cached module {self.id} has changed interface")
self.mark_interface_stale()
self.interface_hash = new_interface_hash
@@ -2337,8 +2532,9 @@ def verify_dependencies(self, suppressed_only: bool = False) -> None:
all_deps = self.suppressed
else:
# Strip out indirect dependencies. See comment in build.load_graph().
- dependencies = [dep for dep in self.dependencies
- if self.priorities.get(dep) != PRI_INDIRECT]
+ dependencies = [
+ dep for dep in self.dependencies if self.priorities.get(dep) != PRI_INDIRECT
+ ]
all_deps = dependencies + self.suppressed + self.ancestors
for dep in all_deps:
if dep in manager.modules:
@@ -2349,14 +2545,19 @@ def verify_dependencies(self, suppressed_only: bool = False) -> None:
line = self.dep_line_map.get(dep, 1)
try:
if dep in self.ancestors:
- state, ancestor = None, self # type: (Optional[State], Optional[State])
+ state: State | None = None
+ ancestor: State | None = self
else:
state, ancestor = self, None
# Called just for its side effects of producing diagnostics.
find_module_and_diagnose(
- manager, dep, options,
- caller_state=state, caller_line=line,
- ancestor_for=ancestor)
+ manager,
+ dep,
+ options,
+ caller_state=state,
+ caller_line=line,
+ ancestor_for=ancestor,
+ )
except (ModuleNotFound, CompileError):
# Swallow up any ModuleNotFounds or CompilerErrors while generating
# a diagnostic. CompileErrors may get generated in
@@ -2365,10 +2566,10 @@ def verify_dependencies(self, suppressed_only: bool = False) -> None:
# it is renamed.
pass
- def dependency_priorities(self) -> List[int]:
+ def dependency_priorities(self) -> list[int]:
return [self.priorities.get(dep, PRI_HIGH) for dep in self.dependencies + self.suppressed]
- def dependency_lines(self) -> List[int]:
+ def dependency_lines(self) -> list[int]:
return [self.dep_line_map.get(dep, 1) for dep in self.dependencies + self.suppressed]
def generate_unused_ignore_notes(self) -> None:
@@ -2383,22 +2584,23 @@ def generate_unused_ignore_notes(self) -> None:
def generate_ignore_without_code_notes(self) -> None:
if self.manager.errors.is_error_code_enabled(codes.IGNORE_WITHOUT_CODE):
self.manager.errors.generate_ignore_without_code_errors(
- self.xpath,
- self.options.warn_unused_ignores,
+ self.xpath, self.options.warn_unused_ignores
)
# Module import and diagnostic glue
-def find_module_and_diagnose(manager: BuildManager,
- id: str,
- options: Options,
- caller_state: 'Optional[State]' = None,
- caller_line: int = 0,
- ancestor_for: 'Optional[State]' = None,
- root_source: bool = False,
- skip_diagnose: bool = False) -> Tuple[str, str]:
+def find_module_and_diagnose(
+ manager: BuildManager,
+ id: str,
+ options: Options,
+ caller_state: State | None = None,
+ caller_line: int = 0,
+ ancestor_for: State | None = None,
+ root_source: bool = False,
+ skip_diagnose: bool = False,
+) -> tuple[str, str]:
"""Find a module by name, respecting follow_imports and producing diagnostics.
If the module is not found, then the ModuleNotFound exception is raised.
@@ -2419,59 +2621,39 @@ def find_module_and_diagnose(manager: BuildManager,
Returns a tuple containing (file path, target's effective follow_imports setting)
"""
- file_id = id
- if id == 'builtins' and options.python_version[0] == 2:
- # The __builtin__ module is called internally by mypy
- # 'builtins' in Python 2 mode (similar to Python 3),
- # but the stub file is __builtin__.pyi. The reason is
- # that a lot of code hard-codes 'builtins.x' and it's
- # easier to work it around like this. It also means
- # that the implementation can mostly ignore the
- # difference and just assume 'builtins' everywhere,
- # which simplifies code.
- file_id = '__builtin__'
- result = find_module_with_reason(file_id, manager)
+ result = find_module_with_reason(id, manager)
if isinstance(result, str):
# For non-stubs, look at options.follow_imports:
# - normal (default) -> fully analyze
# - silent -> analyze but silence errors
# - skip -> don't analyze, make the type Any
follow_imports = options.follow_imports
- if (root_source # Honor top-level modules
- or (not result.endswith('.py') # Stubs are always normal
- and not options.follow_imports_for_stubs) # except when they aren't
- or id in mypy.semanal_main.core_modules): # core is always normal
- follow_imports = 'normal'
+ if (
+ root_source # Honor top-level modules
+ or (
+ result.endswith(".pyi") # Stubs are always normal
+ and not options.follow_imports_for_stubs # except when they aren't
+ )
+ or id in mypy.semanal_main.core_modules # core is always normal
+ ):
+ follow_imports = "normal"
if skip_diagnose:
pass
- elif follow_imports == 'silent':
+ elif follow_imports == "silent":
# Still import it, but silence non-blocker errors.
- manager.log("Silencing %s (%s)" % (result, id))
- elif follow_imports == 'skip' or follow_imports == 'error':
+ manager.log(f"Silencing {result} ({id})")
+ elif follow_imports == "skip" or follow_imports == "error":
# In 'error' mode, produce special error messages.
if id not in manager.missing_modules:
- manager.log("Skipping %s (%s)" % (result, id))
- if follow_imports == 'error':
+ manager.log(f"Skipping {result} ({id})")
+ if follow_imports == "error":
if ancestor_for:
skipping_ancestor(manager, id, result, ancestor_for)
else:
- skipping_module(manager, caller_line, caller_state,
- id, result)
+ skipping_module(manager, caller_line, caller_state, id, result)
raise ModuleNotFound
- if not manager.options.no_silence_site_packages:
- for dir in manager.search_paths.package_path + manager.search_paths.typeshed_path:
- if is_sub_path(result, dir):
- # Silence errors in site-package dirs and typeshed
- follow_imports = 'silent'
- if (id in CORE_BUILTIN_MODULES
- and not is_typeshed_file(result)
- and not is_stub_package_file(result)
- and not options.use_builtins_fixtures
- and not options.custom_typeshed_dir):
- raise CompileError([
- 'mypy: "%s" shadows library module "%s"' % (os.path.relpath(result), id),
- 'note: A user-defined top-level module with name "%s" is not supported' % id
- ])
+ if is_silent_import_module(manager, result) and not root_source:
+ follow_imports = "silent"
return (result, follow_imports)
else:
# Could not find a module. Typically the reason is a
@@ -2479,18 +2661,18 @@ def find_module_and_diagnose(manager: BuildManager,
# search path or the module has not been installed.
ignore_missing_imports = options.ignore_missing_imports
- top_level, second_level = get_top_two_prefixes(file_id)
+ top_level, second_level = get_top_two_prefixes(id)
# Don't honor a global (not per-module) ignore_missing_imports
# setting for modules that used to have bundled stubs, as
# otherwise updating mypy can silently result in new false
# negatives. (Unless there are stubs but they are incomplete.)
global_ignore_missing_imports = manager.options.ignore_missing_imports
- py_ver = options.python_version[0]
- if ((is_legacy_bundled_package(top_level, py_ver)
- or is_legacy_bundled_package(second_level, py_ver))
- and global_ignore_missing_imports
- and not options.ignore_missing_imports_per_module
- and result is ModuleNotFoundReason.APPROVED_STUBS_NOT_INSTALLED):
+ if (
+ (is_legacy_bundled_package(top_level) or is_legacy_bundled_package(second_level))
+ and global_ignore_missing_imports
+ and not options.ignore_missing_imports_per_module
+ and result is ModuleNotFoundReason.APPROVED_STUBS_NOT_INSTALLED
+ ):
ignore_missing_imports = False
if skip_diagnose:
@@ -2503,13 +2685,12 @@ def find_module_and_diagnose(manager: BuildManager,
# If we can't find a root source it's always fatal.
# TODO: This might hide non-fatal errors from
# root sources processed earlier.
- raise CompileError(["mypy: can't find module '%s'" % id])
+ raise CompileError([f"mypy: can't find module '{id}'"])
else:
raise ModuleNotFound
-def exist_added_packages(suppressed: List[str],
- manager: BuildManager, options: Options) -> bool:
+def exist_added_packages(suppressed: list[str], manager: BuildManager, options: Options) -> bool:
"""Find if there are any newly added packages that were previously suppressed.
Exclude everything not in build for follow-imports=skip.
@@ -2522,17 +2703,18 @@ def exist_added_packages(suppressed: List[str],
path = find_module_simple(dep, manager)
if not path:
continue
- if (options.follow_imports == 'skip' and
- (not path.endswith('.pyi') or options.follow_imports_for_stubs)):
+ if options.follow_imports == "skip" and (
+ not path.endswith(".pyi") or options.follow_imports_for_stubs
+ ):
continue
- if '__init__.py' in path:
+ if "__init__.py" in path:
# It is better to have a slightly lenient test: it will only marginally reduce
# performance, while an overly strict test may affect correctness.
return True
return False
-def find_module_simple(id: str, manager: BuildManager) -> Optional[str]:
+def find_module_simple(id: str, manager: BuildManager) -> str | None:
"""Find a filesystem path for module `id` or `None` if not found."""
x = find_module_with_reason(id, manager)
if isinstance(x, ModuleNotFoundReason):
@@ -2554,88 +2736,99 @@ def in_partial_package(id: str, manager: BuildManager) -> bool:
This checks if there is any existing parent __init__.pyi stub that
defines a module-level __getattr__ (a.k.a. partial stub package).
"""
- while '.' in id:
- parent, _ = id.rsplit('.', 1)
+ while "." in id:
+ parent, _ = id.rsplit(".", 1)
if parent in manager.modules:
- parent_mod: Optional[MypyFile] = manager.modules[parent]
+ parent_mod: MypyFile | None = manager.modules[parent]
else:
# Parent is not in build, try quickly if we can find it.
try:
- parent_st = State(id=parent, path=None, source=None, manager=manager,
- temporary=True)
+ parent_st = State(
+ id=parent, path=None, source=None, manager=manager, temporary=True
+ )
except (ModuleNotFound, CompileError):
parent_mod = None
else:
parent_mod = parent_st.tree
if parent_mod is not None:
- if parent_mod.is_partial_stub_package:
- return True
- else:
- # Bail out soon, complete subpackage found
- return False
+ # Bail out soon, complete subpackage found
+ return parent_mod.is_partial_stub_package
id = parent
return False
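A partial stub package of the kind `in_partial_package()` looks for is, per the docstring above, a parent `__init__.pyi` whose module-level `__getattr__` marks it as intentionally incomplete. Contents of a hypothetical `pkg/__init__.pyi`:

```python
# Hypothetical pkg/__init__.pyi. The module-level __getattr__ is what marks
# the package as a partial stub package, so missing submodules under pkg.*
# are tolerated rather than reported.
from typing import Any

def __getattr__(name: str) -> Any: ...
```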
-def module_not_found(manager: BuildManager, line: int, caller_state: State,
- target: str, reason: ModuleNotFoundReason) -> None:
+def module_not_found(
+ manager: BuildManager,
+ line: int,
+ caller_state: State,
+ target: str,
+ reason: ModuleNotFoundReason,
+) -> None:
errors = manager.errors
save_import_context = errors.import_context()
errors.set_import_context(caller_state.import_context)
- errors.set_file(caller_state.xpath, caller_state.id)
- if target == 'builtins':
- errors.report(line, 0, "Cannot find 'builtins' module. Typeshed appears broken!",
- blocker=True)
+ errors.set_file(caller_state.xpath, caller_state.id, caller_state.options)
+ if target == "builtins":
+ errors.report(
+ line, 0, "Cannot find 'builtins' module. Typeshed appears broken!", blocker=True
+ )
errors.raise_error()
else:
daemon = manager.options.fine_grained_incremental
msg, notes = reason.error_message_templates(daemon)
- pyver = '%d.%d' % manager.options.python_version
- errors.report(line, 0, msg.format(module=target, pyver=pyver), code=codes.IMPORT)
+ errors.report(line, 0, msg.format(module=target), code=codes.IMPORT)
top_level, second_level = get_top_two_prefixes(target)
- if second_level in legacy_bundled_packages:
+ if second_level in legacy_bundled_packages or second_level in non_bundled_packages:
top_level = second_level
for note in notes:
- if '{stub_dist}' in note:
- note = note.format(stub_dist=legacy_bundled_packages[top_level].name)
- errors.report(line, 0, note, severity='note', only_once=True, code=codes.IMPORT)
+ if "{stub_dist}" in note:
+ note = note.format(stub_dist=stub_package_name(top_level))
+ errors.report(line, 0, note, severity="note", only_once=True, code=codes.IMPORT)
if reason is ModuleNotFoundReason.APPROVED_STUBS_NOT_INSTALLED:
- manager.missing_stub_packages.add(legacy_bundled_packages[top_level].name)
+ manager.missing_stub_packages.add(stub_package_name(top_level))
errors.set_import_context(save_import_context)
-def skipping_module(manager: BuildManager, line: int, caller_state: Optional[State],
- id: str, path: str) -> None:
+def skipping_module(
+ manager: BuildManager, line: int, caller_state: State | None, id: str, path: str
+) -> None:
"""Produce an error for an import ignored due to --follow_imports=error"""
assert caller_state, (id, path)
save_import_context = manager.errors.import_context()
manager.errors.set_import_context(caller_state.import_context)
- manager.errors.set_file(caller_state.xpath, caller_state.id)
- manager.errors.report(line, 0,
- 'Import of "%s" ignored' % (id,),
- severity='error')
- manager.errors.report(line, 0,
- "(Using --follow-imports=error, module not passed on command line)",
- severity='note', only_once=True)
+ manager.errors.set_file(caller_state.xpath, caller_state.id, manager.options)
+ manager.errors.report(line, 0, f'Import of "{id}" ignored', severity="error")
+ manager.errors.report(
+ line,
+ 0,
+ "(Using --follow-imports=error, module not passed on command line)",
+ severity="note",
+ only_once=True,
+ )
manager.errors.set_import_context(save_import_context)
-def skipping_ancestor(manager: BuildManager, id: str, path: str, ancestor_for: 'State') -> None:
+def skipping_ancestor(manager: BuildManager, id: str, path: str, ancestor_for: State) -> None:
"""Produce an error for an ancestor ignored due to --follow_imports=error"""
# TODO: Read the path (the __init__.py file) and return
# immediately if it's empty or only contains comments.
# But beware, some package may be the ancestor of many modules,
# so we'd need to cache the decision.
manager.errors.set_import_context([])
- manager.errors.set_file(ancestor_for.xpath, ancestor_for.id)
- manager.errors.report(-1, -1, 'Ancestor package "%s" ignored' % (id,),
- severity='error', only_once=True)
- manager.errors.report(-1, -1,
- "(Using --follow-imports=error, submodule passed on command line)",
- severity='note', only_once=True)
+ manager.errors.set_file(ancestor_for.xpath, ancestor_for.id, manager.options)
+ manager.errors.report(
+ -1, -1, f'Ancestor package "{id}" ignored', severity="error", only_once=True
+ )
+ manager.errors.report(
+ -1,
+ -1,
+ "(Using --follow-imports=error, submodule passed on command line)",
+ severity="note",
+ only_once=True,
+ )
-def log_configuration(manager: BuildManager, sources: List[BuildSource]) -> None:
+def log_configuration(manager: BuildManager, sources: list[BuildSource]) -> None:
"""Output useful configuration information to LOG and TRACE"""
manager.log()
@@ -2650,30 +2843,27 @@ def log_configuration(manager: BuildManager, sources: List[BuildSource]) -> None
]
for conf_name, conf_value in configuration_vars:
- manager.log("{:24}{}".format(conf_name + ":", conf_value))
+ manager.log(f"{conf_name + ':':24}{conf_value}")
for source in sources:
- manager.log("{:24}{}".format("Found source:", source))
+ manager.log(f"{'Found source:':24}{source}")
# Complete list of searched paths can get very long, put them under TRACE
for path_type, paths in manager.search_paths._asdict().items():
if not paths:
- manager.trace("No %s" % path_type)
+ manager.trace(f"No {path_type}")
continue
- manager.trace("%s:" % path_type)
+ manager.trace(f"{path_type}:")
for pth in paths:
- manager.trace(" %s" % pth)
+ manager.trace(f" {pth}")
# The driver
-def dispatch(sources: List[BuildSource],
- manager: BuildManager,
- stdout: TextIO,
- ) -> Graph:
+def dispatch(sources: list[BuildSource], manager: BuildManager, stdout: TextIO) -> Graph:
log_configuration(manager, sources)
t0 = time.time()
@@ -2691,16 +2881,16 @@ def dispatch(sources: List[BuildSource],
graph = load_graph(sources, manager)
t1 = time.time()
- manager.add_stats(graph_size=len(graph),
- stubs_found=sum(g.path is not None and g.path.endswith('.pyi')
- for g in graph.values()),
- graph_load_time=(t1 - t0),
- fm_cache_size=len(manager.find_module_cache.results),
- )
+ manager.add_stats(
+ graph_size=len(graph),
+ stubs_found=sum(g.path is not None and g.path.endswith(".pyi") for g in graph.values()),
+ graph_load_time=(t1 - t0),
+ fm_cache_size=len(manager.find_module_cache.results),
+ )
if not graph:
print("Nothing to do?!", file=stdout)
return graph
- manager.log("Loaded graph with %d nodes (%.3f sec)" % (len(graph), t1 - t0))
+ manager.log(f"Loaded graph with {len(graph)} nodes ({t1 - t0:.3f} sec)")
if manager.options.dump_graph:
dump_graph(graph, stdout)
return graph
@@ -2717,7 +2907,7 @@ def dispatch(sources: List[BuildSource],
manager.add_stats(load_fg_deps_time=time.time() - t2)
if fg_deps_meta is not None:
manager.fg_deps_meta = fg_deps_meta
- elif manager.stats.get('fresh_metas', 0) > 0:
+ elif manager.stats.get("fresh_metas", 0) > 0:
# Clear the stats so we don't loop forever because of positive fresh_metas
manager.stats.clear()
# There were some cache files read, but no fine-grained dependencies loaded.
@@ -2739,7 +2929,7 @@ def dispatch(sources: List[BuildSource],
# then we need to collect fine-grained protocol dependencies.
# Since these are a global property of the program, they are calculated after we
# have processed the whole graph.
- TypeState.add_all_protocol_deps(manager.fg_deps)
+ type_state.add_all_protocol_deps(manager.fg_deps)
if not manager.options.fine_grained_incremental:
rdeps = generate_deps_for_cache(manager, graph)
write_deps_cache(rdeps, manager, graph)
@@ -2747,31 +2937,54 @@ def dispatch(sources: List[BuildSource],
if manager.options.dump_deps:
# This speeds up startup a little when not using the daemon mode.
from mypy.server.deps import dump_all_dependencies
- dump_all_dependencies(manager.modules, manager.all_types,
- manager.options.python_version, manager.options)
+
+ dump_all_dependencies(
+ manager.modules, manager.all_types, manager.options.python_version, manager.options
+ )
return graph
class NodeInfo:
"""Some info about a node in the graph of SCCs."""
- def __init__(self, index: int, scc: List[str]) -> None:
+ def __init__(self, index: int, scc: list[str]) -> None:
self.node_id = "n%d" % index
self.scc = scc
- self.sizes: Dict[str, int] = {} # mod -> size in bytes
- self.deps: Dict[str, int] = {} # node_id -> pri
+ self.sizes: dict[str, int] = {} # mod -> size in bytes
+ self.deps: dict[str, int] = {} # node_id -> pri
def dumps(self) -> str:
"""Convert to JSON string."""
total_size = sum(self.sizes.values())
- return "[%s, %s, %s,\n %s,\n %s]" % (json.dumps(self.node_id),
- json.dumps(total_size),
- json.dumps(self.scc),
- json.dumps(self.sizes),
- json.dumps(self.deps))
+ return "[{}, {}, {},\n {},\n {}]".format(
+ json.dumps(self.node_id),
+ json.dumps(total_size),
+ json.dumps(self.scc),
+ json.dumps(self.sizes),
+ json.dumps(self.deps),
+ )
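# Each NodeInfo thus serializes as [node_id, total_size, scc, sizes, deps],
# e.g. (shown on one line, values illustrative):
#
#   ["n0", 2048, ["mypy.build"], {"mypy.build": 2048}, {"n1": 10}]
#
# where sizes maps module -> source size in bytes and deps maps a target
# node_id -> import priority.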
+
+def dump_timing_stats(path: str, graph: Graph) -> None:
+ """Dump timing stats for each file in the given graph."""
+ with open(path, "w") as f:
+ for id in sorted(graph):
+ f.write(f"{id} {graph[id].time_spent_us}\n")
-def dump_graph(graph: Graph, stdout: Optional[TextIO] = None) -> None:
+
+def dump_line_checking_stats(path: str, graph: Graph) -> None:
+ """Dump per-line expression type checking stats."""
+ with open(path, "w") as f:
+ for id in sorted(graph):
+ if not graph[id].per_line_checking_time_ns:
+ continue
+ f.write(f"{id}:\n")
+ for line in sorted(graph[id].per_line_checking_time_ns):
+ line_time = graph[id].per_line_checking_time_ns[line]
+ f.write(f"{line:>5} {line_time/1000:8.1f}\n")
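# Both dump files are plain text: dump_timing_stats emits one
# "module microseconds" pair per line, and dump_line_checking_stats emits an
# indented "line microseconds" block per module. A minimal consumer for the
# timing dump could look like this (hypothetical helper, not part of mypy):
def read_timing_stats(path: str) -> dict[str, int]:
    with open(path) as f:
        return {mod: int(us) for mod, us in (line.split() for line in f)}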
+
+
+def dump_graph(graph: Graph, stdout: TextIO | None = None) -> None:
"""Dump the graph as a JSON string to stdout.
This copies some of the work done by process_graph()
@@ -2803,15 +3016,19 @@ def dump_graph(graph: Graph, stdout: Optional[TextIO] = None) -> None:
pri = state.priorities[dep]
if dep in inv_nodes:
dep_id = inv_nodes[dep]
- if (dep_id != node.node_id and
- (dep_id not in node.deps or pri < node.deps[dep_id])):
+ if dep_id != node.node_id and (
+ dep_id not in node.deps or pri < node.deps[dep_id]
+ ):
node.deps[dep_id] = pri
print("[" + ",\n ".join(node.dumps() for node in nodes) + "\n]", file=stdout)
-def load_graph(sources: List[BuildSource], manager: BuildManager,
- old_graph: Optional[Graph] = None,
- new_modules: Optional[List[State]] = None) -> Graph:
+def load_graph(
+ sources: list[BuildSource],
+ manager: BuildManager,
+ old_graph: Graph | None = None,
+ new_modules: list[State] | None = None,
+) -> Graph:
"""Given some source files, load the full dependency graph.
If an old_graph is passed in, it is used as the starting point and
@@ -2831,33 +3048,41 @@ def load_graph(sources: List[BuildSource], manager: BuildManager,
# TODO: Consider whether to go depth-first instead. This may
# affect the order in which we process files within import cycles.
new = new_modules if new_modules is not None else []
- entry_points: Set[str] = set()
+ entry_points: set[str] = set()
# Seed the graph with the initial root sources.
for bs in sources:
try:
- st = State(id=bs.module, path=bs.path, source=bs.text, manager=manager,
- root_source=True)
+ st = State(
+ id=bs.module,
+ path=bs.path,
+ source=bs.text,
+ manager=manager,
+ root_source=not bs.followed,
+ )
except ModuleNotFound:
continue
if st.id in graph:
- manager.errors.set_file(st.xpath, st.id)
+ manager.errors.set_file(st.xpath, st.id, manager.options)
manager.errors.report(
- -1, -1,
- 'Duplicate module named "%s" (also at "%s")' % (st.id, graph[st.id].xpath),
+ -1,
+ -1,
+ f'Duplicate module named "{st.id}" (also at "{graph[st.id].xpath}")',
blocker=True,
)
manager.errors.report(
- -1, -1,
+ -1,
+ -1,
"See https://mypy.readthedocs.io/en/stable/running_mypy.html#mapping-file-paths-to-modules " # noqa: E501
"for more info",
- severity='note',
+ severity="note",
)
manager.errors.report(
- -1, -1,
+ -1,
+ -1,
"Common resolutions include: a) using `--exclude` to avoid checking one of them, "
"b) adding `__init__.py` somewhere, c) using `--explicit-package-bases` or "
"adjusting MYPYPATH",
- severity='note'
+ severity="note",
)
manager.errors.raise_error()
@@ -2905,11 +3130,18 @@ def load_graph(sources: List[BuildSource], manager: BuildManager,
if dep in st.ancestors:
# TODO: Why not 'if dep not in st.dependencies'?
# Ancestors don't have import context.
- newst = State(id=dep, path=None, source=None, manager=manager,
- ancestor_for=st)
+ newst = State(
+ id=dep, path=None, source=None, manager=manager, ancestor_for=st
+ )
else:
- newst = State(id=dep, path=None, source=None, manager=manager,
- caller_state=st, caller_line=st.dep_line_map.get(dep, 1))
+ newst = State(
+ id=dep,
+ path=None,
+ source=None,
+ manager=manager,
+ caller_state=st,
+ caller_line=st.dep_line_map.get(dep, 1),
+ )
except ModuleNotFound:
if dep in st.dependencies_set:
st.suppress_dependency(dep)
@@ -2919,22 +3151,25 @@ def load_graph(sources: List[BuildSource], manager: BuildManager,
if newst_path in seen_files:
manager.errors.report(
- -1, 0,
- 'Source file found twice under different module names: '
+ -1,
+ 0,
+ "Source file found twice under different module names: "
'"{}" and "{}"'.format(seen_files[newst_path].id, newst.id),
blocker=True,
)
manager.errors.report(
- -1, 0,
+ -1,
+ 0,
"See https://mypy.readthedocs.io/en/stable/running_mypy.html#mapping-file-paths-to-modules " # noqa: E501
"for more info",
- severity='note',
+ severity="note",
)
manager.errors.report(
- -1, 0,
+ -1,
+ 0,
"Common resolutions include: a) adding `__init__.py` somewhere, "
"b) using `--explicit-package-bases` or adjusting MYPYPATH",
- severity='note',
+ severity="note",
)
manager.errors.raise_error()
@@ -2953,10 +3188,9 @@ def load_graph(sources: List[BuildSource], manager: BuildManager,
def process_graph(graph: Graph, manager: BuildManager) -> None:
"""Process everything in dependency order."""
sccs = sorted_components(graph)
- manager.log("Found %d SCCs; largest has %d nodes" %
- (len(sccs), max(len(scc) for scc in sccs)))
+ manager.log("Found %d SCCs; largest has %d nodes" % (len(sccs), max(len(scc) for scc in sccs)))
- fresh_scc_queue: List[List[str]] = []
+ fresh_scc_queue: list[list[str]] = []
# We're processing SCCs from leaves (those without further
# dependencies) to roots (those from which everything else can be
@@ -2968,21 +3202,25 @@ def process_graph(graph: Graph, manager: BuildManager) -> None:
# Make the order of the SCC that includes 'builtins' and 'typing',
# among other things, predictable. Various things may break if
# the order changes.
- if 'builtins' in ascc:
+ if "builtins" in ascc:
scc = sorted(scc, reverse=True)
# If builtins is in the list, move it last. (This is a bit of
# a hack, but it's necessary because the builtins module is
# part of a small cycle involving at least {builtins, abc,
# typing}. Of these, builtins must be processed last or else
# some builtin objects will be incompletely processed.)
- scc.remove('builtins')
- scc.append('builtins')
+ scc.remove("builtins")
+ scc.append("builtins")
if manager.options.verbosity >= 2:
for id in scc:
- manager.trace("Priorities for %s:" % id,
- " ".join("%s:%d" % (x, graph[id].priorities[x])
- for x in graph[id].dependencies
- if x in ascc and x in graph[id].priorities))
+ manager.trace(
+ f"Priorities for {id}:",
+ " ".join(
+ "%s:%d" % (x, graph[id].priorities[x])
+ for x in graph[id].dependencies
+ if x in ascc and x in graph[id].priorities
+ ),
+ )
# Because the SCCs are presented in topological sort order, we
# don't need to look at dependencies recursively for staleness
# -- the immediate dependencies are sufficient.
@@ -3009,8 +3247,9 @@ def process_graph(graph: Graph, manager: BuildManager) -> None:
# cache file is newer than any scc node's cache file.
oldest_in_scc = min(graph[id].xmeta.data_mtime for id in scc)
viable = {id for id in stale_deps if graph[id].meta is not None}
- newest_in_deps = 0 if not viable else max(graph[dep].xmeta.data_mtime
- for dep in viable)
+ newest_in_deps = (
+ 0 if not viable else max(graph[dep].xmeta.data_mtime for dep in viable)
+ )
if manager.options.verbosity >= 3: # Dump all mtimes for extreme debugging.
all_ids = sorted(ascc | viable, key=lambda id: graph[id].xmeta.data_mtime)
for id in all_ids:
@@ -3029,19 +3268,19 @@ def process_graph(graph: Graph, manager: BuildManager) -> None:
# (on some platforms).
if oldest_in_scc < newest_in_deps:
fresh = False
- fresh_msg = "out of date by %.0f seconds" % (newest_in_deps - oldest_in_scc)
+ fresh_msg = f"out of date by {newest_in_deps - oldest_in_scc:.0f} seconds"
else:
fresh_msg = "fresh"
elif undeps:
- fresh_msg = "stale due to changed suppression (%s)" % " ".join(sorted(undeps))
+ fresh_msg = f"stale due to changed suppression ({' '.join(sorted(undeps))})"
elif stale_scc:
fresh_msg = "inherently stale"
if stale_scc != ascc:
- fresh_msg += " (%s)" % " ".join(sorted(stale_scc))
+ fresh_msg += f" ({' '.join(sorted(stale_scc))})"
if stale_deps:
- fresh_msg += " with stale deps (%s)" % " ".join(sorted(stale_deps))
+ fresh_msg += f" with stale deps ({' '.join(sorted(stale_deps))})"
else:
- fresh_msg = "stale due to deps (%s)" % " ".join(sorted(stale_deps))
+ fresh_msg = f"stale due to deps ({' '.join(sorted(stale_deps))})"
# Initialize transitive_error for all SCC members from union
# of transitive_error of dependencies.
@@ -3051,11 +3290,11 @@ def process_graph(graph: Graph, manager: BuildManager) -> None:
scc_str = " ".join(scc)
if fresh:
- manager.trace("Queuing %s SCC (%s)" % (fresh_msg, scc_str))
+ manager.trace(f"Queuing {fresh_msg} SCC ({scc_str})")
fresh_scc_queue.append(scc)
else:
if len(fresh_scc_queue) > 0:
- manager.log("Processing {} queued fresh SCCs".format(len(fresh_scc_queue)))
+ manager.log(f"Processing {len(fresh_scc_queue)} queued fresh SCCs")
# Defer processing fresh SCCs until we actually run into a stale SCC
# and need the earlier modules to be loaded.
#
@@ -3075,7 +3314,7 @@ def process_graph(graph: Graph, manager: BuildManager) -> None:
fresh_scc_queue = []
size = len(scc)
if size == 1:
- manager.log("Processing SCC singleton (%s) as %s" % (scc_str, fresh_msg))
+ manager.log(f"Processing SCC singleton ({scc_str}) as {fresh_msg}")
else:
manager.log("Processing SCC of size %d (%s) as %s" % (size, scc_str, fresh_msg))
process_stale_scc(graph, scc, manager)
@@ -3084,14 +3323,17 @@ def process_graph(graph: Graph, manager: BuildManager) -> None:
nodes_left = sum(len(scc) for scc in fresh_scc_queue)
manager.add_stats(sccs_left=sccs_left, nodes_left=nodes_left)
if sccs_left:
- manager.log("{} fresh SCCs ({} nodes) left in queue (and will remain unprocessed)"
- .format(sccs_left, nodes_left))
+ manager.log(
+ "{} fresh SCCs ({} nodes) left in queue (and will remain unprocessed)".format(
+ sccs_left, nodes_left
+ )
+ )
manager.trace(str(fresh_scc_queue))
else:
manager.log("No fresh SCCs left in queue")
-def order_ascc(graph: Graph, ascc: AbstractSet[str], pri_max: int = PRI_ALL) -> List[str]:
+def order_ascc(graph: Graph, ascc: AbstractSet[str], pri_max: int = PRI_ALL) -> list[str]:
"""Come up with the ideal processing order within an SCC.
Using the priorities assigned by all_imported_modules_in_file(),
@@ -3138,7 +3380,7 @@ def order_ascc(graph: Graph, ascc: AbstractSet[str], pri_max: int = PRI_ALL) ->
return [s for ss in sccs for s in order_ascc(graph, ss, pri_max)]
-def process_fresh_modules(graph: Graph, modules: List[str], manager: BuildManager) -> None:
+def process_fresh_modules(graph: Graph, modules: list[str], manager: BuildManager) -> None:
"""Process the modules in one group of modules from their cached data.
This can be used to process an SCC of modules
@@ -3154,7 +3396,7 @@ def process_fresh_modules(graph: Graph, modules: List[str], manager: BuildManage
manager.add_stats(process_fresh_time=t2 - t0, load_tree_time=t1 - t0)
-def process_stale_scc(graph: Graph, scc: List[str], manager: BuildManager) -> None:
+def process_stale_scc(graph: Graph, scc: list[str], manager: BuildManager) -> None:
"""Process the modules in one SCC from source code.
Exception: If quick_and_dirty is set, use the cache for fresh modules.
@@ -3164,11 +3406,11 @@ def process_stale_scc(graph: Graph, scc: List[str], manager: BuildManager) -> No
# We may already have parsed the module, or not.
# If the former, parse_file() is a no-op.
graph[id].parse_file()
- if 'typing' in scc:
+ if "typing" in scc:
# For historical reasons we need to manually add typing aliases
# for built-in generic collections; see docstring of
# SemanticAnalyzerPass2.add_builtin_aliases for details.
- typing_mod = graph['typing'].tree
+ typing_mod = graph["typing"].tree
assert typing_mod, "The typing module was not parsed"
mypy.semanal_main.semantic_analysis_for_scc(graph, scc, manager.errors)
@@ -3179,6 +3421,7 @@ def process_stale_scc(graph: Graph, scc: List[str], manager: BuildManager) -> No
graph[id].type_check_first_pass()
if not graph[id].type_checker().deferred_nodes:
unfinished_modules.discard(id)
+ graph[id].detect_possibly_undefined_vars()
graph[id].finish_passes()
while unfinished_modules:
@@ -3187,6 +3430,7 @@ def process_stale_scc(graph: Graph, scc: List[str], manager: BuildManager) -> No
continue
if not graph[id].type_check_second_pass():
unfinished_modules.discard(id)
+ graph[id].detect_possibly_undefined_vars()
graph[id].finish_passes()
for id in stale:
graph[id].generate_unused_ignore_notes()
@@ -3200,9 +3444,9 @@ def process_stale_scc(graph: Graph, scc: List[str], manager: BuildManager) -> No
graph[id].mark_as_rechecked()
-def sorted_components(graph: Graph,
- vertices: Optional[AbstractSet[str]] = None,
- pri_max: int = PRI_ALL) -> List[AbstractSet[str]]:
+def sorted_components(
+ graph: Graph, vertices: AbstractSet[str] | None = None, pri_max: int = PRI_ALL
+) -> list[AbstractSet[str]]:
"""Return the graph's SCCs, topologically sorted by dependencies.
The sort order is from leaves (nodes without dependencies) to
@@ -3218,9 +3462,9 @@ def sorted_components(graph: Graph,
sccs = list(strongly_connected_components(vertices, edges))
# Topsort.
sccsmap = {id: frozenset(scc) for scc in sccs for id in scc}
- data: Dict[AbstractSet[str], Set[AbstractSet[str]]] = {}
+ data: dict[AbstractSet[str], set[AbstractSet[str]]] = {}
for scc in sccs:
- deps: Set[AbstractSet[str]] = set()
+ deps: set[AbstractSet[str]] = set()
for id in scc:
deps.update(sccsmap[x] for x in deps_filtered(graph, vertices, id, pri_max))
data[frozenset(scc)] = deps
@@ -3234,23 +3478,25 @@ def sorted_components(graph: Graph,
# - If ready is [{a, b}, {c, d}], a.order == 1, b.order == 3,
# c.order == 2, d.order == 4, the sort keys become [1, 2]
# and the result is [{c, d}, {a, b}].
- res.extend(sorted(ready,
- key=lambda scc: -min(graph[id].order for id in scc)))
+ res.extend(sorted(ready, key=lambda scc: -min(graph[id].order for id in scc)))
return res
-def deps_filtered(graph: Graph, vertices: AbstractSet[str], id: str, pri_max: int) -> List[str]:
+def deps_filtered(graph: Graph, vertices: AbstractSet[str], id: str, pri_max: int) -> list[str]:
"""Filter dependencies for id with pri < pri_max."""
if id not in vertices:
return []
state = graph[id]
- return [dep
- for dep in state.dependencies
- if dep in vertices and state.priorities.get(dep, PRI_HIGH) < pri_max]
+ return [
+ dep
+ for dep in state.dependencies
+ if dep in vertices and state.priorities.get(dep, PRI_HIGH) < pri_max
+ ]
-def strongly_connected_components(vertices: AbstractSet[str],
- edges: Dict[str, List[str]]) -> Iterator[Set[str]]:
+def strongly_connected_components(
+ vertices: AbstractSet[str], edges: dict[str, list[str]]
+) -> Iterator[set[str]]:
"""Compute Strongly Connected Components of a directed graph.
Args:
@@ -3265,12 +3511,12 @@ def strongly_connected_components(vertices: AbstractSet[str],
From http://code.activestate.com/recipes/578507/.
"""
- identified: Set[str] = set()
- stack: List[str] = []
- index: Dict[str, int] = {}
- boundaries: List[int] = []
+ identified: set[str] = set()
+ stack: list[str] = []
+ index: dict[str, int] = {}
+ boundaries: list[int] = []
- def dfs(v: str) -> Iterator[Set[str]]:
+ def dfs(v: str) -> Iterator[set[str]]:
index[v] = len(stack)
stack.append(v)
boundaries.append(index[v])
@@ -3284,8 +3530,8 @@ def dfs(v: str) -> Iterator[Set[str]]:
if boundaries[-1] == index[v]:
boundaries.pop()
- scc = set(stack[index[v]:])
- del stack[index[v]:]
+ scc = set(stack[index[v] :])
+ del stack[index[v] :]
identified.update(scc)
yield scc
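# A small usage sketch (names as defined in this module): mutually importing
# modules collapse into one component, and isolated modules become singletons.
#
#   comps = list(strongly_connected_components(
#       {"a", "b", "c"}, {"a": ["b"], "b": ["a"], "c": []}))
#   # comps contains {"a", "b"} and {"c"} (the yield order is not guaranteed)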
@@ -3297,7 +3543,7 @@ def dfs(v: str) -> Iterator[Set[str]]:
T = TypeVar("T")
-def topsort(data: Dict[T, Set[T]]) -> Iterable[Set[T]]:
+def topsort(data: dict[T, set[T]]) -> Iterable[set[T]]:
"""Topological sort.
Args:
@@ -3338,17 +3584,15 @@ def topsort(data: Dict[T, Set[T]]) -> Iterable[Set[T]]:
if not ready:
break
yield ready
- data = {item: (dep - ready)
- for item, dep in data.items()
- if item not in ready}
- assert not data, "A cyclic dependency exists amongst %r" % data
+ data = {item: (dep - ready) for item, dep in data.items() if item not in ready}
+ assert not data, f"A cyclic dependency exists amongst {data!r}"
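# Illustrative behavior (deps point from an item to what it depends on); each
# yielded batch depends only on items in earlier batches:
#
#   list(topsort({"a": {"b"}, "b": set(), "c": {"a", "b"}}))
#   # -> [{"b"}, {"a"}, {"c"}]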
def missing_stubs_file(cache_dir: str) -> str:
- return os.path.join(cache_dir, 'missing_stubs')
+ return os.path.join(cache_dir, "missing_stubs")
-def record_missing_stub_packages(cache_dir: str, missing_stub_packages: Set[str]) -> None:
+def record_missing_stub_packages(cache_dir: str, missing_stub_packages: set[str]) -> None:
"""Write a file containing missing stub packages.
This allows a subsequent "mypy --install-types" run (without other arguments)
@@ -3356,9 +3600,19 @@ def record_missing_stub_packages(cache_dir: str, missing_stub_packages: Set[str]
"""
fnam = missing_stubs_file(cache_dir)
if missing_stub_packages:
- with open(fnam, 'w') as f:
+ with open(fnam, "w") as f:
for pkg in sorted(missing_stub_packages):
- f.write('%s\n' % pkg)
+ f.write(f"{pkg}\n")
else:
if os.path.isfile(fnam):
os.remove(fnam)
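# The resulting cache file is plain text with one stub distribution per line,
# e.g. (contents illustrative):
#
#   types-requests
#   types-setuptools
#
# A later "mypy --install-types" run reads this file back and installs the
# listed packages.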
+
+
+def is_silent_import_module(manager: BuildManager, path: str) -> bool:
+ if manager.options.no_silence_site_packages:
+ return False
+ # Silence errors in site-packages dirs and typeshed
+ return any(
+ is_sub_path(path, dir)
+ for dir in manager.search_paths.package_path + manager.search_paths.typeshed_path
+ )
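# For example (paths illustrative): with ".../site-packages" on package_path,
# errors from ".../site-packages/foo/bar.py" are silenced unless the user
# passed --no-silence-site-packages.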
diff --git a/mypy/checker.py b/mypy/checker.py
index e53e306a7e5d..46200f5813cc 100644
--- a/mypy/checker.py
+++ b/mypy/checker.py
@@ -1,121 +1,250 @@
"""Mypy type checker."""
+from __future__ import annotations
+
import itertools
-import fnmatch
from collections import defaultdict
-from contextlib import contextmanager
-
+from contextlib import contextmanager, nullcontext
from typing import (
- Any, Dict, Set, List, cast, Tuple, TypeVar, Union, Optional, NamedTuple, Iterator,
- Iterable, Sequence, Mapping, Generic, AbstractSet, Callable, overload
+ AbstractSet,
+ Callable,
+ Dict,
+ Generic,
+ Iterable,
+ Iterator,
+ Mapping,
+ NamedTuple,
+ Optional,
+ Sequence,
+ Tuple,
+ TypeVar,
+ Union,
+ cast,
+ overload,
)
from typing_extensions import Final, TypeAlias as _TypeAlias
-from mypy.backports import nullcontext
-from mypy.errors import Errors, report_internal_error
-from mypy.nodes import (
- SymbolTable, Statement, MypyFile, Var, Expression, Lvalue, Node,
- OverloadedFuncDef, FuncDef, FuncItem, FuncBase, TypeInfo,
- ClassDef, Block, AssignmentStmt, NameExpr, MemberExpr, IndexExpr,
- TupleExpr, ListExpr, ExpressionStmt, ReturnStmt, IfStmt,
- WhileStmt, OperatorAssignmentStmt, WithStmt, AssertStmt,
- RaiseStmt, TryStmt, ForStmt, DelStmt, CallExpr, IntExpr, StrExpr,
- UnicodeExpr, OpExpr, UnaryExpr, LambdaExpr, TempNode, SymbolTableNode,
- Context, Decorator, PrintStmt, BreakStmt, PassStmt, ContinueStmt,
- ComparisonExpr, StarExpr, EllipsisExpr, RefExpr, PromoteExpr,
- Import, ImportFrom, ImportAll, ImportBase, TypeAlias,
- ARG_POS, ARG_STAR, ARG_NAMED, LITERAL_TYPE, LDEF, MDEF, GDEF,
- CONTRAVARIANT, COVARIANT, INVARIANT, TypeVarExpr, AssignmentExpr,
- is_final_node, MatchStmt)
-from mypy import nodes
-from mypy import operators
-from mypy.literals import literal, literal_hash, Key
-from mypy.typeanal import has_any_from_unimported_type, check_for_explicit_any, make_optional_type
-from mypy.types import (
- Type, AnyType, CallableType, FunctionLike, Overloaded, TupleType, TypedDictType,
- Instance, NoneType, strip_type, TypeType, TypeOfAny,
- UnionType, TypeVarId, TypeVarType, PartialType, DeletedType, UninhabitedType,
- is_named_instance, union_items, TypeQuery, LiteralType,
- is_optional, remove_optional, TypeTranslator, StarType, get_proper_type, ProperType,
- get_proper_types, is_literal_type, TypeAliasType, TypeGuardedType, ParamSpecType
-)
-from mypy.sametypes import is_same_type
-from mypy.messages import (
- MessageBuilder, make_inferred_type_note, append_invariance_notes, pretty_seq,
- format_type, format_type_bare, format_type_distinctly, SUGGESTED_TEST_FIXTURES
-)
import mypy.checkexpr
+from mypy import errorcodes as codes, message_registry, nodes, operators
+from mypy.binder import ConditionalTypeBinder, get_declaration
from mypy.checkmember import (
- MemberContext, analyze_member_access, analyze_descriptor_access,
- type_object_type,
+ MemberContext,
analyze_decorator_or_funcbase_access,
+ analyze_descriptor_access,
+ analyze_member_access,
+ type_object_type,
)
from mypy.checkpattern import PatternChecker
-from mypy.semanal_enum import ENUM_BASES, ENUM_SPECIAL_PROPS
-from mypy.typeops import (
- map_type_from_supertype, bind_self, erase_to_bound, make_simplified_union,
- erase_def_to_union_or_bound, erase_to_union_or_bound, coerce_to_literal,
- try_getting_str_literals_from_type, try_getting_int_literals_from_type,
- tuple_fallback, is_singleton_type, try_expanding_sum_type_to_union,
- true_only, false_only, function_type, get_type_vars, custom_special_method,
- is_literal_type_like,
-)
-from mypy import message_registry
-from mypy.message_registry import ErrorMessage
-from mypy.subtypes import (
- is_subtype, is_equivalent, is_proper_subtype, is_more_precise,
- restrict_subtype_away, is_callable_compatible,
- unify_generic_callable, find_member
-)
from mypy.constraints import SUPERTYPE_OF
-from mypy.maptype import map_instance_to_supertype
-from mypy.typevars import fill_typevars, has_no_typevars, fill_typevars_with_any
-from mypy.semanal import set_callable_name, refers_to_fullname
-from mypy.mro import calculate_mro, MroError
-from mypy.erasetype import erase_typevars, remove_instance_last_known_values, erase_type
-from mypy.expandtype import expand_type, expand_type_by_instance
-from mypy.visitor import NodeVisitor
+from mypy.erasetype import erase_type, erase_typevars, remove_instance_last_known_values
+from mypy.errorcodes import TYPE_VAR, UNUSED_AWAITABLE, UNUSED_COROUTINE, ErrorCode
+from mypy.errors import Errors, ErrorWatcher, report_internal_error
+from mypy.expandtype import expand_self_type, expand_type, expand_type_by_instance
from mypy.join import join_types
-from mypy.treetransform import TransformVisitor
-from mypy.binder import ConditionalTypeBinder, get_declaration
+from mypy.literals import Key, literal, literal_hash
+from mypy.maptype import map_instance_to_supertype
from mypy.meet import is_overlapping_erased_types, is_overlapping_types
+from mypy.message_registry import ErrorMessage
+from mypy.messages import (
+ SUGGESTED_TEST_FIXTURES,
+ MessageBuilder,
+ append_invariance_notes,
+ format_type,
+ format_type_bare,
+ format_type_distinctly,
+ make_inferred_type_note,
+ pretty_seq,
+)
+from mypy.mro import MroError, calculate_mro
+from mypy.nodes import (
+ ARG_NAMED,
+ ARG_POS,
+ ARG_STAR,
+ CONTRAVARIANT,
+ COVARIANT,
+ FUNC_NO_INFO,
+ GDEF,
+ IMPLICITLY_ABSTRACT,
+ INVARIANT,
+ IS_ABSTRACT,
+ LDEF,
+ LITERAL_TYPE,
+ MDEF,
+ NOT_ABSTRACT,
+ AssertStmt,
+ AssignmentExpr,
+ AssignmentStmt,
+ Block,
+ BreakStmt,
+ BytesExpr,
+ CallExpr,
+ ClassDef,
+ ComparisonExpr,
+ Context,
+ ContinueStmt,
+ Decorator,
+ DelStmt,
+ EllipsisExpr,
+ Expression,
+ ExpressionStmt,
+ FloatExpr,
+ ForStmt,
+ FuncBase,
+ FuncDef,
+ FuncItem,
+ IfStmt,
+ Import,
+ ImportAll,
+ ImportBase,
+ ImportFrom,
+ IndexExpr,
+ IntExpr,
+ LambdaExpr,
+ ListExpr,
+ Lvalue,
+ MatchStmt,
+ MemberExpr,
+ MypyFile,
+ NameExpr,
+ Node,
+ OperatorAssignmentStmt,
+ OpExpr,
+ OverloadedFuncDef,
+ PassStmt,
+ PromoteExpr,
+ RaiseStmt,
+ RefExpr,
+ ReturnStmt,
+ StarExpr,
+ Statement,
+ StrExpr,
+ SymbolNode,
+ SymbolTable,
+ SymbolTableNode,
+ TempNode,
+ TryStmt,
+ TupleExpr,
+ TypeAlias,
+ TypeInfo,
+ TypeVarExpr,
+ UnaryExpr,
+ Var,
+ WhileStmt,
+ WithStmt,
+ is_final_node,
+)
from mypy.options import Options
-from mypy.plugin import Plugin, CheckerPluginInterface
-from mypy.sharedparse import BINARY_MAGIC_METHODS
+from mypy.plugin import CheckerPluginInterface, Plugin
from mypy.scope import Scope
-from mypy import errorcodes as codes
+from mypy.semanal import is_trivial_body, refers_to_fullname, set_callable_name
+from mypy.semanal_enum import ENUM_BASES, ENUM_SPECIAL_PROPS
+from mypy.sharedparse import BINARY_MAGIC_METHODS
from mypy.state import state
-from mypy.traverser import has_return_statement, all_return_statements
-from mypy.errorcodes import ErrorCode, UNUSED_AWAITABLE, UNUSED_COROUTINE
-from mypy.util import is_typeshed_file, is_dunder, is_sunder
+from mypy.subtypes import (
+ find_member,
+ is_callable_compatible,
+ is_equivalent,
+ is_more_precise,
+ is_proper_subtype,
+ is_same_type,
+ is_subtype,
+ restrict_subtype_away,
+ unify_generic_callable,
+)
+from mypy.traverser import all_return_statements, has_return_statement
+from mypy.treetransform import TransformVisitor
+from mypy.typeanal import check_for_explicit_any, has_any_from_unimported_type, make_optional_type
+from mypy.typeops import (
+ bind_self,
+ coerce_to_literal,
+ custom_special_method,
+ erase_def_to_union_or_bound,
+ erase_to_bound,
+ erase_to_union_or_bound,
+ false_only,
+ fixup_partial_type,
+ function_type,
+ get_type_vars,
+ is_literal_type_like,
+ is_singleton_type,
+ make_simplified_union,
+ map_type_from_supertype,
+ true_only,
+ try_expanding_sum_type_to_union,
+ try_getting_int_literals_from_type,
+ try_getting_str_literals,
+ try_getting_str_literals_from_type,
+ tuple_fallback,
+)
+from mypy.types import (
+ ANY_STRATEGY,
+ OVERLOAD_NAMES,
+ AnyType,
+ BoolTypeQuery,
+ CallableType,
+ DeletedType,
+ ErasedType,
+ FunctionLike,
+ Instance,
+ LiteralType,
+ NoneType,
+ Overloaded,
+ PartialType,
+ ProperType,
+ StarType,
+ TupleType,
+ Type,
+ TypeAliasType,
+ TypedDictType,
+ TypeGuardedType,
+ TypeOfAny,
+ TypeTranslator,
+ TypeType,
+ TypeVarId,
+ TypeVarLikeType,
+ TypeVarType,
+ UnboundType,
+ UninhabitedType,
+ UnionType,
+ flatten_nested_unions,
+ get_proper_type,
+ get_proper_types,
+ is_literal_type,
+ is_named_instance,
+ is_optional,
+ remove_optional,
+ store_argument_type,
+ strip_type,
+)
+from mypy.typetraverser import TypeTraverserVisitor
+from mypy.typevars import fill_typevars, fill_typevars_with_any, has_no_typevars
+from mypy.util import is_dunder, is_sunder, is_typeshed_file
+from mypy.visitor import NodeVisitor
-T = TypeVar('T')
+T = TypeVar("T")
DEFAULT_LAST_PASS: Final = 1 # Pass numbers start at 0
DeferredNodeType: _TypeAlias = Union[FuncDef, LambdaExpr, OverloadedFuncDef, Decorator]
FineGrainedDeferredNodeType: _TypeAlias = Union[FuncDef, MypyFile, OverloadedFuncDef]
+
# A node which is postponed to be processed during the next pass.
# In normal mode one can defer functions and methods (also decorated and/or overloaded)
# and lambda expressions. Nested functions can't be deferred -- only top-level functions
# and methods of classes not defined within a function can be deferred.
-DeferredNode = NamedTuple(
- 'DeferredNode',
- [
- ('node', DeferredNodeType),
- ('active_typeinfo', Optional[TypeInfo]), # And its TypeInfo (for semantic analysis
- # self type handling)
- ])
+class DeferredNode(NamedTuple):
+ node: DeferredNodeType
+ # And its TypeInfo (for semantic analysis self type handling)
+ active_typeinfo: TypeInfo | None
+
# Same as above, but for fine-grained mode targets. Only top-level functions/methods
# and module top levels are allowed as such.
-FineGrainedDeferredNode = NamedTuple(
- 'FineGrainedDeferredNode',
- [
- ('node', FineGrainedDeferredNodeType),
- ('active_typeinfo', Optional[TypeInfo]),
- ])
+class FineGrainedDeferredNode(NamedTuple):
+ node: FineGrainedDeferredNodeType
+ active_typeinfo: TypeInfo | None
+
# Data structure returned by find_isinstance_check representing
# information learned from the truth or falsehood of a condition. The
@@ -130,25 +259,23 @@
# (such as two references to the same variable). TODO: it would
# probably be better to have the dict keyed by the nodes' literal_hash
# field instead.
-
TypeMap: _TypeAlias = Optional[Dict[Expression, Type]]
+
# An object that represents either a precise type or a type with an upper bound;
# it is important for correct type inference with isinstance.
-TypeRange = NamedTuple(
- 'TypeRange',
- [
- ('item', Type),
- ('is_upper_bound', bool), # False => precise type
- ])
+class TypeRange(NamedTuple):
+ item: Type
+ is_upper_bound: bool # False => precise type
+
# Keeps track of partial types in a single scope. In fine-grained incremental
# mode partial types initially defined at the top level cannot be completed in
# a function, and we use the 'is_function' attribute to enforce this.
-PartialTypeScope = NamedTuple('PartialTypeScope', [('map', Dict[Var, Context]),
- ('is_function', bool),
- ('is_local', bool),
- ])
+class PartialTypeScope(NamedTuple):
+ map: dict[Var, Context]
+ is_function: bool
+ is_local: bool
class TypeChecker(NodeVisitor[None], CheckerPluginInterface):
@@ -165,8 +292,15 @@ class TypeChecker(NodeVisitor[None], CheckerPluginInterface):
errors: Errors
# Utility for generating messages
msg: MessageBuilder
- # Types of type checked nodes
- type_map: Dict[Expression, Type]
+ # Types of type checked nodes. The first item is the "master" type
+ # map that will store the final, exported types. Additional items
+ # are temporary type maps used during type inference, and these
+ # will eventually be popped and either discarded or merged into
+ # the master type map.
+ #
+ # Avoid accessing this directly; prefer the lookup_type(),
+ # has_type() etc. helpers instead.
+ _type_maps: list[dict[Expression, Type]]
# Helper for managing conditional types
binder: ConditionalTypeBinder
@@ -176,21 +310,21 @@ class TypeChecker(NodeVisitor[None], CheckerPluginInterface):
pattern_checker: PatternChecker
tscope: Scope
- scope: "CheckerScope"
+ scope: CheckerScope
# Stack of function return types
- return_types: List[Type]
+ return_types: list[Type]
# Flags; true for dynamically typed functions
- dynamic_funcs: List[bool]
+ dynamic_funcs: list[bool]
# Stack of collections of variables with partial types
- partial_types: List[PartialTypeScope]
+ partial_types: list[PartialTypeScope]
# Vars for which partial type errors are already reported
# (to avoid logically duplicate errors with different error context).
- partial_reported: Set[Var]
+ partial_reported: set[Var]
globals: SymbolTable
- modules: Dict[str, MypyFile]
+ modules: dict[str, MypyFile]
# Nodes that couldn't be checked because some types weren't available. We'll run
# another pass and try these again.
- deferred_nodes: List[DeferredNode]
+ deferred_nodes: list[DeferredNode]
# Type checking pass number (0 = first pass)
pass_num = 0
# Last pass number to take
@@ -200,18 +334,16 @@ class TypeChecker(NodeVisitor[None], CheckerPluginInterface):
current_node_deferred = False
# Is this file a typeshed stub?
is_typeshed_stub = False
- # Should strict Optional-related errors be suppressed in this file?
- suppress_none_errors = False # TODO: Get it from options instead
options: Options
# Used for collecting inferred attribute types so that they can be checked
# for consistency.
- inferred_attribute_types: Optional[Dict[Var, Type]] = None
+ inferred_attribute_types: dict[Var, Type] | None = None
# Don't infer partial None types if we are processing an assignment from a Union
no_partial_types: bool = False
# The set of all dependencies (suppressed or not) that this module accesses, either
# directly or indirectly.
- module_refs: Set[str]
+ module_refs: set[str]
# A map from variable nodes to a snapshot of the frame ids of the
# frames that were active when the variable was declared. This can
@@ -219,14 +351,22 @@ class TypeChecker(NodeVisitor[None], CheckerPluginInterface):
# declaration and the current frame, which lets us determine if it
# was declared in a different branch of the same `if` statement
# (if that frame is a conditional_frame).
- var_decl_frames: Dict[Var, Set[int]]
+ var_decl_frames: dict[Var, set[int]]
# Plugin that provides special type checking rules for specific library
# functions such as open(), etc.
plugin: Plugin
- def __init__(self, errors: Errors, modules: Dict[str, MypyFile], options: Options,
- tree: MypyFile, path: str, plugin: Plugin) -> None:
+ def __init__(
+ self,
+ errors: Errors,
+ modules: dict[str, MypyFile],
+ options: Options,
+ tree: MypyFile,
+ path: str,
+ plugin: Plugin,
+ per_line_checking_time_ns: dict[int, int],
+ ) -> None:
"""Construct a type checker.
Use errors to report type check errors.
@@ -238,8 +378,6 @@ def __init__(self, errors: Errors, modules: Dict[str, MypyFile], options: Option
self.path = path
self.msg = MessageBuilder(errors, modules)
self.plugin = plugin
- self.expr_checker = mypy.checkexpr.ExpressionChecker(self, self.msg, self.plugin)
- self.pattern_checker = PatternChecker(self, self.msg, self.plugin)
self.tscope = Scope()
self.scope = CheckerScope(tree)
self.binder = ConditionalTypeBinder()
@@ -250,19 +388,14 @@ def __init__(self, errors: Errors, modules: Dict[str, MypyFile], options: Option
self.partial_reported = set()
self.var_decl_frames = {}
self.deferred_nodes = []
- self.type_map = {}
+ self._type_maps = [{}]
self.module_refs = set()
self.pass_num = 0
self.current_node_deferred = False
self.is_stub = tree.is_stub
- self.is_typeshed_stub = is_typeshed_file(path)
+ self.is_typeshed_stub = is_typeshed_file(options.abs_custom_typeshed_dir, path)
self.inferred_attribute_types = None
- if options.strict_optional_whitelist is None:
- self.suppress_none_errors = not options.show_none_errors
- else:
- self.suppress_none_errors = not any(fnmatch.fnmatch(path, pattern)
- for pattern
- in options.strict_optional_whitelist)
+
# If True, process function definitions. If False, don't. This is used
# for processing module top levels in fine-grained incremental mode.
self.recurse_into_functions = True
@@ -273,8 +406,23 @@ def __init__(self, errors: Errors, modules: Dict[str, MypyFile], options: Option
# argument through various `checker` and `checkmember` functions.
self._is_final_def = False
+ # This flag is set when we run a type check or an attribute access check for the purpose
+ # of giving a note on possibly missing "await". It is used to avoid infinite recursion.
+ self.checking_missing_await = False
+
+ # While this is True, allow passing an abstract class where Type[T] is expected.
+ # Although this is technically unsafe, it is desirable in some contexts, for
+ # example when type-checking class decorators.
+ self.allow_abstract_call = False
+
+ # Child checker objects for specific AST node types
+ self.expr_checker = mypy.checkexpr.ExpressionChecker(
+ self, self.msg, self.plugin, per_line_checking_time_ns
+ )
+ self.pattern_checker = PatternChecker(self, self.msg, self.plugin)
+
@property
- def type_context(self) -> List[Optional[Type]]:
+ def type_context(self) -> list[Type | None]:
return self.expr_checker.type_context
def reset(self) -> None:
@@ -287,7 +435,10 @@ def reset(self) -> None:
self.partial_reported.clear()
self.module_refs.clear()
self.binder = ConditionalTypeBinder()
- self.type_map.clear()
+ self._type_maps[1:] = []
+ self._type_maps[0].clear()
+ self.temp_type_map = None
+ self.expr_checker.reset()
assert self.inferred_attribute_types is None
assert self.partial_types == []
@@ -307,37 +458,39 @@ def check_first_pass(self) -> None:
"""
self.recurse_into_functions = True
with state.strict_optional_set(self.options.strict_optional):
- self.errors.set_file(self.path, self.tree.fullname, scope=self.tscope)
+ self.errors.set_file(
+ self.path, self.tree.fullname, scope=self.tscope, options=self.options
+ )
with self.tscope.module_scope(self.tree.fullname):
with self.enter_partial_types(), self.binder.top_frame_context():
for d in self.tree.defs:
- if (self.binder.is_unreachable()
- and self.should_report_unreachable_issues()
- and not self.is_raising_or_empty(d)):
+ if (
+ self.binder.is_unreachable()
+ and self.should_report_unreachable_issues()
+ and not self.is_raising_or_empty(d)
+ ):
self.msg.unreachable_statement(d)
break
self.accept(d)
assert not self.current_node_deferred
- all_ = self.globals.get('__all__')
+ all_ = self.globals.get("__all__")
if all_ is not None and all_.type is not None:
all_node = all_.node
assert all_node is not None
- seq_str = self.named_generic_type('typing.Sequence',
- [self.named_type('builtins.str')])
- if self.options.python_version[0] < 3:
- seq_str = self.named_generic_type('typing.Sequence',
- [self.named_type('builtins.unicode')])
+ seq_str = self.named_generic_type(
+ "typing.Sequence", [self.named_type("builtins.str")]
+ )
if not is_subtype(all_.type, seq_str):
str_seq_s, all_s = format_type_distinctly(seq_str, all_.type)
- self.fail(message_registry.ALL_MUST_BE_SEQ_STR.format(str_seq_s, all_s),
- all_node)
+ self.fail(
+ message_registry.ALL_MUST_BE_SEQ_STR.format(str_seq_s, all_s), all_node
+ )
- def check_second_pass(self,
- todo: Optional[Sequence[Union[DeferredNode,
- FineGrainedDeferredNode]]] = None
- ) -> bool:
+ def check_second_pass(
+ self, todo: Sequence[DeferredNode | FineGrainedDeferredNode] | None = None
+ ) -> bool:
"""Run second or following pass of type checking.
This goes through deferred nodes, returning True if there were any.
@@ -346,7 +499,9 @@ def check_second_pass(self,
with state.strict_optional_set(self.options.strict_optional):
if not todo and not self.deferred_nodes:
return False
- self.errors.set_file(self.path, self.tree.fullname, scope=self.tscope)
+ self.errors.set_file(
+ self.path, self.tree.fullname, scope=self.tscope, options=self.options
+ )
with self.tscope.module_scope(self.tree.fullname):
self.pass_num += 1
if not todo:
@@ -354,7 +509,7 @@ def check_second_pass(self,
else:
assert not self.deferred_nodes
self.deferred_nodes = []
- done: Set[Union[DeferredNodeType, FineGrainedDeferredNodeType]] = set()
+ done: set[DeferredNodeType | FineGrainedDeferredNodeType] = set()
for node, active_typeinfo in todo:
if node in done:
continue
@@ -362,14 +517,16 @@ def check_second_pass(self,
# print("XXX in pass %d, class %s, function %s" %
# (self.pass_num, type_name, node.fullname or node.name))
done.add(node)
- with self.tscope.class_scope(active_typeinfo) if active_typeinfo \
- else nullcontext():
- with self.scope.push_class(active_typeinfo) if active_typeinfo \
- else nullcontext():
+ with self.tscope.class_scope(
+ active_typeinfo
+ ) if active_typeinfo else nullcontext():
+ with self.scope.push_class(
+ active_typeinfo
+ ) if active_typeinfo else nullcontext():
self.check_partial(node)
return True
- def check_partial(self, node: Union[DeferredNodeType, FineGrainedDeferredNodeType]) -> None:
+ def check_partial(self, node: DeferredNodeType | FineGrainedDeferredNodeType) -> None:
if isinstance(node, MypyFile):
self.check_top_level(node)
else:
@@ -390,7 +547,7 @@ def check_top_level(self, node: MypyFile) -> None:
assert not self.current_node_deferred
# TODO: Handle __all__
- def defer_node(self, node: DeferredNodeType, enclosing_class: Optional[TypeInfo]) -> None:
+ def defer_node(self, node: DeferredNodeType, enclosing_class: TypeInfo | None) -> None:
"""Defer a node for processing during next type-checking pass.
Args:
@@ -426,8 +583,13 @@ def accept(self, stmt: Statement) -> None:
except Exception as err:
report_internal_error(err, self.errors.file, stmt.line, self.errors, self.options)
- def accept_loop(self, body: Statement, else_body: Optional[Statement] = None, *,
- exit_condition: Optional[Expression] = None) -> None:
+ def accept_loop(
+ self,
+ body: Statement,
+ else_body: Statement | None = None,
+ *,
+ exit_condition: Expression | None = None,
+ ) -> None:
"""Repeatedly type check a loop body until the frame doesn't change.
If exit_condition is set, assume it must be False on exit from the loop.
@@ -436,8 +598,7 @@ def accept_loop(self, body: Statement, else_body: Optional[Statement] = None, *,
# The outer frame accumulates the results of all iterations
with self.binder.frame_context(can_skip=False, conditional_frame=True):
while True:
- with self.binder.frame_context(can_skip=True,
- break_frame=2, continue_frame=1):
+ with self.binder.frame_context(can_skip=True, break_frame=2, continue_frame=1):
self.accept(body)
if not self.binder.last_pop_changed:
break
@@ -471,8 +632,8 @@ def _visit_overloaded_func_def(self, defn: OverloadedFuncDef) -> None:
self.visit_decorator(cast(Decorator, defn.items[0]))
for fdef in defn.items:
assert isinstance(fdef, Decorator)
- self.check_func_item(fdef.func, name=fdef.func.name)
- if fdef.func.is_abstract:
+ self.check_func_item(fdef.func, name=fdef.func.name, allow_empty=True)
+ if fdef.func.abstract_status in (IS_ABSTRACT, IMPLICITLY_ABSTRACT):
num_abstract += 1
if num_abstract not in (0, len(defn.items)):
self.fail(message_registry.INCONSISTENT_ABSTRACT_OVERLOAD, defn)
@@ -497,10 +658,10 @@ def check_overlapping_overloads(self, defn: OverloadedFuncDef) -> None:
return
# Compute some info about the implementation (if it exists) for use below
- impl_type: Optional[CallableType] = None
+ impl_type: CallableType | None = None
if defn.impl:
if isinstance(defn.impl, FuncDef):
- inner_type: Optional[Type] = defn.impl.type
+ inner_type: Type | None = defn.impl.type
elif isinstance(defn.impl, Decorator):
inner_type = defn.impl.var.type
else:
@@ -515,7 +676,7 @@ def check_overlapping_overloads(self, defn: OverloadedFuncDef) -> None:
elif isinstance(inner_type, Instance):
inner_call = get_proper_type(
analyze_member_access(
- name='__call__',
+ name="__call__",
typ=inner_type,
context=defn.impl,
is_lvalue=False,
@@ -524,7 +685,7 @@ def check_overlapping_overloads(self, defn: OverloadedFuncDef) -> None:
msg=self.msg,
original_type=inner_type,
chk=self,
- ),
+ )
)
if isinstance(inner_call, CallableType):
impl_type = inner_call
@@ -538,7 +699,7 @@ def check_overlapping_overloads(self, defn: OverloadedFuncDef) -> None:
sig1 = self.function_type(item.func)
assert isinstance(sig1, CallableType)
- for j, item2 in enumerate(defn.items[i + 1:]):
+ for j, item2 in enumerate(defn.items[i + 1 :]):
assert isinstance(item2, Decorator)
sig2 = self.function_type(item2.func)
assert isinstance(sig2, CallableType)
@@ -547,8 +708,7 @@ def check_overlapping_overloads(self, defn: OverloadedFuncDef) -> None:
continue
if overload_can_never_match(sig1, sig2):
- self.msg.overloaded_signature_will_never_match(
- i + 1, i + j + 2, item2.func)
+ self.msg.overloaded_signature_will_never_match(i + 1, i + j + 2, item2.func)
elif not is_descriptor_get:
# Note: we force mypy to check overload signatures in strict-optional mode
# so we don't incorrectly report errors when a user tries typing an overload
@@ -565,8 +725,7 @@ def check_overlapping_overloads(self, defn: OverloadedFuncDef) -> None:
# See Python 2's map function for a concrete example of this kind of overload.
with state.strict_optional_set(True):
if is_unsafe_overlapping_overload_signatures(sig1, sig2):
- self.msg.overloaded_signatures_overlap(
- i + 1, i + j + 2, item.func)
+ self.msg.overloaded_signatures_overlap(i + 1, i + j + 2, item.func)
if impl_type is not None:
assert defn.impl is not None
@@ -580,9 +739,13 @@ def check_overlapping_overloads(self, defn: OverloadedFuncDef) -> None:
# This matches the direction in which the implementation's
# return type needs to be compatible.
if impl_type.variables:
- impl = unify_generic_callable(impl_type, sig1,
- ignore_return=False,
- return_constraint_direction=SUPERTYPE_OF)
+ impl: CallableType | None = unify_generic_callable(
+ # Normalize both before unifying
+ impl_type.with_unpacked_kwargs(),
+ sig1.with_unpacked_kwargs(),
+ ignore_return=False,
+ return_constraint_direction=SUPERTYPE_OF,
+ )
if impl is None:
self.msg.overloaded_signatures_typevar_specific(i + 1, defn.impl)
continue
@@ -595,14 +758,16 @@ def check_overlapping_overloads(self, defn: OverloadedFuncDef) -> None:
impl = impl.copy_modified(arg_types=[sig1.arg_types[0]] + impl.arg_types[1:])
# Are the overload alternative's arguments subtypes of the implementation's?
- if not is_callable_compatible(impl, sig1,
- is_compat=is_subtype_no_promote,
- ignore_return=True):
+ if not is_callable_compatible(
+ impl, sig1, is_compat=is_subtype, ignore_return=True
+ ):
self.msg.overloaded_signatures_arg_specific(i + 1, defn.impl)
# Is the overload alternative's return type a subtype of the implementation's?
- if not (is_subtype_no_promote(sig1.ret_type, impl.ret_type) or
- is_subtype_no_promote(impl.ret_type, sig1.ret_type)):
+ if not (
+ is_subtype(sig1.ret_type, impl.ret_type)
+ or is_subtype(impl.ret_type, sig1.ret_type)
+ ):
self.msg.overloaded_signatures_ret_specific(i + 1, defn.impl)
# Here's the scoop about generators and coroutines.
@@ -657,15 +822,15 @@ def is_generator_return_type(self, typ: Type, is_coroutine: bool) -> bool:
typ = get_proper_type(typ)
if is_coroutine:
# This means we're in Python 3.5 or later.
- at = self.named_generic_type('typing.Awaitable', [AnyType(TypeOfAny.special_form)])
+ at = self.named_generic_type("typing.Awaitable", [AnyType(TypeOfAny.special_form)])
if is_subtype(at, typ):
return True
else:
any_type = AnyType(TypeOfAny.special_form)
- gt = self.named_generic_type('typing.Generator', [any_type, any_type, any_type])
+ gt = self.named_generic_type("typing.Generator", [any_type, any_type, any_type])
if is_subtype(gt, typ):
return True
- return isinstance(typ, Instance) and typ.type.fullname == 'typing.AwaitableGenerator'
+ return isinstance(typ, Instance) and typ.type.fullname == "typing.AwaitableGenerator"
def is_async_generator_return_type(self, typ: Type) -> bool:
"""Is `typ` a valid type for an async generator?
@@ -674,7 +839,7 @@ def is_async_generator_return_type(self, typ: Type) -> bool:
"""
try:
any_type = AnyType(TypeOfAny.special_form)
- agt = self.named_generic_type('typing.AsyncGenerator', [any_type, any_type])
+ agt = self.named_generic_type("typing.AsyncGenerator", [any_type, any_type])
except KeyError:
# we're running on a version of typing that doesn't have AsyncGenerator yet
return False
@@ -686,15 +851,20 @@ def get_generator_yield_type(self, return_type: Type, is_coroutine: bool) -> Typ
if isinstance(return_type, AnyType):
return AnyType(TypeOfAny.from_another_any, source_any=return_type)
- elif (not self.is_generator_return_type(return_type, is_coroutine)
- and not self.is_async_generator_return_type(return_type)):
+ elif isinstance(return_type, UnionType):
+ return make_simplified_union(
+ [self.get_generator_yield_type(item, is_coroutine) for item in return_type.items]
+ )
+ elif not self.is_generator_return_type(
+ return_type, is_coroutine
+ ) and not self.is_async_generator_return_type(return_type):
# If the function doesn't have a proper Generator (or
# Awaitable) return type, anything is permissible.
return AnyType(TypeOfAny.from_error)
elif not isinstance(return_type, Instance):
# Same as above, but written as a separate branch so the typechecker can understand.
return AnyType(TypeOfAny.from_error)
- elif return_type.type.fullname == 'typing.Awaitable':
+ elif return_type.type.fullname == "typing.Awaitable":
# Awaitable: ty is Any.
return AnyType(TypeOfAny.special_form)
elif return_type.args:
@@ -715,22 +885,29 @@ def get_generator_receive_type(self, return_type: Type, is_coroutine: bool) -> T
if isinstance(return_type, AnyType):
return AnyType(TypeOfAny.from_another_any, source_any=return_type)
- elif (not self.is_generator_return_type(return_type, is_coroutine)
- and not self.is_async_generator_return_type(return_type)):
+ elif isinstance(return_type, UnionType):
+ return make_simplified_union(
+ [self.get_generator_receive_type(item, is_coroutine) for item in return_type.items]
+ )
+ elif not self.is_generator_return_type(
+ return_type, is_coroutine
+ ) and not self.is_async_generator_return_type(return_type):
# If the function doesn't have a proper Generator (or
# Awaitable) return type, anything is permissible.
return AnyType(TypeOfAny.from_error)
elif not isinstance(return_type, Instance):
# Same as above, but written as a separate branch so the typechecker can understand.
return AnyType(TypeOfAny.from_error)
- elif return_type.type.fullname == 'typing.Awaitable':
+ elif return_type.type.fullname == "typing.Awaitable":
# Awaitable, AwaitableGenerator: tc is Any.
return AnyType(TypeOfAny.special_form)
- elif (return_type.type.fullname in ('typing.Generator', 'typing.AwaitableGenerator')
- and len(return_type.args) >= 3):
+ elif (
+ return_type.type.fullname in ("typing.Generator", "typing.AwaitableGenerator")
+ and len(return_type.args) >= 3
+ ):
# Generator: tc is args[1].
return return_type.args[1]
- elif return_type.type.fullname == 'typing.AsyncGenerator' and len(return_type.args) >= 2:
+ elif return_type.type.fullname == "typing.AsyncGenerator" and len(return_type.args) >= 2:
return return_type.args[1]
else:
# `return_type` is a supertype of Generator, so callers won't be able to send it
@@ -751,6 +928,10 @@ def get_generator_return_type(self, return_type: Type, is_coroutine: bool) -> Ty
if isinstance(return_type, AnyType):
return AnyType(TypeOfAny.from_another_any, source_any=return_type)
+ elif isinstance(return_type, UnionType):
+ return make_simplified_union(
+ [self.get_generator_return_type(item, is_coroutine) for item in return_type.items]
+ )
elif not self.is_generator_return_type(return_type, is_coroutine):
# If the function doesn't have a proper Generator (or
# Awaitable) return type, anything is permissible.
@@ -758,11 +939,13 @@ def get_generator_return_type(self, return_type: Type, is_coroutine: bool) -> Ty
elif not isinstance(return_type, Instance):
# Same as above, but written as a separate branch so the typechecker can understand.
return AnyType(TypeOfAny.from_error)
- elif return_type.type.fullname == 'typing.Awaitable' and len(return_type.args) == 1:
+ elif return_type.type.fullname == "typing.Awaitable" and len(return_type.args) == 1:
# Awaitable: tr is args[0].
return return_type.args[0]
- elif (return_type.type.fullname in ('typing.Generator', 'typing.AwaitableGenerator')
- and len(return_type.args) >= 3):
+ elif (
+ return_type.type.fullname in ("typing.Generator", "typing.AwaitableGenerator")
+ and len(return_type.args) >= 3
+ ):
# AwaitableGenerator, Generator: tr is args[2].
return return_type.args[2]
else:
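# With the UnionType branches added above, the yield/send/return types now
# distribute over unions of generator types instead of degrading to Any, e.g.
# (illustrative):
#
#   def f(flag: bool) -> Generator[int, None, str] | Generator[bytes, None, str]:
#       ...
#
# get_generator_yield_type infers "int | bytes" and get_generator_return_type
# infers "str" for the body of f.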
@@ -791,16 +974,16 @@ def _visit_func_def(self, defn: FuncDef) -> None:
new_type = self.function_type(defn)
if isinstance(defn.original_def, FuncDef):
# Function definition overrides function definition.
- if not is_same_type(new_type, self.function_type(defn.original_def)):
- self.msg.incompatible_conditional_function_def(defn)
+ old_type = self.function_type(defn.original_def)
+ if not is_same_type(new_type, old_type):
+ self.msg.incompatible_conditional_function_def(defn, old_type, new_type)
else:
# Function definition overrides a variable initialized via assignment or a
# decorated function.
orig_type = defn.original_def.type
if orig_type is None:
- # XXX This can be None, as happens in
- # test_testcheck_TypeCheckSuite.testRedefinedFunctionInTryWithElse
- self.msg.note("Internal mypy error checking function redefinition", defn)
+ # If the other branch is unreachable, we don't type check it, and so we might
+ # not have a type for the original definition
return
if isinstance(orig_type, PartialType):
if orig_type.type is None:
@@ -818,15 +1001,25 @@ def _visit_func_def(self, defn: FuncDef) -> None:
# Trying to redefine something like a partial empty list as a function.
self.fail(message_registry.INCOMPATIBLE_REDEFINITION, defn)
else:
- # TODO: Update conditional type binder.
- self.check_subtype(new_type, orig_type, defn,
- message_registry.INCOMPATIBLE_REDEFINITION,
- 'redefinition with type',
- 'original type')
-
- def check_func_item(self, defn: FuncItem,
- type_override: Optional[CallableType] = None,
- name: Optional[str] = None) -> None:
+ name_expr = NameExpr(defn.name)
+ name_expr.node = defn.original_def
+ self.binder.assign_type(name_expr, new_type, orig_type)
+ self.check_subtype(
+ new_type,
+ orig_type,
+ defn,
+ message_registry.INCOMPATIBLE_REDEFINITION,
+ "redefinition with type",
+ "original type",
+ )
+
+ def check_func_item(
+ self,
+ defn: FuncItem,
+ type_override: CallableType | None = None,
+ name: str | None = None,
+ allow_empty: bool = False,
+ ) -> None:
"""Type check a function.
If type_override is provided, use it as the function type.
@@ -839,14 +1032,14 @@ def check_func_item(self, defn: FuncItem,
typ = type_override.copy_modified(line=typ.line, column=typ.column)
if isinstance(typ, CallableType):
with self.enter_attribute_inference_context():
- self.check_func_def(defn, typ, name)
+ self.check_func_def(defn, typ, name, allow_empty)
else:
- raise RuntimeError('Not supported')
+ raise RuntimeError("Not supported")
self.dynamic_funcs.pop()
self.current_node_deferred = False
- if name == '__exit__':
+ if name == "__exit__":
self.check__exit__return_type(defn)
@contextmanager
@@ -856,7 +1049,9 @@ def enter_attribute_inference_context(self) -> Iterator[None]:
yield None
self.inferred_attribute_types = old_types
- def check_func_def(self, defn: FuncItem, typ: CallableType, name: Optional[str]) -> None:
+ def check_func_def(
+ self, defn: FuncItem, typ: CallableType, name: str | None, allow_empty: bool = False
+ ) -> None:
"""Type check a function definition."""
# Expand type variables with value restrictions to ordinary types.
expanded = self.expand_typevars(defn, typ)
@@ -871,15 +1066,21 @@ def check_func_def(self, defn: FuncItem, typ: CallableType, name: Optional[str])
# precise type.
if isinstance(item, FuncDef):
fdef = item
- # Check if __init__ has an invalid, non-None return type.
- if (fdef.info and fdef.name in ('__init__', '__init_subclass__') and
- not isinstance(get_proper_type(typ.ret_type), NoneType) and
- not self.dynamic_funcs[-1]):
- self.fail(message_registry.MUST_HAVE_NONE_RETURN_TYPE.format(fdef.name),
- item)
+ # Check if __init__ has an invalid return type.
+ if (
+ fdef.info
+ and fdef.name in ("__init__", "__init_subclass__")
+ and not isinstance(
+ get_proper_type(typ.ret_type), (NoneType, UninhabitedType)
+ )
+ and not self.dynamic_funcs[-1]
+ ):
+ self.fail(
+ message_registry.MUST_HAVE_NONE_RETURN_TYPE.format(fdef.name), item
+ )
# Check validity of __new__ signature
- if fdef.info and fdef.name == '__new__':
+ if fdef.info and fdef.name == "__new__":
self.check___new___signature(fdef, typ)
self.check_for_missing_annotations(fdef)
@@ -890,44 +1091,39 @@ def check_func_def(self, defn: FuncItem, typ: CallableType, name: Optional[str])
self.msg.unimported_type_becomes_any("Return type", ret_type, fdef)
for idx, arg_type in enumerate(fdef.type.arg_types):
if has_any_from_unimported_type(arg_type):
- prefix = "Argument {} to \"{}\"".format(idx + 1, fdef.name)
+ prefix = f'Argument {idx + 1} to "{fdef.name}"'
self.msg.unimported_type_becomes_any(prefix, arg_type, fdef)
- check_for_explicit_any(fdef.type, self.options, self.is_typeshed_stub,
- self.msg, context=fdef)
+ check_for_explicit_any(
+ fdef.type, self.options, self.is_typeshed_stub, self.msg, context=fdef
+ )
if name: # Special method names
if defn.info and self.is_reverse_op_method(name):
self.check_reverse_op_method(item, typ, name, defn)
- elif name in ('__getattr__', '__getattribute__'):
+ elif name in ("__getattr__", "__getattribute__"):
self.check_getattr_method(typ, defn, name)
- elif name == '__setattr__':
+ elif name == "__setattr__":
self.check_setattr_method(typ, defn)
# Refuse contravariant return type variable
if isinstance(typ.ret_type, TypeVarType):
if typ.ret_type.variance == CONTRAVARIANT:
- self.fail(message_registry.RETURN_TYPE_CANNOT_BE_CONTRAVARIANT,
- typ.ret_type)
+ self.fail(
+ message_registry.RETURN_TYPE_CANNOT_BE_CONTRAVARIANT, typ.ret_type
+ )
+ self.check_unbound_return_typevar(typ)
# Check that Generator functions have the appropriate return type.
if defn.is_generator:
if defn.is_async_generator:
if not self.is_async_generator_return_type(typ.ret_type):
- self.fail(message_registry.INVALID_RETURN_TYPE_FOR_ASYNC_GENERATOR,
- typ)
+ self.fail(
+ message_registry.INVALID_RETURN_TYPE_FOR_ASYNC_GENERATOR, typ
+ )
else:
if not self.is_generator_return_type(typ.ret_type, defn.is_coroutine):
self.fail(message_registry.INVALID_RETURN_TYPE_FOR_GENERATOR, typ)
- # Python 2 generators aren't allowed to return values.
- orig_ret_type = get_proper_type(typ.ret_type)
- if (self.options.python_version[0] == 2 and
- isinstance(orig_ret_type, Instance) and
- orig_ret_type.type.fullname == 'typing.Generator'):
- if not isinstance(get_proper_type(orig_ret_type.args[2]),
- (NoneType, AnyType)):
- self.fail(message_registry.INVALID_GENERATOR_RETURN_ITEM_TYPE, typ)
-
# Fix the type if decorated with `@types.coroutine` or `@asyncio.coroutine`.
if defn.is_awaitable_coroutine:
# Update the return type to AwaitableGenerator.
@@ -940,8 +1136,9 @@ def check_func_def(self, defn: FuncItem, typ: CallableType, name: Optional[str])
tr = self.get_coroutine_return_type(t)
else:
tr = self.get_generator_return_type(t, c)
- ret_type = self.named_generic_type('typing.AwaitableGenerator',
- [ty, tc, tr, t])
+ ret_type = self.named_generic_type(
+ "typing.AwaitableGenerator", [ty, tc, tr, t]
+ )
typ = typ.copy_modified(ret_type=ret_type)
defn.type = typ
@@ -954,63 +1151,54 @@ def check_func_def(self, defn: FuncItem, typ: CallableType, name: Optional[str])
with self.scope.push_function(defn):
# We temporarily push the definition to get the self type as
# visible from *inside* of this function/method.
- ref_type: Optional[Type] = self.scope.active_self_type()
- if (isinstance(defn, FuncDef) and ref_type is not None and i == 0
- and not defn.is_static
- and typ.arg_kinds[0] not in [nodes.ARG_STAR, nodes.ARG_STAR2]):
- isclass = defn.is_class or defn.name in ('__new__', '__init_subclass__')
+ ref_type: Type | None = self.scope.active_self_type()
+ if (
+ isinstance(defn, FuncDef)
+ and ref_type is not None
+ and i == 0
+ and not defn.is_static
+ and typ.arg_kinds[0] not in [nodes.ARG_STAR, nodes.ARG_STAR2]
+ ):
+ isclass = defn.is_class or defn.name in ("__new__", "__init_subclass__")
if isclass:
ref_type = mypy.types.TypeType.make_normalized(ref_type)
erased = get_proper_type(erase_to_bound(arg_type))
if not is_subtype(ref_type, erased, ignore_type_params=True):
- note = None
- if (isinstance(erased, Instance) and erased.type.is_protocol or
- isinstance(erased, TypeType) and
- isinstance(erased.item, Instance) and
- erased.item.type.is_protocol):
+ if (
+ isinstance(erased, Instance)
+ and erased.type.is_protocol
+ or isinstance(erased, TypeType)
+ and isinstance(erased.item, Instance)
+ and erased.item.type.is_protocol
+ ):
# We allow the explicit self-type not to be a supertype of
# the current class if it is a protocol. For such cases,
# the consistency check will be performed at call sites.
msg = None
- elif typ.arg_names[i] in {'self', 'cls'}:
- if (self.options.python_version[0] < 3
- and is_same_type(erased, arg_type) and not isclass):
- msg = message_registry.INVALID_SELF_TYPE_OR_EXTRA_ARG
- note = '(Hint: typically annotations omit the type for self)'
- else:
- msg = message_registry.ERASED_SELF_TYPE_NOT_SUPERTYPE.format(
- erased, ref_type)
+ elif typ.arg_names[i] in {"self", "cls"}:
+ msg = message_registry.ERASED_SELF_TYPE_NOT_SUPERTYPE.format(
+ erased, ref_type
+ )
else:
msg = message_registry.MISSING_OR_INVALID_SELF_TYPE
if msg:
self.fail(msg, defn)
- if note:
- self.note(note, defn)
elif isinstance(arg_type, TypeVarType):
# Refuse covariant parameter type variables
# TODO: check recursively for inner type variables
- if (
- arg_type.variance == COVARIANT and
- defn.name not in ('__init__', '__new__')
+ if arg_type.variance == COVARIANT and defn.name not in (
+ "__init__",
+ "__new__",
):
ctx: Context = arg_type
if ctx.line < 0:
ctx = typ
self.fail(message_registry.FUNCTION_PARAMETER_CANNOT_BE_COVARIANT, ctx)
- if typ.arg_kinds[i] == nodes.ARG_STAR:
- if not isinstance(arg_type, ParamSpecType):
- # builtins.tuple[T] is typing.Tuple[T, ...]
- arg_type = self.named_generic_type('builtins.tuple',
- [arg_type])
- elif typ.arg_kinds[i] == nodes.ARG_STAR2:
- if not isinstance(arg_type, ParamSpecType):
- arg_type = self.named_generic_type('builtins.dict',
- [self.str_type(),
- arg_type])
- item.arguments[i].variable.type = arg_type
+ # Need to store arguments again for the expanded item.
+ store_argument_type(item, i, typ, self.named_generic_type)
# Type check initialization expressions.
- body_is_trivial = self.is_trivial_body(defn.body)
+ body_is_trivial = is_trivial_body(defn.body)
self.check_default_args(item, body_is_trivial)
# Type check body in a new scope.
@@ -1027,31 +1215,117 @@ def check_func_def(self, defn: FuncItem, typ: CallableType, name: Optional[str])
self.accept(item.body)
unreachable = self.binder.is_unreachable()
- if self.options.warn_no_return and not unreachable:
- if (defn.is_generator or
- is_named_instance(self.return_types[-1], 'typing.AwaitableGenerator')):
- return_type = self.get_generator_return_type(self.return_types[-1],
- defn.is_coroutine)
+ if not unreachable:
+ if defn.is_generator or is_named_instance(
+ self.return_types[-1], "typing.AwaitableGenerator"
+ ):
+ return_type = self.get_generator_return_type(
+ self.return_types[-1], defn.is_coroutine
+ )
elif defn.is_coroutine:
return_type = self.get_coroutine_return_type(self.return_types[-1])
else:
return_type = self.return_types[-1]
-
return_type = get_proper_type(return_type)
- if not isinstance(return_type, (NoneType, AnyType)) and not body_is_trivial:
- # Control flow fell off the end of a function that was
- # declared to return a non-None type and is not
- # entirely pass/Ellipsis/raise NotImplementedError.
- if isinstance(return_type, UninhabitedType):
- # This is a NoReturn function
- self.fail(message_registry.INVALID_IMPLICIT_RETURN, defn)
- else:
- self.fail(message_registry.MISSING_RETURN_STATEMENT, defn)
+
+ allow_empty = allow_empty or self.options.allow_empty_bodies
+
+ show_error = (
+ not body_is_trivial
+ or
+ # Allow empty bodies for abstract methods, overloads, in tests and stubs.
+ (
+ not allow_empty
+ and not (
+ isinstance(defn, FuncDef) and defn.abstract_status != NOT_ABSTRACT
+ )
+ and not self.is_stub
+ )
+ )
+
+ # Ignore plugin-generated methods; these usually don't need any bodies.
+ if defn.info is not FUNC_NO_INFO and (
+ defn.name not in defn.info.names or defn.info.names[defn.name].plugin_generated
+ ):
+ show_error = False
+
+ # Also ignore definitions that appear in `if TYPE_CHECKING: ...` blocks.
+ # These can't be called at runtime anyway (similar to plugin-generated).
+ if isinstance(defn, FuncDef) and defn.is_mypy_only:
+ show_error = False
+
+ # We want to minimize the fallout from checking empty bodies,
+ # a check that was absent in many mypy versions.
+ if body_is_trivial and is_subtype(NoneType(), return_type):
+ show_error = False
+
+ may_be_abstract = (
+ body_is_trivial
+ and defn.info is not FUNC_NO_INFO
+ and defn.info.metaclass_type is not None
+ and defn.info.metaclass_type.type.has_base("abc.ABCMeta")
+ )
+
+ if self.options.warn_no_return:
+ if (
+ not self.current_node_deferred
+ and not isinstance(return_type, (NoneType, AnyType))
+ and show_error
+ ):
+ # Control flow fell off the end of a function that was
+ # declared to return a non-None type.
+ if isinstance(return_type, UninhabitedType):
+ # This is a NoReturn function
+ msg = message_registry.INVALID_IMPLICIT_RETURN
+ else:
+ msg = message_registry.MISSING_RETURN_STATEMENT
+ if body_is_trivial:
+ msg = msg._replace(code=codes.EMPTY_BODY)
+ self.fail(msg, defn)
+ if may_be_abstract:
+ self.note(message_registry.EMPTY_BODY_ABSTRACT, defn)
+ elif show_error:
+ msg = message_registry.INCOMPATIBLE_RETURN_VALUE_TYPE
+ if body_is_trivial:
+ msg = msg._replace(code=codes.EMPTY_BODY)
+ # similar to code in check_return_stmt
+ if (
+ not self.check_subtype(
+ subtype_label="implicitly returns",
+ subtype=NoneType(),
+ supertype_label="expected",
+ supertype=return_type,
+ context=defn,
+ msg=msg,
+ )
+ and may_be_abstract
+ ):
+ self.note(message_registry.EMPTY_BODY_ABSTRACT, defn)
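+ # e.g. (our illustration, not part of this change): with empty
+ # bodies now checked, `def f(self) -> int: ...` in a non-stub,
+ # non-abstract class is reported as "Missing return statement"
+ # under the new `empty-body` error code, while trivial bodies
+ # whose return type admits None stay silent.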
self.return_types.pop()
self.binder = old_binder
+ def check_unbound_return_typevar(self, typ: CallableType) -> None:
+ """Fails when the return typevar is not defined in arguments."""
+ if isinstance(typ.ret_type, TypeVarType) and typ.ret_type in typ.variables:
+ arg_type_visitor = CollectArgTypeVarTypes()
+ for argtype in typ.arg_types:
+ argtype.accept(arg_type_visitor)
+
+ if typ.ret_type not in arg_type_visitor.arg_types:
+ self.fail(message_registry.UNBOUND_TYPEVAR, typ.ret_type, code=TYPE_VAR)
+ upper_bound = get_proper_type(typ.ret_type.upper_bound)
+ if not (
+ isinstance(upper_bound, Instance)
+ and upper_bound.type.fullname == "builtins.object"
+ ):
+ self.note(
+ "Consider using the upper bound "
+ f"{format_type(typ.ret_type.upper_bound)} instead",
+ context=typ.ret_type,
+ )
+
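+ # e.g. what check_unbound_return_typevar (above) flags -- our
+ # illustration, not part of this diff:
+ #
+ #     T = TypeVar("T")
+ #     def f() -> T: ...            # error: T is unbound
+ #     def g(x: list[T]) -> T: ...  # ok: T appears in an argument
+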
def check_default_args(self, item: FuncItem, body_is_trivial: bool) -> None:
for arg in item.arguments:
if arg.initializer is None:
@@ -1059,31 +1333,39 @@ def check_default_args(self, item: FuncItem, body_is_trivial: bool) -> None:
if body_is_trivial and isinstance(arg.initializer, EllipsisExpr):
continue
name = arg.variable.name
- msg = 'Incompatible default for '
- if name.startswith('__tuple_arg_'):
- msg += "tuple argument {}".format(name[12:])
+ msg = "Incompatible default for "
+ if name.startswith("__tuple_arg_"):
+ msg += f"tuple argument {name[12:]}"
+ else:
+ msg += f'argument "{name}"'
+ if (
+ not self.options.implicit_optional
+ and isinstance(arg.initializer, NameExpr)
+ and arg.initializer.fullname == "builtins.None"
+ ):
+ notes = [
+ "PEP 484 prohibits implicit Optional. "
+ "Accordingly, mypy has changed its default to no_implicit_optional=True",
+ "Use https://github.com/hauntsaninja/no_implicit_optional to automatically "
+ "upgrade your codebase",
+ ]
else:
- msg += 'argument "{}"'.format(name)
+ notes = None
self.check_simple_assignment(
arg.variable.type,
arg.initializer,
context=arg.initializer,
- msg=msg,
- lvalue_name='argument',
- rvalue_name='default',
- code=codes.ASSIGNMENT)
+ msg=ErrorMessage(msg, code=codes.ASSIGNMENT),
+ lvalue_name="argument",
+ rvalue_name="default",
+ notes=notes,
+ )
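+
+ # e.g. (our illustration): `def f(x: int = None) -> None: ...` now
+ # fails this default-value check and carries the two notes above;
+ # annotating `x: int | None = None` (or running no_implicit_optional
+ # over the codebase) resolves it.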
def is_forward_op_method(self, method_name: str) -> bool:
- if self.options.python_version[0] == 2 and method_name == '__div__':
- return True
- else:
- return method_name in operators.reverse_op_methods
+ return method_name in operators.reverse_op_methods
def is_reverse_op_method(self, method_name: str) -> bool:
- if self.options.python_version[0] == 2 and method_name == '__rdiv__':
- return True
- else:
- return method_name in operators.reverse_op_method_set
+ return method_name in operators.reverse_op_method_set
def check_for_missing_annotations(self, fdef: FuncItem) -> None:
# Check for functions with unspecified/not fully specified types.
@@ -1092,20 +1374,25 @@ def is_unannotated_any(t: Type) -> bool:
return False
return isinstance(t, AnyType) and t.type_of_any == TypeOfAny.unannotated
- has_explicit_annotation = (isinstance(fdef.type, CallableType)
- and any(not is_unannotated_any(t)
- for t in fdef.type.arg_types + [fdef.type.ret_type]))
+ has_explicit_annotation = isinstance(fdef.type, CallableType) and any(
+ not is_unannotated_any(t) for t in fdef.type.arg_types + [fdef.type.ret_type]
+ )
show_untyped = not self.is_typeshed_stub or self.options.warn_incomplete_stub
check_incomplete_defs = self.options.disallow_incomplete_defs and has_explicit_annotation
if show_untyped and (self.options.disallow_untyped_defs or check_incomplete_defs):
if fdef.type is None and self.options.disallow_untyped_defs:
- if (not fdef.arguments or (len(fdef.arguments) == 1 and
- (fdef.arg_names[0] == 'self' or fdef.arg_names[0] == 'cls'))):
+ if not fdef.arguments or (
+ len(fdef.arguments) == 1
+ and (fdef.arg_names[0] == "self" or fdef.arg_names[0] == "cls")
+ ):
self.fail(message_registry.RETURN_TYPE_EXPECTED, fdef)
if not has_return_statement(fdef) and not fdef.is_generator:
- self.note('Use "-> None" if function does not return a value', fdef,
- code=codes.NO_UNTYPED_DEF)
+ self.note(
+ 'Use "-> None" if function does not return a value',
+ fdef,
+ code=codes.NO_UNTYPED_DEF,
+ )
else:
self.fail(message_registry.FUNCTION_TYPE_EXPECTED, fdef)
elif isinstance(fdef.type, CallableType):
@@ -1113,8 +1400,9 @@ def is_unannotated_any(t: Type) -> bool:
if is_unannotated_any(ret_type):
self.fail(message_registry.RETURN_TYPE_EXPECTED, fdef)
elif fdef.is_generator:
- if is_unannotated_any(self.get_generator_return_type(ret_type,
- fdef.is_coroutine)):
+ if is_unannotated_any(
+ self.get_generator_return_type(ret_type, fdef.is_coroutine)
+ ):
self.fail(message_registry.RETURN_TYPE_EXPECTED, fdef)
elif fdef.is_coroutine and isinstance(ret_type, Instance):
if is_unannotated_any(self.get_coroutine_return_type(ret_type)):
@@ -1127,22 +1415,23 @@ def check___new___signature(self, fdef: FuncDef, typ: CallableType) -> None:
bound_type = bind_self(typ, self_type, is_classmethod=True)
# Check that __new__ (after binding cls) returns an instance
# type (or any).
- if isinstance(fdef.info, TypeInfo) and fdef.info.is_metaclass():
+ if fdef.info.is_metaclass():
# This is a metaclass, so it must return a new unrelated type.
self.check_subtype(
bound_type.ret_type,
self.type_type(),
fdef,
message_registry.INVALID_NEW_TYPE,
- 'returns',
- 'but must return a subtype of'
+ "returns",
+ "but must return a subtype of",
)
- elif not isinstance(get_proper_type(bound_type.ret_type),
- (AnyType, Instance, TupleType)):
+ elif not isinstance(
+ get_proper_type(bound_type.ret_type), (AnyType, Instance, TupleType, UninhabitedType)
+ ):
self.fail(
- message_registry.NON_INSTANCE_NEW_TYPE.format(
- format_type(bound_type.ret_type)),
- fdef)
+ message_registry.NON_INSTANCE_NEW_TYPE.format(format_type(bound_type.ret_type)),
+ fdef,
+ )
else:
# And that it returns a subtype of the class
self.check_subtype(
@@ -1150,58 +1439,13 @@ def check___new___signature(self, fdef: FuncDef, typ: CallableType) -> None:
self_type,
fdef,
message_registry.INVALID_NEW_TYPE,
- 'returns',
- 'but must return a subtype of'
+ "returns",
+ "but must return a subtype of",
)
- def is_trivial_body(self, block: Block) -> bool:
- """Returns 'true' if the given body is "trivial" -- if it contains just a "pass",
- "..." (ellipsis), or "raise NotImplementedError()". A trivial body may also
- start with a statement containing just a string (e.g. a docstring).
-
- Note: functions that raise other kinds of exceptions do not count as
- "trivial". We use this function to help us determine when it's ok to
- relax certain checks on body, but functions that raise arbitrary exceptions
- are more likely to do non-trivial work. For example:
-
- def halt(self, reason: str = ...) -> NoReturn:
- raise MyCustomError("Fatal error: " + reason, self.line, self.context)
-
- A function that raises just NotImplementedError is much less likely to be
- this complex.
- """
- body = block.body
-
- # Skip a docstring
- if (body and isinstance(body[0], ExpressionStmt) and
- isinstance(body[0].expr, (StrExpr, UnicodeExpr))):
- body = block.body[1:]
-
- if len(body) == 0:
- # There's only a docstring (or no body at all).
- return True
- elif len(body) > 1:
- return False
-
- stmt = body[0]
-
- if isinstance(stmt, RaiseStmt):
- expr = stmt.expr
- if expr is None:
- return False
- if isinstance(expr, CallExpr):
- expr = expr.callee
-
- return (isinstance(expr, NameExpr)
- and expr.fullname == 'builtins.NotImplementedError')
-
- return (isinstance(stmt, PassStmt) or
- (isinstance(stmt, ExpressionStmt) and
- isinstance(stmt.expr, EllipsisExpr)))
-
- def check_reverse_op_method(self, defn: FuncItem,
- reverse_type: CallableType, reverse_name: str,
- context: Context) -> None:
+ def check_reverse_op_method(
+ self, defn: FuncItem, reverse_type: CallableType, reverse_name: str, context: Context
+ ) -> None:
"""Check a reverse operator method such as __radd__."""
# Decides whether it's worth calling check_overlapping_op_methods().
@@ -1212,17 +1456,18 @@ def check_reverse_op_method(self, defn: FuncItem,
assert defn.info
# First check for a valid signature
- method_type = CallableType([AnyType(TypeOfAny.special_form),
- AnyType(TypeOfAny.special_form)],
- [nodes.ARG_POS, nodes.ARG_POS],
- [None, None],
- AnyType(TypeOfAny.special_form),
- self.named_type('builtins.function'))
+ method_type = CallableType(
+ [AnyType(TypeOfAny.special_form), AnyType(TypeOfAny.special_form)],
+ [nodes.ARG_POS, nodes.ARG_POS],
+ [None, None],
+ AnyType(TypeOfAny.special_form),
+ self.named_type("builtins.function"),
+ )
if not is_subtype(reverse_type, method_type):
self.msg.invalid_signature(reverse_type, context)
return
- if reverse_name in ('__eq__', '__ne__'):
+ if reverse_name in ("__eq__", "__ne__"):
# These are defined for all objects => can't cause trouble.
return
@@ -1232,18 +1477,17 @@ def check_reverse_op_method(self, defn: FuncItem,
if isinstance(ret_type, AnyType):
return
if isinstance(ret_type, Instance):
- if ret_type.type.fullname == 'builtins.object':
+ if ret_type.type.fullname == "builtins.object":
return
if reverse_type.arg_kinds[0] == ARG_STAR:
- reverse_type = reverse_type.copy_modified(arg_types=[reverse_type.arg_types[0]] * 2,
- arg_kinds=[ARG_POS] * 2,
- arg_names=[reverse_type.arg_names[0], "_"])
+ reverse_type = reverse_type.copy_modified(
+ arg_types=[reverse_type.arg_types[0]] * 2,
+ arg_kinds=[ARG_POS] * 2,
+ arg_names=[reverse_type.arg_names[0], "_"],
+ )
assert len(reverse_type.arg_types) >= 2
- if self.options.python_version[0] == 2 and reverse_name == '__rdiv__':
- forward_name = '__div__'
- else:
- forward_name = operators.normal_from_reverse_op[reverse_name]
+ forward_name = operators.normal_from_reverse_op[reverse_name]
forward_inst = get_proper_type(reverse_type.arg_types[1])
if isinstance(forward_inst, TypeVarType):
forward_inst = get_proper_type(forward_inst.upper_bound)
@@ -1257,24 +1501,35 @@ def check_reverse_op_method(self, defn: FuncItem,
opt_meta = item.type.metaclass_type
if opt_meta is not None:
forward_inst = opt_meta
- if not (isinstance(forward_inst, (Instance, UnionType))
- and forward_inst.has_readable_member(forward_name)):
+ if not (
+ isinstance(forward_inst, (Instance, UnionType))
+ and forward_inst.has_readable_member(forward_name)
+ ):
return
forward_base = reverse_type.arg_types[1]
- forward_type = self.expr_checker.analyze_external_member_access(forward_name, forward_base,
- context=defn)
- self.check_overlapping_op_methods(reverse_type, reverse_name, defn.info,
- forward_type, forward_name, forward_base,
- context=defn)
-
- def check_overlapping_op_methods(self,
- reverse_type: CallableType,
- reverse_name: str,
- reverse_class: TypeInfo,
- forward_type: Type,
- forward_name: str,
- forward_base: Type,
- context: Context) -> None:
+ forward_type = self.expr_checker.analyze_external_member_access(
+ forward_name, forward_base, context=defn
+ )
+ self.check_overlapping_op_methods(
+ reverse_type,
+ reverse_name,
+ defn.info,
+ forward_type,
+ forward_name,
+ forward_base,
+ context=defn,
+ )
+
+ def check_overlapping_op_methods(
+ self,
+ reverse_type: CallableType,
+ reverse_name: str,
+ reverse_class: TypeInfo,
+ forward_type: Type,
+ forward_name: str,
+ forward_base: Type,
+ context: Context,
+ ) -> None:
"""Check for overlapping method and reverse method signatures.
This function assumes that:
@@ -1313,26 +1568,25 @@ def check_overlapping_op_methods(self,
# inheritance. (This is consistent with how we handle overloads: we also
# do not try checking unsafe overlaps due to multiple inheritance there.)
- for forward_item in union_items(forward_type):
+ for forward_item in flatten_nested_unions([forward_type]):
+ forward_item = get_proper_type(forward_item)
if isinstance(forward_item, CallableType):
if self.is_unsafe_overlapping_op(forward_item, forward_base, reverse_type):
self.msg.operator_method_signatures_overlap(
- reverse_class, reverse_name,
- forward_base, forward_name, context)
+ reverse_class, reverse_name, forward_base, forward_name, context
+ )
elif isinstance(forward_item, Overloaded):
for item in forward_item.items:
if self.is_unsafe_overlapping_op(item, forward_base, reverse_type):
self.msg.operator_method_signatures_overlap(
- reverse_class, reverse_name,
- forward_base, forward_name,
- context)
+ reverse_class, reverse_name, forward_base, forward_name, context
+ )
elif not isinstance(forward_item, AnyType):
self.msg.forward_operator_not_callable(forward_name, context)
- def is_unsafe_overlapping_op(self,
- forward_item: CallableType,
- forward_base: Type,
- reverse_type: CallableType) -> bool:
+ def is_unsafe_overlapping_op(
+ self, forward_item: CallableType, forward_base: Type, reverse_type: CallableType
+ ) -> bool:
# TODO: check argument kinds?
if len(forward_item.arg_types) < 1:
# Not a valid operator method -- can't succeed anyway.
@@ -1386,11 +1640,12 @@ def check_inplace_operator_method(self, defn: FuncBase) -> None:
return
typ = bind_self(self.function_type(defn))
cls = defn.info
- other_method = '__' + method[3:]
+ other_method = "__" + method[3:]
if cls.has_readable_member(other_method):
instance = fill_typevars(cls)
- typ2 = get_proper_type(self.expr_checker.analyze_external_member_access(
- other_method, instance, defn))
+ typ2 = get_proper_type(
+ self.expr_checker.analyze_external_member_access(other_method, instance, defn)
+ )
fail = False
if isinstance(typ2, FunctionLike):
if not is_more_general_arg_prefix(typ, typ2):
@@ -1404,24 +1659,27 @@ def check_inplace_operator_method(self, defn: FuncBase) -> None:
def check_getattr_method(self, typ: Type, context: Context, name: str) -> None:
if len(self.scope.stack) == 1:
# module scope
- if name == '__getattribute__':
+ if name == "__getattribute__":
self.fail(message_registry.MODULE_LEVEL_GETATTRIBUTE, context)
return
# __getattr__ is fine at the module level as of Python 3.7 (PEP 562). We could
# show an error for Python < 3.7, but that would be annoying in code that supports
# both 3.7 and older versions.
- method_type = CallableType([self.named_type('builtins.str')],
- [nodes.ARG_POS],
- [None],
- AnyType(TypeOfAny.special_form),
- self.named_type('builtins.function'))
+ method_type = CallableType(
+ [self.named_type("builtins.str")],
+ [nodes.ARG_POS],
+ [None],
+ AnyType(TypeOfAny.special_form),
+ self.named_type("builtins.function"),
+ )
elif self.scope.active_class():
- method_type = CallableType([AnyType(TypeOfAny.special_form),
- self.named_type('builtins.str')],
- [nodes.ARG_POS, nodes.ARG_POS],
- [None, None],
- AnyType(TypeOfAny.special_form),
- self.named_type('builtins.function'))
+ method_type = CallableType(
+ [AnyType(TypeOfAny.special_form), self.named_type("builtins.str")],
+ [nodes.ARG_POS, nodes.ARG_POS],
+ [None, None],
+ AnyType(TypeOfAny.special_form),
+ self.named_type("builtins.function"),
+ )
else:
return
if not is_subtype(typ, method_type):
@@ -1430,39 +1688,56 @@ def check_getattr_method(self, typ: Type, context: Context, name: str) -> None:
def check_setattr_method(self, typ: Type, context: Context) -> None:
if not self.scope.active_class():
return
- method_type = CallableType([AnyType(TypeOfAny.special_form),
- self.named_type('builtins.str'),
- AnyType(TypeOfAny.special_form)],
- [nodes.ARG_POS, nodes.ARG_POS, nodes.ARG_POS],
- [None, None, None],
- NoneType(),
- self.named_type('builtins.function'))
+ method_type = CallableType(
+ [
+ AnyType(TypeOfAny.special_form),
+ self.named_type("builtins.str"),
+ AnyType(TypeOfAny.special_form),
+ ],
+ [nodes.ARG_POS, nodes.ARG_POS, nodes.ARG_POS],
+ [None, None, None],
+ NoneType(),
+ self.named_type("builtins.function"),
+ )
if not is_subtype(typ, method_type):
- self.msg.invalid_signature_for_special_method(typ, context, '__setattr__')
+ self.msg.invalid_signature_for_special_method(typ, context, "__setattr__")
def check_slots_definition(self, typ: Type, context: Context) -> None:
"""Check the type of __slots__."""
str_type = self.named_type("builtins.str")
- expected_type = UnionType([str_type,
- self.named_generic_type("typing.Iterable", [str_type])])
- self.check_subtype(typ, expected_type, context,
- message_registry.INVALID_TYPE_FOR_SLOTS,
- 'actual type',
- 'expected type',
- code=codes.ASSIGNMENT)
+ expected_type = UnionType(
+ [str_type, self.named_generic_type("typing.Iterable", [str_type])]
+ )
+ self.check_subtype(
+ typ,
+ expected_type,
+ context,
+ message_registry.INVALID_TYPE_FOR_SLOTS,
+ "actual type",
+ "expected type",
+ code=codes.ASSIGNMENT,
+ )
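+
+ # e.g. (our illustration): `__slots__ = ("x", "y")` and
+ # `__slots__ = ["x"]` both satisfy `str | Iterable[str]`, while
+ # `__slots__ = 42` fails with INVALID_TYPE_FOR_SLOTS.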
def check_match_args(self, var: Var, typ: Type, context: Context) -> None:
"""Check that __match_args__ contains literal strings"""
+ if not self.scope.active_class():
+ return
typ = get_proper_type(typ)
- if not isinstance(typ, TupleType) or \
- not all([is_string_literal(item) for item in typ.items]):
- self.msg.note("__match_args__ must be a tuple containing string literals for checking "
- "of match statements to work", context, code=codes.LITERAL_REQ)
+ if not isinstance(typ, TupleType) or not all(
+ [is_string_literal(item) for item in typ.items]
+ ):
+ self.msg.note(
+ "__match_args__ must be a tuple containing string literals for checking "
+ "of match statements to work",
+ context,
+ code=codes.LITERAL_REQ,
+ )
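+
+ # e.g. (our illustration): `__match_args__ = ("x", "y")` checks
+ # fine, while `__match_args__ = ["x"]` or a tuple built from
+ # non-literal strings triggers the LITERAL_REQ note above.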
- def expand_typevars(self, defn: FuncItem,
- typ: CallableType) -> List[Tuple[FuncItem, CallableType]]:
+ def expand_typevars(
+ self, defn: FuncItem, typ: CallableType
+ ) -> list[tuple[FuncItem, CallableType]]:
# TODO use generator
- subst: List[List[Tuple[TypeVarId, Type]]] = []
+ subst: list[list[tuple[TypeVarId, Type]]] = []
tvars = list(typ.variables) or []
if defn.info:
# Class type variables
@@ -1474,8 +1749,8 @@ def expand_typevars(self, defn: FuncItem,
# Make a copy of the function to check for each combination of
# value restricted type variables. (Except when running mypyc,
# where we need one canonical version of the function.)
- if subst and not self.options.mypyc:
- result: List[Tuple[FuncItem, CallableType]] = []
+ if subst and not (self.options.mypyc or self.options.inspections):
+ result: list[tuple[FuncItem, CallableType]] = []
for substitutions in itertools.product(*subst):
mapping = dict(substitutions)
expanded = cast(CallableType, expand_type(typ, mapping))
@@ -1484,7 +1759,7 @@ def expand_typevars(self, defn: FuncItem,
else:
return [(defn, typ)]
- def check_method_override(self, defn: Union[FuncDef, OverloadedFuncDef, Decorator]) -> None:
+ def check_method_override(self, defn: FuncDef | OverloadedFuncDef | Decorator) -> None:
"""Check if function definition is compatible with base classes.
This may defer the method if a signature is not available in at least one base class.
@@ -1495,10 +1770,9 @@ def check_method_override(self, defn: Union[FuncDef, OverloadedFuncDef, Decorato
# Node was deferred, we will have another attempt later.
return
- def check_method_or_accessor_override_for_base(self, defn: Union[FuncDef,
- OverloadedFuncDef,
- Decorator],
- base: TypeInfo) -> bool:
+ def check_method_or_accessor_override_for_base(
+ self, defn: FuncDef | OverloadedFuncDef | Decorator, base: TypeInfo
+ ) -> bool:
"""Check if method definition is compatible with a base class.
Return True if the node was deferred because one of the corresponding
@@ -1516,25 +1790,24 @@ def check_method_or_accessor_override_for_base(self, defn: Union[FuncDef,
self.check_if_final_var_override_writable(name, base_attr.node, defn)
# Check the type of override.
- if name not in ('__init__', '__new__', '__init_subclass__'):
+ if name not in ("__init__", "__new__", "__init_subclass__"):
# Check method override
# (__init__, __new__, __init_subclass__ are special).
if self.check_method_override_for_base_with_name(defn, name, base):
return True
if name in operators.inplace_operator_methods:
# Figure out the name of the corresponding operator method.
- method = '__' + name[3:]
+ method = "__" + name[3:]
# An inplace operator method such as __iadd__ might not be
# always introduced safely if a base class defined __add__.
# TODO can't come up with an example where this is
# necessary; now it's "just in case"
- return self.check_method_override_for_base_with_name(defn, method,
- base)
+ return self.check_method_override_for_base_with_name(defn, method, base)
return False
def check_method_override_for_base_with_name(
- self, defn: Union[FuncDef, OverloadedFuncDef, Decorator],
- name: str, base: TypeInfo) -> bool:
+ self, defn: FuncDef | OverloadedFuncDef | Decorator, name: str, base: TypeInfo
+ ) -> bool:
"""Check if overriding an attribute `name` of `base` with `defn` is valid.
Return True if the supertype node was not analysed yet, and `defn` was deferred.
@@ -1551,6 +1824,7 @@ def check_method_override_for_base_with_name(
context = defn.func
# Construct the type of the overriding method.
+ # TODO: this logic is much less complete than the similar one in checkmember.py
if isinstance(defn, (FuncDef, OverloadedFuncDef)):
typ: Type = self.function_type(defn)
override_class_or_static = defn.is_class or defn.is_static
@@ -1563,13 +1837,14 @@ def check_method_override_for_base_with_name(
override_class = defn.func.is_class
typ = get_proper_type(typ)
if isinstance(typ, FunctionLike) and not is_static(context):
- typ = bind_self(typ, self.scope.active_self_type(),
- is_classmethod=override_class)
+ typ = bind_self(typ, self.scope.active_self_type(), is_classmethod=override_class)
# Map the overridden method type to subtype context so that
# it can be checked for compatibility.
original_type = get_proper_type(base_attr.type)
original_node = base_attr.node
- if original_type is None:
+ # `original_type` can be partial if (e.g.) it is originally an
+ # instance variable from an `__init__` block that becomes deferred.
+ if original_type is None or isinstance(original_type, PartialType):
if self.pass_num < self.last_pass:
# If there are passes left, defer this node until next pass,
# otherwise try reconstructing the method type from available information.
@@ -1590,7 +1865,8 @@ def check_method_override_for_base_with_name(
else:
original_type = NoneType()
else:
- assert False, str(base_attr.node)
+ # Will always fail to typecheck below, since we know the node is a method
+ original_type = NoneType()
if isinstance(original_node, (FuncDef, OverloadedFuncDef)):
original_class_or_static = original_node.is_class or original_node.is_static
elif isinstance(original_node, Decorator):
@@ -1598,38 +1874,81 @@ def check_method_override_for_base_with_name(
original_class_or_static = fdef.is_class or fdef.is_static
else:
original_class_or_static = False # a variable can't be class or static
+
+ if isinstance(original_type, FunctionLike):
+ active_self_type = self.scope.active_self_type()
+ if isinstance(original_type, Overloaded) and active_self_type:
+ # If we have an overload, filter to overloads that match the self type.
+ # This avoids false positives for concrete subclasses of generic classes,
+ # see testSelfTypeOverrideCompatibility for an example.
+ # It's possible we might want to do this as part of bind_and_map_method
+ filtered_items = [
+ item
+ for item in original_type.items
+ if not item.arg_types or is_subtype(active_self_type, item.arg_types[0])
+ ]
+ # If we don't have any filtered_items, maybe it's always a valid override
+ # of the superclass? However, if you get to that point you're in murky type
+ # territory anyway, so we just preserve the type and have the behaviour match
+ # that of older versions of mypy.
+ if filtered_items:
+ original_type = Overloaded(filtered_items)
+ original_type = self.bind_and_map_method(base_attr, original_type, defn.info, base)
+ if original_node and is_property(original_node):
+ original_type = get_property_type(original_type)
+
+ if isinstance(typ, FunctionLike) and is_property(defn):
+ typ = get_property_type(typ)
+ if (
+ isinstance(original_node, Var)
+ and not original_node.is_final
+ and (not original_node.is_property or original_node.is_settable_property)
+ and isinstance(defn, Decorator)
+ ):
+ # We only give an error where no other similar errors will be given.
+ if not isinstance(original_type, AnyType):
+ self.msg.fail(
+ "Cannot override writeable attribute with read-only property",
+ # Give an error on function line to match old behaviour.
+ defn.func,
+ code=codes.OVERRIDE,
+ )
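+ # e.g. (our illustration): overriding a base-class attribute
+ # `x = 1` with `@property def x(self) -> int` in a subclass is
+ # now an error, since code typed against the base may assign to x.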
+
if isinstance(original_type, AnyType) or isinstance(typ, AnyType):
pass
elif isinstance(original_type, FunctionLike) and isinstance(typ, FunctionLike):
- original = self.bind_and_map_method(base_attr, original_type,
- defn.info, base)
# Check that the types are compatible.
# TODO overloaded signatures
- self.check_override(typ,
- original,
- defn.name,
- name,
- base.name,
- original_class_or_static,
- override_class_or_static,
- context)
+ self.check_override(
+ typ,
+ original_type,
+ defn.name,
+ name,
+ base.name,
+ original_class_or_static,
+ override_class_or_static,
+ context,
+ )
elif is_equivalent(original_type, typ):
# Assume invariance for a non-callable attribute here. Note
# that this doesn't affect read-only properties which can have
# covariant overrides.
#
pass
- elif (base_attr.node and not self.is_writable_attribute(base_attr.node)
- and is_subtype(typ, original_type)):
+ elif (
+ original_node
+ and not self.is_writable_attribute(original_node)
+ and is_subtype(typ, original_type)
+ ):
# If the attribute is read-only, allow covariance
pass
else:
- self.msg.signature_incompatible_with_supertype(
- defn.name, name, base.name, context)
+ self.msg.signature_incompatible_with_supertype(defn.name, name, base.name, context)
return False
- def bind_and_map_method(self, sym: SymbolTableNode, typ: FunctionLike,
- sub_info: TypeInfo, super_info: TypeInfo) -> FunctionLike:
+ def bind_and_map_method(
+ self, sym: SymbolTableNode, typ: FunctionLike, sub_info: TypeInfo, super_info: TypeInfo
+ ) -> FunctionLike:
"""Bind self-type and map type variables for a method.
Arguments:
@@ -1638,8 +1957,9 @@ def bind_and_map_method(self, sym: SymbolTableNode, typ: FunctionLike,
sub_info: class where the method is used
super_info: class where the method was defined
"""
- if (isinstance(sym.node, (FuncDef, OverloadedFuncDef, Decorator))
- and not is_static(sym.node)):
+ if isinstance(sym.node, (FuncDef, OverloadedFuncDef, Decorator)) and not is_static(
+ sym.node
+ ):
if isinstance(sym.node, Decorator):
is_class_method = sym.node.func.is_class
else:
@@ -1649,7 +1969,7 @@ def bind_and_map_method(self, sym: SymbolTableNode, typ: FunctionLike,
bound = typ
return cast(FunctionLike, map_type_from_supertype(bound, sub_info, super_info))
- def get_op_other_domain(self, tp: FunctionLike) -> Optional[Type]:
+ def get_op_other_domain(self, tp: FunctionLike) -> Type | None:
if isinstance(tp, CallableType):
if tp.arg_kinds and tp.arg_kinds[0] == ARG_POS:
return tp.arg_types[0]
@@ -1663,11 +1983,17 @@ def get_op_other_domain(self, tp: FunctionLike) -> Optional[Type]:
else:
assert False, "Need to check all FunctionLike subtypes here"
- def check_override(self, override: FunctionLike, original: FunctionLike,
- name: str, name_in_super: str, supertype: str,
- original_class_or_static: bool,
- override_class_or_static: bool,
- node: Context) -> None:
+ def check_override(
+ self,
+ override: FunctionLike,
+ original: FunctionLike,
+ name: str,
+ name_in_super: str,
+ supertype: str,
+ original_class_or_static: bool,
+ override_class_or_static: bool,
+ node: Context,
+ ) -> None:
"""Check a method override with given signatures.
Arguments:
@@ -1687,11 +2013,14 @@ def check_override(self, override: FunctionLike, original: FunctionLike,
# this could be unsafe with reverse operator methods.
original_domain = self.get_op_other_domain(original)
override_domain = self.get_op_other_domain(override)
- if (original_domain and override_domain and
- not is_subtype(override_domain, original_domain)):
+ if (
+ original_domain
+ and override_domain
+ and not is_subtype(override_domain, original_domain)
+ ):
fail = True
op_method_wider_note = True
- if isinstance(original, FunctionLike) and isinstance(override, FunctionLike):
+ if isinstance(override, FunctionLike):
if original_class_or_static and not override_class_or_static:
fail = True
elif isinstance(original, CallableType) and isinstance(override, CallableType):
@@ -1703,10 +2032,19 @@ def check_override(self, override: FunctionLike, original: FunctionLike,
if fail:
emitted_msg = False
- if (isinstance(override, CallableType) and
- isinstance(original, CallableType) and
- len(override.arg_types) == len(original.arg_types) and
- override.min_args == original.min_args):
+
+ # Normalize signatures, so we get better diagnostics.
+ if isinstance(override, (CallableType, Overloaded)):
+ override = override.with_unpacked_kwargs()
+ if isinstance(original, (CallableType, Overloaded)):
+ original = original.with_unpacked_kwargs()
+
+ if (
+ isinstance(override, CallableType)
+ and isinstance(original, CallableType)
+ and len(override.arg_types) == len(original.arg_types)
+ and override.min_args == original.min_args
+ ):
# Give more detailed messages for the common case of both
# signatures having the same number of arguments and no
# overloads.
@@ -1726,8 +2064,9 @@ def erase_override(t: Type) -> Type:
return erase_typevars(t, ids_to_erase=override_ids)
for i in range(len(override.arg_types)):
- if not is_subtype(original.arg_types[i],
- erase_override(override.arg_types[i])):
+ if not is_subtype(
+ original.arg_types[i], erase_override(override.arg_types[i])
+ ):
arg_type_in_super = original.arg_types[i]
self.msg.argument_incompatible_with_supertype(
i + 1,
@@ -1736,14 +2075,14 @@ def erase_override(t: Type) -> Type:
name_in_super,
arg_type_in_super,
supertype,
- node
+ node,
)
emitted_msg = True
- if not is_subtype(erase_override(override.ret_type),
- original.ret_type):
+ if not is_subtype(erase_override(override.ret_type), original.ret_type):
self.msg.return_type_incompatible_with_supertype(
- name, name_in_super, supertype, original.ret_type, override.ret_type, node)
+ name, name_in_super, supertype, original.ret_type, override.ret_type, node
+ )
emitted_msg = True
elif isinstance(override, Overloaded) and isinstance(original, Overloaded):
# Give a more detailed message in the case where the user is trying to
@@ -1762,16 +2101,21 @@ def erase_override(t: Type) -> Type:
if len(order) == len(original.items) and order != sorted(order):
self.msg.overload_signature_incompatible_with_supertype(
- name, name_in_super, supertype, node)
+ name, name_in_super, supertype, node
+ )
emitted_msg = True
if not emitted_msg:
# Fall back to generic incompatibility message.
self.msg.signature_incompatible_with_supertype(
- name, name_in_super, supertype, node, original=original, override=override)
+ name, name_in_super, supertype, node, original=original, override=override
+ )
if op_method_wider_note:
- self.note("Overloaded operator methods can't have wider argument types"
- " in overrides", node, code=codes.OVERRIDE)
+ self.note(
+ "Overloaded operator methods can't have wider argument types in overrides",
+ node,
+ code=codes.OVERRIDE,
+ )
def check__exit__return_type(self, defn: FuncItem) -> None:
"""Generate error if the return type of __exit__ is problematic.
@@ -1792,8 +2136,10 @@ def check__exit__return_type(self, defn: FuncItem) -> None:
if not returns:
return
- if all(isinstance(ret.expr, NameExpr) and ret.expr.fullname == 'builtins.False'
- for ret in returns):
+ if all(
+ isinstance(ret.expr, NameExpr) and ret.expr.fullname == "builtins.False"
+ for ret in returns
+ ):
self.msg.incorrect__exit__return(defn)
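+ # e.g. (our illustration): an `__exit__` annotated `-> bool` whose
+ # only returns are literal `False` is flagged here; annotating it
+ # `-> None` (or `Literal[False]`) avoids making the context manager
+ # look like it may swallow exceptions.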
def visit_class_def(self, defn: ClassDef) -> None:
@@ -1815,14 +2161,16 @@ def visit_class_def(self, defn: ClassDef) -> None:
if not defn.has_incompatible_baseclass:
# Otherwise we've already found errors; more errors are not useful
self.check_multiple_inheritance(typ)
+ self.check_metaclass_compatibility(typ)
self.check_final_deletable(typ)
if defn.decorators:
sig: Type = type_object_type(defn.info, self.named_type)
# Decorators are applied in reverse order.
for decorator in reversed(defn.decorators):
- if (isinstance(decorator, CallExpr)
- and isinstance(decorator.analyzed, PromoteExpr)):
+ if isinstance(decorator, CallExpr) and isinstance(
+ decorator.analyzed, PromoteExpr
+ ):
# _promote is a special type checking related construct.
continue
@@ -1830,15 +2178,36 @@ def visit_class_def(self, defn: ClassDef) -> None:
temp = self.temp_node(sig, context=decorator)
fullname = None
if isinstance(decorator, RefExpr):
- fullname = decorator.fullname
+ fullname = decorator.fullname or None
# TODO: Figure out how to have clearer error messages.
# (e.g. "class decorator must be a function that accepts a type."
- sig, _ = self.expr_checker.check_call(dec, [temp],
- [nodes.ARG_POS], defn,
- callable_name=fullname)
+ old_allow_abstract_call = self.allow_abstract_call
+ self.allow_abstract_call = True
+ sig, _ = self.expr_checker.check_call(
+ dec, [temp], [nodes.ARG_POS], defn, callable_name=fullname
+ )
+ self.allow_abstract_call = old_allow_abstract_call
# TODO: Apply the sig to the actual TypeInfo so we can handle decorators
# that completely swap out the type. (e.g. Callable[[Type[A]], Type[B]])
+ if typ.defn.type_vars:
+ for base_inst in typ.bases:
+ for base_tvar, base_decl_tvar in zip(
+ base_inst.args, base_inst.type.defn.type_vars
+ ):
+ if (
+ isinstance(base_tvar, TypeVarType)
+ and base_tvar.variance != INVARIANT
+ and isinstance(base_decl_tvar, TypeVarType)
+ and base_decl_tvar.variance != base_tvar.variance
+ ):
+ self.fail(
+ f'Variance of TypeVar "{base_tvar.name}" incompatible '
+ "with variance in parent type",
+ context=defn,
+ code=codes.TYPE_VAR,
+ )
+
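+ # e.g. what the variance check just above flags -- our illustration:
+ #
+ #     T = TypeVar("T")                        # invariant
+ #     T_co = TypeVar("T_co", covariant=True)
+ #     class Base(Generic[T]): ...
+ #     class Sub(Base[T_co], Generic[T_co]): ...  # error [type-var]
+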
if typ.is_protocol and typ.defn.type_vars:
self.check_protocol_variance(defn)
if not defn.has_incompatible_baseclass and defn.info.is_enum:
@@ -1867,25 +2236,27 @@ def check_init_subclass(self, defn: ClassDef) -> None:
Base.__init_subclass__(thing=5) is called at line 4. This is what we simulate here.
Child.__init_subclass__ is never called.
"""
- if (defn.info.metaclass_type and
- defn.info.metaclass_type.type.fullname not in ('builtins.type', 'abc.ABCMeta')):
+ if defn.info.metaclass_type and defn.info.metaclass_type.type.fullname not in (
+ "builtins.type",
+ "abc.ABCMeta",
+ ):
# We can't safely check situations when both __init_subclass__ and a custom
# metaclass are present.
return
# At runtime, only Base.__init_subclass__ will be called, so
# we skip the current class itself.
for base in defn.info.mro[1:]:
- if '__init_subclass__' not in base.names:
+ if "__init_subclass__" not in base.names:
continue
name_expr = NameExpr(defn.name)
name_expr.node = base
- callee = MemberExpr(name_expr, '__init_subclass__')
+ callee = MemberExpr(name_expr, "__init_subclass__")
args = list(defn.keywords.values())
- arg_names: List[Optional[str]] = list(defn.keywords.keys())
+ arg_names: list[str | None] = list(defn.keywords.keys())
# 'metaclass' keyword is consumed by the rest of the type machinery,
# and is never passed to __init_subclass__ implementations
- if 'metaclass' in arg_names:
- idx = arg_names.index('metaclass')
+ if "metaclass" in arg_names:
+ idx = arg_names.index("metaclass")
arg_names.pop(idx)
args.pop(idx)
arg_kinds = [ARG_NAMED] * len(args)
@@ -1893,9 +2264,7 @@ def check_init_subclass(self, defn: ClassDef) -> None:
call_expr.line = defn.line
call_expr.column = defn.column
call_expr.end_line = defn.end_line
- self.expr_checker.accept(call_expr,
- allow_none_return=True,
- always_allow_any=True)
+ self.expr_checker.accept(call_expr, allow_none_return=True, always_allow_any=True)
# We are only interested in the first Base having __init_subclass__;
# all other bases have already been checked.
break
@@ -1904,13 +2273,14 @@ def check_enum(self, defn: ClassDef) -> None:
assert defn.info.is_enum
if defn.info.fullname not in ENUM_BASES:
for sym in defn.info.names.values():
- if (isinstance(sym.node, Var) and sym.node.has_explicit_value and
- sym.node.name == '__members__'):
+ if (
+ isinstance(sym.node, Var)
+ and sym.node.has_explicit_value
+ and sym.node.name == "__members__"
+ ):
# `__members__` will always be overwritten by `Enum` and is considered
# read-only, so we disallow assigning a value to it
- self.fail(
- message_registry.ENUM_MEMBERS_ATTR_WILL_BE_OVERRIDEN, sym.node
- )
+ self.fail(message_registry.ENUM_MEMBERS_ATTR_WILL_BE_OVERRIDEN, sym.node)
for base in defn.info.mro[1:-1]: # we don't need self and `object`
if base.is_enum and base.fullname not in ENUM_BASES:
self.check_final_enum(defn, base)
@@ -1921,12 +2291,7 @@ def check_enum(self, defn: ClassDef) -> None:
def check_final_enum(self, defn: ClassDef, base: TypeInfo) -> None:
for sym in base.names.values():
if self.is_final_enum_value(sym):
- self.fail(
- 'Cannot extend enum with existing members: "{}"'.format(
- base.name,
- ),
- defn,
- )
+ self.fail(f'Cannot extend enum with existing members: "{base.name}"', defn)
break
def is_final_enum_value(self, sym: SymbolTableNode) -> bool:
@@ -1951,30 +2316,40 @@ def is_final_enum_value(self, sym: SymbolTableNode) -> bool:
):
return False
- if self.is_stub or sym.node.has_explicit_value:
- return True
- return False
+ return self.is_stub or sym.node.has_explicit_value
def check_enum_bases(self, defn: ClassDef) -> None:
- enum_base: Optional[Instance] = None
+ """
+ Non-enum mixins cannot appear after enum bases; this is disallowed at runtime:
+
+ class Foo: ...
+ class Bar(enum.Enum, Foo): ...
+
+ But any number of enum mixins can appear in a class definition
+ (even if multiple enum bases define __new__). So this is fine:
+
+ class Foo(enum.Enum):
+ def __new__(cls, val): ...
+ class Bar(enum.Enum):
+ def __new__(cls, val): ...
+ class Baz(int, Foo, Bar, enum.Flag): ...
+ """
+ enum_base: Instance | None = None
for base in defn.info.bases:
if enum_base is None and base.type.is_enum:
enum_base = base
continue
- elif enum_base is not None:
- self.fail(
- 'No base classes are allowed after "{}"'.format(enum_base),
- defn,
- )
+ elif enum_base is not None and not base.type.is_enum:
+ self.fail(f'No non-enum mixin classes are allowed after "{enum_base}"', defn)
break
def check_enum_new(self, defn: ClassDef) -> None:
def has_new_method(info: TypeInfo) -> bool:
- new_method = info.get('__new__')
+ new_method = info.get("__new__")
return bool(
new_method
and new_method.node
- and new_method.node.fullname != 'builtins.object.__new__'
+ and new_method.node.fullname != "builtins.object.__new__"
)
has_new = False
@@ -1983,16 +2358,13 @@ def has_new_method(info: TypeInfo) -> bool:
if base.type.is_enum:
# If we have an `Enum`, then we need to check all its bases.
- candidate = any(
- not b.is_enum and has_new_method(b)
- for b in base.type.mro[1:-1]
- )
+ candidate = any(not b.is_enum and has_new_method(b) for b in base.type.mro[1:-1])
else:
candidate = has_new_method(base.type)
if candidate and has_new:
self.fail(
- 'Only a single data type mixin is allowed for Enum subtypes, '
+ "Only a single data type mixin is allowed for Enum subtypes, "
'found extra "{}"'.format(base),
defn,
)
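+ # e.g. (our illustration): `class E(int, str, enum.Enum): ...` is
+ # flagged, since both `int` and `str` contribute a data-type
+ # `__new__`.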
@@ -2011,11 +2383,11 @@ def check_protocol_variance(self, defn: ClassDef) -> None:
object_type = Instance(info.mro[-1], [])
tvars = info.defn.type_vars
for i, tvar in enumerate(tvars):
- up_args: List[Type] = [
+ up_args: list[Type] = [
object_type if i == j else AnyType(TypeOfAny.special_form)
for j, _ in enumerate(tvars)
]
- down_args: List[Type] = [
+ down_args: list[Type] = [
UninhabitedType() if i == j else AnyType(TypeOfAny.special_form)
for j, _ in enumerate(tvars)
]
@@ -2044,29 +2416,39 @@ def check_multiple_inheritance(self, typ: TypeInfo) -> None:
for name in non_overridden_attrs:
if is_private(name):
continue
- for base2 in mro[i + 1:]:
+ for base2 in mro[i + 1 :]:
# We only need to check compatibility of attributes from classes not
# in a subclass relationship. For subclasses, normal (single inheritance)
# checks suffice (these are implemented elsewhere).
if name in base2.names and base2 not in base.mro:
self.check_compatibility(name, base, base2, typ)
- def determine_type_of_class_member(self, sym: SymbolTableNode) -> Optional[Type]:
+ def determine_type_of_member(self, sym: SymbolTableNode) -> Type | None:
if sym.type is not None:
return sym.type
if isinstance(sym.node, FuncBase):
return self.function_type(sym.node)
if isinstance(sym.node, TypeInfo):
- # nested class
- return type_object_type(sym.node, self.named_type)
+ if sym.node.typeddict_type:
+ # We special-case TypedDict, because they don't define any constructor.
+ return self.expr_checker.typeddict_callable(sym.node)
+ else:
+ return type_object_type(sym.node, self.named_type)
if isinstance(sym.node, TypeVarExpr):
# Use of TypeVars is rejected in an expression/runtime context, so
# we don't need to check supertype compatibility for them.
return AnyType(TypeOfAny.special_form)
+ if isinstance(sym.node, TypeAlias):
+ with self.msg.filter_errors():
+ # Suppress any errors; they will be given when analyzing the corresponding node.
+ # Here we may have incorrect options and location context.
+ return self.expr_checker.alias_type_in_runtime_context(sym.node, ctx=sym.node)
+ # TODO: handle more node kinds here.
return None
- def check_compatibility(self, name: str, base1: TypeInfo,
- base2: TypeInfo, ctx: TypeInfo) -> None:
+ def check_compatibility(
+ self, name: str, base1: TypeInfo, base2: TypeInfo, ctx: TypeInfo
+ ) -> None:
"""Check if attribute name in base1 is compatible with base2 in multiple inheritance.
Assume base1 comes before base2 in the MRO, and that base1 and base2 don't have
@@ -2087,32 +2469,42 @@ class C(B, A[int]): ... # this is unsafe because...
x: A[int] = C()
x.foo # ...runtime type is (str) -> None, while static type is (int) -> None
"""
- if name in ('__init__', '__new__', '__init_subclass__'):
+ if name in ("__init__", "__new__", "__init_subclass__"):
# __init__ and friends can be incompatible -- it's a special case.
return
first = base1.names[name]
second = base2.names[name]
- first_type = get_proper_type(self.determine_type_of_class_member(first))
- second_type = get_proper_type(self.determine_type_of_class_member(second))
+ first_type = get_proper_type(self.determine_type_of_member(first))
+ second_type = get_proper_type(self.determine_type_of_member(second))
- if (isinstance(first_type, FunctionLike) and
- isinstance(second_type, FunctionLike)):
+ if isinstance(first_type, FunctionLike) and isinstance(second_type, FunctionLike):
if first_type.is_type_obj() and second_type.is_type_obj():
# For class objects only check the subtype relationship of the classes,
# since we allow incompatible overrides of '__init__'/'__new__'
- ok = is_subtype(left=fill_typevars_with_any(first_type.type_object()),
- right=fill_typevars_with_any(second_type.type_object()))
+ ok = is_subtype(
+ left=fill_typevars_with_any(first_type.type_object()),
+ right=fill_typevars_with_any(second_type.type_object()),
+ )
else:
# First bind/map method types when necessary.
first_sig = self.bind_and_map_method(first, first_type, ctx, base1)
second_sig = self.bind_and_map_method(second, second_type, ctx, base2)
ok = is_subtype(first_sig, second_sig, ignore_pos_arg_names=True)
elif first_type and second_type:
+ if isinstance(first.node, Var):
+ first_type = expand_self_type(first.node, first_type, fill_typevars(ctx))
+ if isinstance(second.node, Var):
+ second_type = expand_self_type(second.node, second_type, fill_typevars(ctx))
ok = is_equivalent(first_type, second_type)
if not ok:
second_node = base2[name].node
- if isinstance(second_node, Decorator) and second_node.func.is_property:
- ok = is_subtype(first_type, cast(CallableType, second_type).ret_type)
+ if (
+ isinstance(second_type, FunctionLike)
+ and second_node is not None
+ and is_property(second_node)
+ ):
+ second_type = get_property_type(second_type)
+ ok = is_subtype(first_type, second_type)
else:
if first_type is None:
self.msg.cannot_determine_type_in_base(name, base1.name, ctx)
@@ -2130,8 +2522,36 @@ class C(B, A[int]): ... # this is unsafe because...
if isinstance(second.node, Var) and second.node.allow_incompatible_override:
ok = True
if not ok:
- self.msg.base_class_definitions_incompatible(name, base1, base2,
- ctx)
+ self.msg.base_class_definitions_incompatible(name, base1, base2, ctx)
+
+ def check_metaclass_compatibility(self, typ: TypeInfo) -> None:
+ """Ensures that metaclasses of all parent types are compatible."""
+ if (
+ typ.is_metaclass()
+ or typ.is_protocol
+ or typ.is_named_tuple
+ or typ.is_enum
+ or typ.typeddict_type is not None
+ ):
+ return # Reasonable exceptions from this check
+
+ metaclasses = [
+ entry.metaclass_type
+ for entry in typ.mro[1:-1]
+ if entry.metaclass_type
+ and not is_named_instance(entry.metaclass_type, "builtins.type")
+ ]
+ if not metaclasses:
+ return
+ if typ.metaclass_type is not None and all(
+ is_subtype(typ.metaclass_type, meta) for meta in metaclasses
+ ):
+ return
+ self.fail(
+ "Metaclass conflict: the metaclass of a derived class must be "
+ "a (non-strict) subclass of the metaclasses of all its bases",
+ typ,
+ )
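# Minimal sketch, assuming default options, of what check_metaclass_compatibility
# now flags (CPython also raises TypeError for this at class creation time):
class MetaA(type): ...
class MetaB(type): ...

class A(metaclass=MetaA): ...
class B(metaclass=MetaB): ...

class C(A, B):  # error: Metaclass conflict: the metaclass of a derived class
    ...         # must be a (non-strict) subclass of the metaclasses of all its bases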
def visit_import_from(self, node: ImportFrom) -> None:
self.check_import(node)
@@ -2139,8 +2559,8 @@ def visit_import_from(self, node: ImportFrom) -> None:
def visit_import_all(self, node: ImportAll) -> None:
self.check_import(node)
- def visit_import(self, s: Import) -> None:
- pass
+ def visit_import(self, node: Import) -> None:
+ self.check_import(node)
def check_import(self, node: ImportBase) -> None:
for assign in node.assignments:
@@ -2149,11 +2569,17 @@ def check_import(self, node: ImportBase) -> None:
if lvalue_type is None:
# TODO: This is broken.
lvalue_type = AnyType(TypeOfAny.special_form)
- message = '{} "{}"'.format(message_registry.INCOMPATIBLE_IMPORT_OF,
- cast(NameExpr, assign.rvalue).name)
- self.check_simple_assignment(lvalue_type, assign.rvalue, node,
- msg=message, lvalue_name='local name',
- rvalue_name='imported name')
+ message = message_registry.INCOMPATIBLE_IMPORT_OF.format(
+ cast(NameExpr, assign.rvalue).name
+ )
+ self.check_simple_assignment(
+ lvalue_type,
+ assign.rvalue,
+ node,
+ msg=message,
+ lvalue_name="local name",
+ rvalue_name="imported name",
+ )
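# Hedged sketch of the import compatibility check; module "m" and its
# attribute are assumed for illustration (say m.x has type int):
x: str = "a"
from m import x  # error: Incompatible import of "x"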
#
# Statements
@@ -2174,9 +2600,12 @@ def visit_block(self, b: Block) -> None:
self.accept(s)
def should_report_unreachable_issues(self) -> bool:
- return (self.in_checked_function()
- and self.options.warn_unreachable
- and not self.binder.is_unreachable_warning_suppressed())
+ return (
+ self.in_checked_function()
+ and self.options.warn_unreachable
+ and not self.current_node_deferred
+ and not self.binder.is_unreachable_warning_suppressed()
+ )
def is_raising_or_empty(self, s: Statement) -> bool:
"""Returns 'true' if the given statement either throws an error of some kind
@@ -2195,9 +2624,12 @@ def is_raising_or_empty(self, s: Statement) -> bool:
if isinstance(s.expr, EllipsisExpr):
return True
elif isinstance(s.expr, CallExpr):
- with self.expr_checker.msg.disable_errors():
- typ = get_proper_type(self.expr_checker.accept(
- s.expr, allow_none_return=True, always_allow_any=True))
+ with self.expr_checker.msg.filter_errors():
+ typ = get_proper_type(
+ self.expr_checker.accept(
+ s.expr, allow_none_return=True, always_allow_any=True
+ )
+ )
if isinstance(typ, UninhabitedType):
return True
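# Rough sketch (assumed example): a bare call whose inferred type is
# uninhabited counts as raising for the unreachability checks above.
from typing import NoReturn

def bail() -> NoReturn:
    raise RuntimeError("always raises")

# The statement "bail()" has UninhabitedType, so is_raising_or_empty treats
# it like an explicit "raise".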
@@ -2218,14 +2650,17 @@ def visit_assignment_stmt(self, s: AssignmentStmt) -> None:
if s.is_alias_def:
self.check_type_alias_rvalue(s)
- if (s.type is not None and
- self.options.disallow_any_unimported and
- has_any_from_unimported_type(s.type)):
+ if (
+ s.type is not None
+ and self.options.disallow_any_unimported
+ and has_any_from_unimported_type(s.type)
+ ):
if isinstance(s.lvalues[-1], TupleExpr):
# This is a multiple assignment. Instead of figuring out which type is problematic,
# give a generic error message.
- self.msg.unimported_type_becomes_any("A type on this line",
- AnyType(TypeOfAny.special_form), s)
+ self.msg.unimported_type_becomes_any(
+ "A type on this line", AnyType(TypeOfAny.special_form), s
+ )
else:
self.msg.unimported_type_becomes_any("Type of variable", s.type, s)
check_for_explicit_any(s.type, self.options, self.is_typeshed_stub, self.msg, context=s)
@@ -2233,83 +2668,78 @@ def visit_assignment_stmt(self, s: AssignmentStmt) -> None:
if len(s.lvalues) > 1:
# Chained assignment (e.g. x = y = ...).
# Make sure that rvalue type will not be reinferred.
- if s.rvalue not in self.type_map:
+ if not self.has_type(s.rvalue):
self.expr_checker.accept(s.rvalue)
- rvalue = self.temp_node(self.type_map[s.rvalue], s)
+ rvalue = self.temp_node(self.lookup_type(s.rvalue), s)
for lv in s.lvalues[:-1]:
with self.enter_final_context(s.is_final_def):
self.check_assignment(lv, rvalue, s.type is None)
self.check_final(s)
- if (s.is_final_def and s.type and not has_no_typevars(s.type)
- and self.scope.active_class() is not None):
+ if (
+ s.is_final_def
+ and s.type
+ and not has_no_typevars(s.type)
+ and self.scope.active_class() is not None
+ ):
self.fail(message_registry.DEPENDENT_FINAL_IN_CLASS_BODY, s)
- def check_type_alias_rvalue(self, s: AssignmentStmt) -> None:
- if not (self.is_stub and isinstance(s.rvalue, OpExpr) and s.rvalue.op == '|'):
- # We do this mostly for compatibility with old semantic analyzer.
- # TODO: should we get rid of this?
- alias_type = self.expr_checker.accept(s.rvalue)
- else:
- # Avoid type checking 'X | Y' in stubs, since there can be errors
- # on older Python targets.
- alias_type = AnyType(TypeOfAny.special_form)
-
- def accept_items(e: Expression) -> None:
- if isinstance(e, OpExpr) and e.op == '|':
- accept_items(e.left)
- accept_items(e.right)
- else:
- # Nested union types have been converted to type context
- # in semantic analysis (such as in 'list[int | str]'),
- # so we don't need to deal with them here.
- self.expr_checker.accept(e)
+ if s.unanalyzed_type and not self.in_checked_function():
+ self.msg.annotation_in_unchecked_function(context=s)
- accept_items(s.rvalue)
+ def check_type_alias_rvalue(self, s: AssignmentStmt) -> None:
+ alias_type = self.expr_checker.accept(s.rvalue)
self.store_type(s.lvalues[-1], alias_type)
- def check_assignment(self, lvalue: Lvalue, rvalue: Expression, infer_lvalue_type: bool = True,
- new_syntax: bool = False) -> None:
+ def check_assignment(
+ self,
+ lvalue: Lvalue,
+ rvalue: Expression,
+ infer_lvalue_type: bool = True,
+ new_syntax: bool = False,
+ ) -> None:
"""Type check a single assignment: lvalue = rvalue."""
if isinstance(lvalue, TupleExpr) or isinstance(lvalue, ListExpr):
- self.check_assignment_to_multiple_lvalues(lvalue.items, rvalue, rvalue,
- infer_lvalue_type)
+ self.check_assignment_to_multiple_lvalues(
+ lvalue.items, rvalue, rvalue, infer_lvalue_type
+ )
else:
- self.try_infer_partial_generic_type_from_assignment(lvalue, rvalue, '=')
+ self.try_infer_partial_generic_type_from_assignment(lvalue, rvalue, "=")
lvalue_type, index_lvalue, inferred = self.check_lvalue(lvalue)
# If we're assigning to __getattr__ or similar methods, check that the signature is
# valid.
if isinstance(lvalue, NameExpr) and lvalue.node:
name = lvalue.node.name
- if name in ('__setattr__', '__getattribute__', '__getattr__'):
+ if name in ("__setattr__", "__getattribute__", "__getattr__"):
# If an explicit type is given, use that.
if lvalue_type:
signature = lvalue_type
else:
signature = self.expr_checker.accept(rvalue)
if signature:
- if name == '__setattr__':
+ if name == "__setattr__":
self.check_setattr_method(signature, lvalue)
else:
self.check_getattr_method(signature, lvalue, name)
- if name == '__slots__':
+ if name == "__slots__":
typ = lvalue_type or self.expr_checker.accept(rvalue)
self.check_slots_definition(typ, lvalue)
- if name == '__match_args__' and inferred is not None:
+ if name == "__match_args__" and inferred is not None:
typ = self.expr_checker.accept(rvalue)
self.check_match_args(inferred, typ, lvalue)
# Defer PartialType's super type checking.
- if (isinstance(lvalue, RefExpr) and
- not (isinstance(lvalue_type, PartialType) and
- lvalue_type.type is None) and
- not (isinstance(lvalue, NameExpr) and lvalue.name == '__match_args__')):
+ if (
+ isinstance(lvalue, RefExpr)
+ and not (isinstance(lvalue_type, PartialType) and lvalue_type.type is None)
+ and not (isinstance(lvalue, NameExpr) and lvalue.name == "__match_args__")
+ ):
if self.check_compatibility_all_supers(lvalue, lvalue_type, rvalue):
# We hit an error on this line; don't check for any others
return
- if isinstance(lvalue, MemberExpr) and lvalue.name == '__match_args__':
+ if isinstance(lvalue, MemberExpr) and lvalue.name == "__match_args__":
self.fail(message_registry.CANNOT_MODIFY_MATCH_ARGS, lvalue)
if lvalue_type:
@@ -2321,15 +2751,14 @@ def check_assignment(self, lvalue: Lvalue, rvalue: Expression, infer_lvalue_type
# None initializers preserve the partial None type.
return
- if is_valid_inferred_type(rvalue_type):
- var = lvalue_type.var
+ var = lvalue_type.var
+ if is_valid_inferred_type(rvalue_type, is_lvalue_final=var.is_final):
partial_types = self.find_partial_types(var)
if partial_types is not None:
if not self.current_node_deferred:
# Partial type can't be final, so strip any literal values.
rvalue_type = remove_instance_last_known_values(rvalue_type)
- inferred_type = make_simplified_union(
- [rvalue_type, NoneType()])
+ inferred_type = make_simplified_union([rvalue_type, NoneType()])
self.set_inferred_type(var, lvalue, inferred_type)
else:
var.type = None
@@ -2340,22 +2769,27 @@ def check_assignment(self, lvalue: Lvalue, rvalue: Expression, infer_lvalue_type
# an error will be reported elsewhere.
self.infer_partial_type(lvalue_type.var, lvalue, rvalue_type)
# Handle None PartialType's super type checking here, after it's resolved.
- if (isinstance(lvalue, RefExpr) and
- self.check_compatibility_all_supers(lvalue, lvalue_type, rvalue)):
+ if isinstance(lvalue, RefExpr) and self.check_compatibility_all_supers(
+ lvalue, lvalue_type, rvalue
+ ):
# We hit an error on this line; don't check for any others
return
- elif (is_literal_none(rvalue) and
- isinstance(lvalue, NameExpr) and
- isinstance(lvalue.node, Var) and
- lvalue.node.is_initialized_in_class and
- not new_syntax):
+ elif (
+ is_literal_none(rvalue)
+ and isinstance(lvalue, NameExpr)
+ and isinstance(lvalue.node, Var)
+ and lvalue.node.is_initialized_in_class
+ and not new_syntax
+ ):
            # Allow None to be assigned to class variables with non-Optional types.
rvalue_type = lvalue_type
- elif (isinstance(lvalue, MemberExpr) and
- lvalue.kind is None): # Ignore member access to modules
+ elif (
+ isinstance(lvalue, MemberExpr) and lvalue.kind is None
+ ): # Ignore member access to modules
instance_type = self.expr_checker.accept(lvalue.expr)
rvalue_type, lvalue_type, infer_lvalue_type = self.check_member_assignment(
- instance_type, lvalue_type, rvalue, context=rvalue)
+ instance_type, lvalue_type, rvalue, context=rvalue
+ )
else:
# Hacky special case for assigning a literal None
# to a variable defined in a previous if
@@ -2363,13 +2797,15 @@ def check_assignment(self, lvalue: Lvalue, rvalue: Expression, infer_lvalue_type
# make the type optional. This is somewhat
# unpleasant, and a generalization of this would
# be an improvement!
- if (is_literal_none(rvalue) and
- isinstance(lvalue, NameExpr) and
- lvalue.kind == LDEF and
- isinstance(lvalue.node, Var) and
- lvalue.node.type and
- lvalue.node in self.var_decl_frames and
- not isinstance(get_proper_type(lvalue_type), AnyType)):
+ if (
+ is_literal_none(rvalue)
+ and isinstance(lvalue, NameExpr)
+ and lvalue.kind == LDEF
+ and isinstance(lvalue.node, Var)
+ and lvalue.node.type
+ and lvalue.node in self.var_decl_frames
+ and not isinstance(get_proper_type(lvalue_type), AnyType)
+ ):
decl_frame_map = self.var_decl_frames[lvalue.node]
# Check if the nearest common ancestor frame for the definition site
# and the current site is the enclosing frame of an if/elif/else block.
@@ -2382,21 +2818,26 @@ def check_assignment(self, lvalue: Lvalue, rvalue: Expression, infer_lvalue_type
lvalue_type = make_optional_type(lvalue_type)
self.set_inferred_type(lvalue.node, lvalue, lvalue_type)
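# Rough sketch (assumed example) of the special case above: a literal None
# assigned in the else-branch of the if that defined the variable widens the
# inferred type to Optional instead of producing an error.
def f(flag: bool) -> None:
    if flag:
        x = 1
    else:
        x = None  # x is widened to "Optional[int]" here rather than rejected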
- rvalue_type = self.check_simple_assignment(lvalue_type, rvalue, context=rvalue,
- code=codes.ASSIGNMENT)
+ rvalue_type = self.check_simple_assignment(lvalue_type, rvalue, context=rvalue)
# Special case: only non-abstract non-protocol classes can be assigned to
# variables with explicit type Type[A], where A is protocol or abstract.
- rvalue_type = get_proper_type(rvalue_type)
- lvalue_type = get_proper_type(lvalue_type)
- if (isinstance(rvalue_type, CallableType) and rvalue_type.is_type_obj() and
- (rvalue_type.type_object().is_abstract or
- rvalue_type.type_object().is_protocol) and
- isinstance(lvalue_type, TypeType) and
- isinstance(lvalue_type.item, Instance) and
- (lvalue_type.item.type.is_abstract or
- lvalue_type.item.type.is_protocol)):
- self.msg.concrete_only_assign(lvalue_type, rvalue)
+ p_rvalue_type = get_proper_type(rvalue_type)
+ p_lvalue_type = get_proper_type(lvalue_type)
+ if (
+ isinstance(p_rvalue_type, CallableType)
+ and p_rvalue_type.is_type_obj()
+ and (
+ p_rvalue_type.type_object().is_abstract
+ or p_rvalue_type.type_object().is_protocol
+ )
+ and isinstance(p_lvalue_type, TypeType)
+ and isinstance(p_lvalue_type.item, Instance)
+ and (
+ p_lvalue_type.item.type.is_abstract or p_lvalue_type.item.type.is_protocol
+ )
+ ):
+ self.msg.concrete_only_assign(p_lvalue_type, rvalue)
return
if rvalue_type and infer_lvalue_type and not isinstance(lvalue_type, PartialType):
# Don't use type binder for definitions of special forms, like named tuples.
@@ -2407,23 +2848,45 @@ def check_assignment(self, lvalue: Lvalue, rvalue: Expression, infer_lvalue_type
self.check_indexed_assignment(index_lvalue, rvalue, lvalue)
if inferred:
- rvalue_type = self.expr_checker.accept(rvalue)
- if not (inferred.is_final or (isinstance(lvalue, NameExpr) and
- lvalue.name == '__match_args__')):
+ type_context = self.get_variable_type_context(inferred)
+ rvalue_type = self.expr_checker.accept(rvalue, type_context=type_context)
+ if not (
+ inferred.is_final
+ or (isinstance(lvalue, NameExpr) and lvalue.name == "__match_args__")
+ ):
rvalue_type = remove_instance_last_known_values(rvalue_type)
self.infer_variable_type(inferred, lvalue, rvalue_type, rvalue)
self.check_assignment_to_slots(lvalue)
# (type, operator) tuples for augmented assignments supported with partial types
- partial_type_augmented_ops: Final = {
- ('builtins.list', '+'),
- ('builtins.set', '|'),
- }
-
- def try_infer_partial_generic_type_from_assignment(self,
- lvalue: Lvalue,
- rvalue: Expression,
- op: str) -> None:
+ partial_type_augmented_ops: Final = {("builtins.list", "+"), ("builtins.set", "|")}
+
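# Small sketch of the augmented assignments this table allows for completing
# a partial type (assumed example):
x = []        # x gets a partial list type
x += [1]      # ("builtins.list", "+") is supported -> x: list[int]
s = set()     # s gets a partial set type
s |= {"a"}    # ("builtins.set", "|") is supported -> s: set[str]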
+ def get_variable_type_context(self, inferred: Var) -> Type | None:
+ type_contexts = []
+ if inferred.info:
+ for base in inferred.info.mro[1:]:
+ base_type, base_node = self.lvalue_type_from_base(inferred, base)
+ if (
+ base_type
+ and not (isinstance(base_node, Var) and base_node.invalid_partial_type)
+ and not isinstance(base_type, PartialType)
+ ):
+ type_contexts.append(base_type)
+ # Use most derived supertype as type context if available.
+ if not type_contexts:
+ return None
+ candidate = type_contexts[0]
+ for other in type_contexts:
+ if is_proper_subtype(other, candidate):
+ candidate = other
+ elif not is_subtype(candidate, other):
+ # Multiple incompatible candidates, cannot use any of them as context.
+ return None
+ return candidate
+
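# Hedged sketch of why a base class type is used as context: with
# "list[float]" as the type context, the rvalue below is inferred as
# list[float] rather than list[int], keeping the override compatible
# (assumed example).
class Base:
    x: list[float]

class Derived(Base):
    x = [1, 2]  # inferred as list[float] using Base's declaration as context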
+ def try_infer_partial_generic_type_from_assignment(
+ self, lvalue: Lvalue, rvalue: Expression, op: str
+ ) -> None:
"""Try to infer a precise type for partial generic type from assignment.
'op' is '=' for normal assignment and a binary operator ('+', ...) for
@@ -2436,9 +2899,11 @@ def try_infer_partial_generic_type_from_assignment(self,
x = [1] # Infer List[int] as type of 'x'
"""
var = None
- if (isinstance(lvalue, NameExpr)
- and isinstance(lvalue.node, Var)
- and isinstance(lvalue.node.type, PartialType)):
+ if (
+ isinstance(lvalue, NameExpr)
+ and isinstance(lvalue.node, Var)
+ and isinstance(lvalue.node.type, PartialType)
+ ):
var = lvalue.node
elif isinstance(lvalue, MemberExpr):
var = self.expr_checker.get_partial_self_var(lvalue)
@@ -2448,7 +2913,7 @@ def try_infer_partial_generic_type_from_assignment(self,
if typ.type is None:
return
# Return if this is an unsupported augmented assignment.
- if op != '=' and (typ.type.fullname, op) not in self.partial_type_augmented_ops:
+ if op != "=" and (typ.type.fullname, op) not in self.partial_type_augmented_ops:
return
# TODO: some logic here duplicates the None partial type counterpart
# inlined in check_assignment(), see #8043.
@@ -2465,26 +2930,25 @@ def try_infer_partial_generic_type_from_assignment(self,
var.type = fill_typevars_with_any(typ.type)
del partial_types[var]
- def check_compatibility_all_supers(self, lvalue: RefExpr, lvalue_type: Optional[Type],
- rvalue: Expression) -> bool:
+ def check_compatibility_all_supers(
+ self, lvalue: RefExpr, lvalue_type: Type | None, rvalue: Expression
+ ) -> bool:
lvalue_node = lvalue.node
# Check if we are a class variable with at least one base class
- if (isinstance(lvalue_node, Var) and
- lvalue.kind in (MDEF, None) and # None for Vars defined via self
- len(lvalue_node.info.bases) > 0):
+ if (
+ isinstance(lvalue_node, Var)
+ and lvalue.kind in (MDEF, None)
+ and len(lvalue_node.info.bases) > 0 # None for Vars defined via self
+ ):
for base in lvalue_node.info.mro[1:]:
tnode = base.names.get(lvalue_node.name)
if tnode is not None:
- if not self.check_compatibility_classvar_super(lvalue_node,
- base,
- tnode.node):
+ if not self.check_compatibility_classvar_super(lvalue_node, base, tnode.node):
# Show only one error per variable
break
- if not self.check_compatibility_final_super(lvalue_node,
- base,
- tnode.node):
+ if not self.check_compatibility_final_super(lvalue_node, base, tnode.node):
# Show only one error per variable
break
@@ -2495,24 +2959,23 @@ def check_compatibility_all_supers(self, lvalue: RefExpr, lvalue_type: Optional[
# The type of "__slots__" and some other attributes usually doesn't need to
# be compatible with a base class. We'll still check the type of "__slots__"
# against "object" as an exception.
- if (isinstance(lvalue_node, Var) and lvalue_node.allow_incompatible_override and
- not (lvalue_node.name == "__slots__" and
- base.fullname == "builtins.object")):
+ if lvalue_node.allow_incompatible_override and not (
+ lvalue_node.name == "__slots__" and base.fullname == "builtins.object"
+ ):
continue
if is_private(lvalue_node.name):
continue
base_type, base_node = self.lvalue_type_from_base(lvalue_node, base)
+ if isinstance(base_type, PartialType):
+ base_type = None
if base_type:
assert base_node is not None
- if not self.check_compatibility_super(lvalue,
- lvalue_type,
- rvalue,
- base,
- base_type,
- base_node):
+ if not self.check_compatibility_super(
+ lvalue, lvalue_type, rvalue, base, base_type, base_node
+ ):
# Only show one error per variable; even if other
# base classes are also incompatible
return True
@@ -2522,9 +2985,15 @@ def check_compatibility_all_supers(self, lvalue: RefExpr, lvalue_type: Optional[
break
return False
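# Minimal sketch (assumed example) of the base class compatibility check
# driven by check_compatibility_all_supers:
class Base:
    attr = 0

class Derived(Base):
    attr = "no"  # error: Incompatible types in assignment (expression has type
                 # "str", base class "Base" defined the type as "int")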
- def check_compatibility_super(self, lvalue: RefExpr, lvalue_type: Optional[Type],
- rvalue: Expression, base: TypeInfo, base_type: Type,
- base_node: Node) -> bool:
+ def check_compatibility_super(
+ self,
+ lvalue: RefExpr,
+ lvalue_type: Type | None,
+ rvalue: Expression,
+ base: TypeInfo,
+ base_type: Type,
+ base_node: Node,
+ ) -> bool:
lvalue_node = lvalue.node
assert isinstance(lvalue_node, Var)
@@ -2546,8 +3015,7 @@ def check_compatibility_super(self, lvalue: RefExpr, lvalue_type: Optional[Type]
base_type = get_proper_type(base_type)
compare_type = get_proper_type(compare_type)
if compare_type:
- if (isinstance(base_type, CallableType) and
- isinstance(compare_type, CallableType)):
+ if isinstance(base_type, CallableType) and isinstance(compare_type, CallableType):
base_static = is_node_static(base_node)
compare_static = is_node_static(compare_node)
@@ -2572,15 +3040,19 @@ def check_compatibility_super(self, lvalue: RefExpr, lvalue_type: Optional[Type]
if base_static and compare_static:
lvalue_node.is_staticmethod = True
- return self.check_subtype(compare_type, base_type, rvalue,
- message_registry.INCOMPATIBLE_TYPES_IN_ASSIGNMENT,
- 'expression has type',
- 'base class "%s" defined the type as' % base.name,
- code=codes.ASSIGNMENT)
+ return self.check_subtype(
+ compare_type,
+ base_type,
+ rvalue,
+ message_registry.INCOMPATIBLE_TYPES_IN_ASSIGNMENT,
+ "expression has type",
+ f'base class "{base.name}" defined the type as',
+ )
return True
- def lvalue_type_from_base(self, expr_node: Var,
- base: TypeInfo) -> Tuple[Optional[Type], Optional[Node]]:
+ def lvalue_type_from_base(
+ self, expr_node: Var, base: TypeInfo
+ ) -> tuple[Type | None, Node | None]:
"""For a NameExpr that is part of a class, walk all base classes and try
to find the first class that defines a Type for the same name."""
expr_name = expr_node.name
@@ -2589,6 +3061,8 @@ def lvalue_type_from_base(self, expr_node: Var,
if base_var:
base_node = base_var.node
base_type = base_var.type
+ if isinstance(base_node, Var) and base_type is not None:
+ base_type = expand_self_type(base_node, base_type, fill_typevars(expr_node.info))
if isinstance(base_node, Decorator):
base_node = base_node.func
base_type = base_node.type
@@ -2610,8 +3084,9 @@ def lvalue_type_from_base(self, expr_node: Var,
# value, not the Callable
if base_node.is_property:
base_type = get_proper_type(base_type.ret_type)
- if isinstance(base_type, FunctionLike) and isinstance(base_node,
- OverloadedFuncDef):
+ if isinstance(base_type, FunctionLike) and isinstance(
+ base_node, OverloadedFuncDef
+ ):
# Same for properties with setter
if base_node.is_property:
base_type = base_type.items[0].ret_type
@@ -2620,8 +3095,9 @@ def lvalue_type_from_base(self, expr_node: Var,
return None, None
- def check_compatibility_classvar_super(self, node: Var,
- base: TypeInfo, base_node: Optional[Node]) -> bool:
+ def check_compatibility_classvar_super(
+ self, node: Var, base: TypeInfo, base_node: Node | None
+ ) -> bool:
if not isinstance(base_node, Var):
return True
if node.is_classvar and not base_node.is_classvar:
@@ -2632,8 +3108,9 @@ def check_compatibility_classvar_super(self, node: Var,
return False
return True
- def check_compatibility_final_super(self, node: Var,
- base: TypeInfo, base_node: Optional[Node]) -> bool:
+ def check_compatibility_final_super(
+ self, node: Var, base: TypeInfo, base_node: Node | None
+ ) -> bool:
"""Check if an assignment overrides a final attribute in a base class.
This only checks situations where either a node in base class is not a variable
@@ -2658,10 +3135,9 @@ def check_compatibility_final_super(self, node: Var,
self.check_if_final_var_override_writable(node.name, base_node, node)
return True
- def check_if_final_var_override_writable(self,
- name: str,
- base_node: Optional[Node],
- ctx: Context) -> None:
+ def check_if_final_var_override_writable(
+ self, name: str, base_node: Node | None, ctx: Context
+ ) -> None:
"""Check that a final variable doesn't override writeable attribute.
This is done to prevent situations like this:
@@ -2693,8 +3169,7 @@ def enter_final_context(self, is_final_def: bool) -> Iterator[None]:
finally:
self._is_final_def = old_ctx
- def check_final(self,
- s: Union[AssignmentStmt, OperatorAssignmentStmt, AssignmentExpr]) -> None:
+ def check_final(self, s: AssignmentStmt | OperatorAssignmentStmt | AssignmentExpr) -> None:
"""Check if this assignment does not assign to a final attribute.
This function performs the check only for name assignments at module
@@ -2711,13 +3186,19 @@ def check_final(self,
if is_final_decl and self.scope.active_class():
lv = lvs[0]
assert isinstance(lv, RefExpr)
- assert isinstance(lv.node, Var)
- if (lv.node.final_unset_in_class and not lv.node.final_set_in_init and
- not self.is_stub and # It is OK to skip initializer in stub files.
+ if lv.node is not None:
+ assert isinstance(lv.node, Var)
+ if (
+ lv.node.final_unset_in_class
+ and not lv.node.final_set_in_init
+ and not self.is_stub
+ and # It is OK to skip initializer in stub files.
# Avoid extra error messages, if there is no type in Final[...],
# then we already reported the error about missing r.h.s.
- isinstance(s, AssignmentStmt) and s.type is not None):
- self.msg.final_without_value(s)
+ isinstance(s, AssignmentStmt)
+ and s.type is not None
+ ):
+ self.msg.final_without_value(s)
for lv in lvs:
if isinstance(lv, RefExpr) and isinstance(lv.node, Var):
name = lv.node.name
@@ -2751,7 +3232,7 @@ def check_assignment_to_slots(self, lvalue: Lvalue) -> None:
if lvalue.name in inst.type.slots:
return # We are assigning to an existing slot
for base_info in inst.type.mro[:-1]:
- if base_info.names.get('__setattr__') is not None:
+ if base_info.names.get("__setattr__") is not None:
# When type has `__setattr__` defined,
# we can assign any dynamic value.
# We exclude object, because it always has `__setattr__`.
@@ -2767,14 +3248,11 @@ def check_assignment_to_slots(self, lvalue: Lvalue) -> None:
return
self.fail(
- message_registry.NAME_NOT_IN_SLOTS.format(
- lvalue.name, inst.type.fullname,
- ),
- lvalue,
+ message_registry.NAME_NOT_IN_SLOTS.format(lvalue.name, inst.type.fullname), lvalue
)
- def is_assignable_slot(self, lvalue: Lvalue, typ: Optional[Type]) -> bool:
- if getattr(lvalue, 'node', None):
+ def is_assignable_slot(self, lvalue: Lvalue, typ: Type | None) -> bool:
+ if getattr(lvalue, "node", None):
return False # This is a definition
typ = get_proper_type(typ)
@@ -2785,32 +3263,37 @@ def is_assignable_slot(self, lvalue: Lvalue, typ: Optional[Type]) -> bool:
# `__set__` special method. Like `@property` does.
# This makes assigning to properties possible,
# even without extra slot spec.
- return typ.type.get('__set__') is not None
+ return typ.type.get("__set__") is not None
if isinstance(typ, FunctionLike):
return True # Can be a property, or some other magic
if isinstance(typ, UnionType):
return all(self.is_assignable_slot(lvalue, u) for u in typ.items)
return False
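# Short sketch of the __slots__ checks implemented above (assumed example;
# exact message wording may differ):
class C:
    __slots__ = ("x",)

    def __init__(self) -> None:
        self.x = 1
        self.y = 2  # error: "y" is not in "__slots__" of "C" (also an
                    # AttributeError at runtime)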
- def check_assignment_to_multiple_lvalues(self, lvalues: List[Lvalue], rvalue: Expression,
- context: Context,
- infer_lvalue_type: bool = True) -> None:
+ def check_assignment_to_multiple_lvalues(
+ self,
+ lvalues: list[Lvalue],
+ rvalue: Expression,
+ context: Context,
+ infer_lvalue_type: bool = True,
+ ) -> None:
if isinstance(rvalue, TupleExpr) or isinstance(rvalue, ListExpr):
# Recursively go into Tuple or List expression rhs instead of
            # using the type of the rhs, because this allows more fine-grained
            # control in cases like a, b = [int, str], where the rhs would get
            # type List[object]
- rvalues: List[Expression] = []
- iterable_type: Optional[Type] = None
- last_idx: Optional[int] = None
+ rvalues: list[Expression] = []
+ iterable_type: Type | None = None
+ last_idx: int | None = None
for idx_rval, rval in enumerate(rvalue.items):
if isinstance(rval, StarExpr):
typs = get_proper_type(self.expr_checker.visit_star_expr(rval).type)
if isinstance(typs, TupleType):
rvalues.extend([TempNode(typ) for typ in typs.items])
elif self.type_is_iterable(typs) and isinstance(typs, Instance):
- if (iterable_type is not None
- and iterable_type != self.iterable_item_type(typs)):
+ if iterable_type is not None and iterable_type != self.iterable_item_type(
+ typs
+ ):
self.fail(message_registry.CONTIGUOUS_ITERABLE_EXPECTED, context)
else:
if last_idx is None or last_idx + 1 == idx_rval:
@@ -2820,12 +3303,11 @@ def check_assignment_to_multiple_lvalues(self, lvalues: List[Lvalue], rvalue: Ex
else:
self.fail(message_registry.CONTIGUOUS_ITERABLE_EXPECTED, context)
else:
- self.fail(message_registry.ITERABLE_TYPE_EXPECTED.format(typs),
- context)
+ self.fail(message_registry.ITERABLE_TYPE_EXPECTED.format(typs), context)
else:
rvalues.append(rval)
- iterable_start: Optional[int] = None
- iterable_end: Optional[int] = None
+ iterable_start: int | None = None
+ iterable_end: int | None = None
for i, rval in enumerate(rvalues):
if isinstance(rval, StarExpr):
typs = get_proper_type(self.expr_checker.visit_star_expr(rval).type)
@@ -2833,31 +3315,39 @@ def check_assignment_to_multiple_lvalues(self, lvalues: List[Lvalue], rvalue: Ex
if iterable_start is None:
iterable_start = i
iterable_end = i
- if (iterable_start is not None
- and iterable_end is not None
- and iterable_type is not None):
+ if (
+ iterable_start is not None
+ and iterable_end is not None
+ and iterable_type is not None
+ ):
iterable_num = iterable_end - iterable_start + 1
rvalue_needed = len(lvalues) - (len(rvalues) - iterable_num)
if rvalue_needed > 0:
- rvalues = rvalues[0: iterable_start] + [TempNode(iterable_type)
- for i in range(rvalue_needed)] + rvalues[iterable_end + 1:]
+ rvalues = (
+ rvalues[0:iterable_start]
+ + [TempNode(iterable_type) for i in range(rvalue_needed)]
+ + rvalues[iterable_end + 1 :]
+ )
if self.check_rvalue_count_in_assignment(lvalues, len(rvalues), context):
- star_index = next((i for i, lv in enumerate(lvalues) if
- isinstance(lv, StarExpr)), len(lvalues))
+ star_index = next(
+ (i for i, lv in enumerate(lvalues) if isinstance(lv, StarExpr)), len(lvalues)
+ )
left_lvs = lvalues[:star_index]
- star_lv = cast(StarExpr,
- lvalues[star_index]) if star_index != len(lvalues) else None
- right_lvs = lvalues[star_index + 1:]
+ star_lv = (
+ cast(StarExpr, lvalues[star_index]) if star_index != len(lvalues) else None
+ )
+ right_lvs = lvalues[star_index + 1 :]
left_rvs, star_rvs, right_rvs = self.split_around_star(
- rvalues, star_index, len(lvalues))
+ rvalues, star_index, len(lvalues)
+ )
lr_pairs = list(zip(left_lvs, left_rvs))
if star_lv:
rv_list = ListExpr(star_rvs)
- rv_list.set_line(rvalue.get_line())
+ rv_list.set_line(rvalue)
lr_pairs.append((star_lv.expr, rv_list))
lr_pairs.extend(zip(right_lvs, right_rvs))
@@ -2866,30 +3356,36 @@ def check_assignment_to_multiple_lvalues(self, lvalues: List[Lvalue], rvalue: Ex
else:
self.check_multi_assignment(lvalues, rvalue, context, infer_lvalue_type)
- def check_rvalue_count_in_assignment(self, lvalues: List[Lvalue], rvalue_count: int,
- context: Context) -> bool:
+ def check_rvalue_count_in_assignment(
+ self, lvalues: list[Lvalue], rvalue_count: int, context: Context
+ ) -> bool:
if any(isinstance(lvalue, StarExpr) for lvalue in lvalues):
if len(lvalues) - 1 > rvalue_count:
- self.msg.wrong_number_values_to_unpack(rvalue_count,
- len(lvalues) - 1, context)
+ self.msg.wrong_number_values_to_unpack(rvalue_count, len(lvalues) - 1, context)
return False
elif rvalue_count != len(lvalues):
self.msg.wrong_number_values_to_unpack(rvalue_count, len(lvalues), context)
return False
return True
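# Trivial sketch of the arity check above (assumed example):
a, b = 1, 2, 3   # error: too many values to unpack (2 expected, 3 provided)
p, *rest = [1]   # ok: a star target only requires the non-star minimum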
- def check_multi_assignment(self, lvalues: List[Lvalue],
- rvalue: Expression,
- context: Context,
- infer_lvalue_type: bool = True,
- rv_type: Optional[Type] = None,
- undefined_rvalue: bool = False) -> None:
+ def check_multi_assignment(
+ self,
+ lvalues: list[Lvalue],
+ rvalue: Expression,
+ context: Context,
+ infer_lvalue_type: bool = True,
+ rv_type: Type | None = None,
+ undefined_rvalue: bool = False,
+ ) -> None:
"""Check the assignment of one rvalue to a number of lvalues."""
# Infer the type of an ordinary rvalue expression.
# TODO: maybe elsewhere; redundant.
rvalue_type = get_proper_type(rv_type or self.expr_checker.accept(rvalue))
+ if isinstance(rvalue_type, TypeVarLikeType):
+ rvalue_type = get_proper_type(rvalue_type.upper_bound)
+
if isinstance(rvalue_type, UnionType):
# If this is an Optional type in non-strict Optional code, unwrap it.
relevant_items = rvalue_type.relevant_items()
@@ -2900,24 +3396,33 @@ def check_multi_assignment(self, lvalues: List[Lvalue],
for lv in lvalues:
if isinstance(lv, StarExpr):
lv = lv.expr
- temp_node = self.temp_node(AnyType(TypeOfAny.from_another_any,
- source_any=rvalue_type), context)
+ temp_node = self.temp_node(
+ AnyType(TypeOfAny.from_another_any, source_any=rvalue_type), context
+ )
self.check_assignment(lv, temp_node, infer_lvalue_type)
elif isinstance(rvalue_type, TupleType):
- self.check_multi_assignment_from_tuple(lvalues, rvalue, rvalue_type,
- context, undefined_rvalue, infer_lvalue_type)
+ self.check_multi_assignment_from_tuple(
+ lvalues, rvalue, rvalue_type, context, undefined_rvalue, infer_lvalue_type
+ )
elif isinstance(rvalue_type, UnionType):
- self.check_multi_assignment_from_union(lvalues, rvalue, rvalue_type, context,
- infer_lvalue_type)
- elif isinstance(rvalue_type, Instance) and rvalue_type.type.fullname == 'builtins.str':
+ self.check_multi_assignment_from_union(
+ lvalues, rvalue, rvalue_type, context, infer_lvalue_type
+ )
+ elif isinstance(rvalue_type, Instance) and rvalue_type.type.fullname == "builtins.str":
self.msg.unpacking_strings_disallowed(context)
else:
- self.check_multi_assignment_from_iterable(lvalues, rvalue_type,
- context, infer_lvalue_type)
+ self.check_multi_assignment_from_iterable(
+ lvalues, rvalue_type, context, infer_lvalue_type
+ )
- def check_multi_assignment_from_union(self, lvalues: List[Expression], rvalue: Expression,
- rvalue_type: UnionType, context: Context,
- infer_lvalue_type: bool) -> None:
+ def check_multi_assignment_from_union(
+ self,
+ lvalues: list[Expression],
+ rvalue: Expression,
+ rvalue_type: UnionType,
+ context: Context,
+ infer_lvalue_type: bool,
+ ) -> None:
"""Check assignment to multiple lvalue targets when rvalue type is a Union[...].
For example:
@@ -2931,17 +3436,24 @@ def check_multi_assignment_from_union(self, lvalues: List[Expression], rvalue: E
for binder.
"""
self.no_partial_types = True
- transposed: Tuple[List[Type], ...] = tuple([] for _ in self.flatten_lvalues(lvalues))
+ transposed: tuple[list[Type], ...] = tuple([] for _ in self.flatten_lvalues(lvalues))
# Notify binder that we want to defer bindings and instead collect types.
with self.binder.accumulate_type_assignments() as assignments:
for item in rvalue_type.items:
# Type check the assignment separately for each union item and collect
# the inferred lvalue types for each union item.
- self.check_multi_assignment(lvalues, rvalue, context,
- infer_lvalue_type=infer_lvalue_type,
- rv_type=item, undefined_rvalue=True)
+ self.check_multi_assignment(
+ lvalues,
+ rvalue,
+ context,
+ infer_lvalue_type=infer_lvalue_type,
+ rv_type=item,
+ undefined_rvalue=True,
+ )
for t, lv in zip(transposed, self.flatten_lvalues(lvalues)):
- t.append(self.type_map.pop(lv, AnyType(TypeOfAny.special_form)))
+ # We can access _type_maps directly since temporary type maps are
+ # only created within expressions.
+ t.append(self._type_maps[0].pop(lv, AnyType(TypeOfAny.special_form)))
union_types = tuple(make_simplified_union(col) for col in transposed)
for expr, items in assignments.items():
# Bind a union of types collected in 'assignments' to every expression.
@@ -2950,17 +3462,18 @@ def check_multi_assignment_from_union(self, lvalues: List[Expression], rvalue: E
# TODO: See todo in binder.py, ConditionalTypeBinder.assign_type
# It's unclear why the 'declared_type' param is sometimes 'None'
- clean_items: List[Tuple[Type, Type]] = []
+ clean_items: list[tuple[Type, Type]] = []
for type, declared_type in items:
assert declared_type is not None
clean_items.append((type, declared_type))
- # TODO: fix signature of zip() in typeshed.
- types, declared_types = cast(Any, zip)(*clean_items)
- self.binder.assign_type(expr,
- make_simplified_union(list(types)),
- make_simplified_union(list(declared_types)),
- False)
+ types, declared_types = zip(*clean_items)
+ self.binder.assign_type(
+ expr,
+ make_simplified_union(list(types)),
+ make_simplified_union(list(declared_types)),
+ False,
+ )
for union, lv in zip(union_types, self.flatten_lvalues(lvalues)):
# Properly store the inferred types.
_1, _2, inferred = self.check_lvalue(lv)
@@ -2970,8 +3483,8 @@ def check_multi_assignment_from_union(self, lvalues: List[Expression], rvalue: E
self.store_type(lv, union)
self.no_partial_types = False
- def flatten_lvalues(self, lvalues: List[Expression]) -> List[Expression]:
- res: List[Expression] = []
+ def flatten_lvalues(self, lvalues: list[Expression]) -> list[Expression]:
+ res: list[Expression] = []
for lv in lvalues:
if isinstance(lv, (TupleExpr, ListExpr)):
res.extend(self.flatten_lvalues(lv.items))
@@ -2981,23 +3494,30 @@ def flatten_lvalues(self, lvalues: List[Expression]) -> List[Expression]:
res.append(lv)
return res
- def check_multi_assignment_from_tuple(self, lvalues: List[Lvalue], rvalue: Expression,
- rvalue_type: TupleType, context: Context,
- undefined_rvalue: bool,
- infer_lvalue_type: bool = True) -> None:
+ def check_multi_assignment_from_tuple(
+ self,
+ lvalues: list[Lvalue],
+ rvalue: Expression,
+ rvalue_type: TupleType,
+ context: Context,
+ undefined_rvalue: bool,
+ infer_lvalue_type: bool = True,
+ ) -> None:
if self.check_rvalue_count_in_assignment(lvalues, len(rvalue_type.items), context):
- star_index = next((i for i, lv in enumerate(lvalues)
- if isinstance(lv, StarExpr)), len(lvalues))
+ star_index = next(
+ (i for i, lv in enumerate(lvalues) if isinstance(lv, StarExpr)), len(lvalues)
+ )
left_lvs = lvalues[:star_index]
star_lv = cast(StarExpr, lvalues[star_index]) if star_index != len(lvalues) else None
- right_lvs = lvalues[star_index + 1:]
+ right_lvs = lvalues[star_index + 1 :]
if not undefined_rvalue:
# Infer rvalue again, now in the correct type context.
lvalue_type = self.lvalue_type_for_inference(lvalues, rvalue_type)
- reinferred_rvalue_type = get_proper_type(self.expr_checker.accept(rvalue,
- lvalue_type))
+ reinferred_rvalue_type = get_proper_type(
+ self.expr_checker.accept(rvalue, lvalue_type)
+ )
if isinstance(reinferred_rvalue_type, UnionType):
# If this is an Optional type in non-strict Optional code, unwrap it.
@@ -3005,9 +3525,9 @@ def check_multi_assignment_from_tuple(self, lvalues: List[Lvalue], rvalue: Expre
if len(relevant_items) == 1:
reinferred_rvalue_type = get_proper_type(relevant_items[0])
if isinstance(reinferred_rvalue_type, UnionType):
- self.check_multi_assignment_from_union(lvalues, rvalue,
- reinferred_rvalue_type, context,
- infer_lvalue_type)
+ self.check_multi_assignment_from_union(
+ lvalues, rvalue, reinferred_rvalue_type, context, infer_lvalue_type
+ )
return
if isinstance(reinferred_rvalue_type, AnyType):
# We can get Any if the current node is
@@ -3021,30 +3541,34 @@ def check_multi_assignment_from_tuple(self, lvalues: List[Lvalue], rvalue: Expre
rvalue_type = reinferred_rvalue_type
left_rv_types, star_rv_types, right_rv_types = self.split_around_star(
- rvalue_type.items, star_index, len(lvalues))
+ rvalue_type.items, star_index, len(lvalues)
+ )
for lv, rv_type in zip(left_lvs, left_rv_types):
self.check_assignment(lv, self.temp_node(rv_type, context), infer_lvalue_type)
if star_lv:
- list_expr = ListExpr([self.temp_node(rv_type, context)
- for rv_type in star_rv_types])
- list_expr.set_line(context.get_line())
+ list_expr = ListExpr(
+ [self.temp_node(rv_type, context) for rv_type in star_rv_types]
+ )
+ list_expr.set_line(context)
self.check_assignment(star_lv.expr, list_expr, infer_lvalue_type)
for lv, rv_type in zip(right_lvs, right_rv_types):
self.check_assignment(lv, self.temp_node(rv_type, context), infer_lvalue_type)
- def lvalue_type_for_inference(self, lvalues: List[Lvalue], rvalue_type: TupleType) -> Type:
- star_index = next((i for i, lv in enumerate(lvalues)
- if isinstance(lv, StarExpr)), len(lvalues))
+ def lvalue_type_for_inference(self, lvalues: list[Lvalue], rvalue_type: TupleType) -> Type:
+ star_index = next(
+ (i for i, lv in enumerate(lvalues) if isinstance(lv, StarExpr)), len(lvalues)
+ )
left_lvs = lvalues[:star_index]
star_lv = cast(StarExpr, lvalues[star_index]) if star_index != len(lvalues) else None
- right_lvs = lvalues[star_index + 1:]
+ right_lvs = lvalues[star_index + 1 :]
left_rv_types, star_rv_types, right_rv_types = self.split_around_star(
- rvalue_type.items, star_index, len(lvalues))
+ rvalue_type.items, star_index, len(lvalues)
+ )
- type_parameters: List[Type] = []
+ type_parameters: list[Type] = []
- def append_types_for_inference(lvs: List[Expression], rv_types: List[Type]) -> None:
+ def append_types_for_inference(lvs: list[Expression], rv_types: list[Type]) -> None:
for lv, rv_type in zip(lvs, rv_types):
sub_lvalue_type, index_expr, inferred = self.check_lvalue(lv)
if sub_lvalue_type and not isinstance(sub_lvalue_type, PartialType):
@@ -3067,10 +3591,11 @@ def append_types_for_inference(lvs: List[Expression], rv_types: List[Type]) -> N
append_types_for_inference(right_lvs, right_rv_types)
- return TupleType(type_parameters, self.named_type('builtins.tuple'))
+ return TupleType(type_parameters, self.named_type("builtins.tuple"))
- def split_around_star(self, items: List[T], star_index: int,
- length: int) -> Tuple[List[T], List[T], List[T]]:
+ def split_around_star(
+ self, items: list[T], star_index: int, length: int
+ ) -> tuple[list[T], list[T], list[T]]:
"""Splits a list of items in three to match another list of length 'length'
that contains a starred expression at 'star_index' in the following way:
@@ -3088,29 +3613,34 @@ def type_is_iterable(self, type: Type) -> bool:
type = get_proper_type(type)
if isinstance(type, CallableType) and type.is_type_obj():
type = type.fallback
- return is_subtype(type, self.named_generic_type('typing.Iterable',
- [AnyType(TypeOfAny.special_form)]))
+ return is_subtype(
+ type, self.named_generic_type("typing.Iterable", [AnyType(TypeOfAny.special_form)])
+ )
- def check_multi_assignment_from_iterable(self, lvalues: List[Lvalue], rvalue_type: Type,
- context: Context,
- infer_lvalue_type: bool = True) -> None:
+ def check_multi_assignment_from_iterable(
+ self,
+ lvalues: list[Lvalue],
+ rvalue_type: Type,
+ context: Context,
+ infer_lvalue_type: bool = True,
+ ) -> None:
rvalue_type = get_proper_type(rvalue_type)
if self.type_is_iterable(rvalue_type) and isinstance(rvalue_type, Instance):
item_type = self.iterable_item_type(rvalue_type)
for lv in lvalues:
if isinstance(lv, StarExpr):
- items_type = self.named_generic_type('builtins.list', [item_type])
- self.check_assignment(lv.expr, self.temp_node(items_type, context),
- infer_lvalue_type)
+ items_type = self.named_generic_type("builtins.list", [item_type])
+ self.check_assignment(
+ lv.expr, self.temp_node(items_type, context), infer_lvalue_type
+ )
else:
- self.check_assignment(lv, self.temp_node(item_type, context),
- infer_lvalue_type)
+ self.check_assignment(
+ lv, self.temp_node(item_type, context), infer_lvalue_type
+ )
else:
self.msg.type_not_iterable(rvalue_type, context)
- def check_lvalue(self, lvalue: Lvalue) -> Tuple[Optional[Type],
- Optional[IndexExpr],
- Optional[Var]]:
+ def check_lvalue(self, lvalue: Lvalue) -> tuple[Type | None, IndexExpr | None, Var | None]:
lvalue_type = None
index_lvalue = None
inferred = None
@@ -3134,11 +3664,14 @@ def check_lvalue(self, lvalue: Lvalue) -> Tuple[Optional[Type],
lvalue_type = self.expr_checker.analyze_ref_expr(lvalue, lvalue=True)
self.store_type(lvalue, lvalue_type)
elif isinstance(lvalue, TupleExpr) or isinstance(lvalue, ListExpr):
- types = [self.check_lvalue(sub_expr)[0] or
- # This type will be used as a context for further inference of rvalue,
- # we put Uninhabited if there is no information available from lvalue.
- UninhabitedType() for sub_expr in lvalue.items]
- lvalue_type = TupleType(types, self.named_type('builtins.tuple'))
+ types = [
+ self.check_lvalue(sub_expr)[0] or
+ # This type will be used as a context for further inference of rvalue,
+ # we put Uninhabited if there is no information available from lvalue.
+ UninhabitedType()
+ for sub_expr in lvalue.items
+ ]
+ lvalue_type = TupleType(types, self.named_type("builtins.tuple"))
elif isinstance(lvalue, StarExpr):
typ, _, _ = self.check_lvalue(lvalue.expr)
lvalue_type = StarType(typ) if typ else None
@@ -3163,13 +3696,16 @@ def is_definition(self, s: Lvalue) -> bool:
return s.is_inferred_def
return False
- def infer_variable_type(self, name: Var, lvalue: Lvalue,
- init_type: Type, context: Context) -> None:
+ def infer_variable_type(
+ self, name: Var, lvalue: Lvalue, init_type: Type, context: Context
+ ) -> None:
"""Infer the type of initialized variables from initializer type."""
- init_type = get_proper_type(init_type)
if isinstance(init_type, DeletedType):
self.msg.deleted_as_rvalue(init_type, context)
- elif not is_valid_inferred_type(init_type) and not self.no_partial_types:
+ elif (
+ not is_valid_inferred_type(init_type, is_lvalue_final=name.is_final)
+ and not self.no_partial_types
+ ):
# We cannot use the type of the initialization expression for full type
# inference (it's not specific enough), but we might be able to give
# partial type which will be made more specific later. A partial type
@@ -3177,9 +3713,13 @@ def infer_variable_type(self, name: Var, lvalue: Lvalue,
if not self.infer_partial_type(name, lvalue, init_type):
self.msg.need_annotation_for_var(name, context, self.options.python_version)
self.set_inference_error_fallback_type(name, lvalue, init_type)
- elif (isinstance(lvalue, MemberExpr) and self.inferred_attribute_types is not None
- and lvalue.def_var and lvalue.def_var in self.inferred_attribute_types
- and not is_same_type(self.inferred_attribute_types[lvalue.def_var], init_type)):
+ elif (
+ isinstance(lvalue, MemberExpr)
+ and self.inferred_attribute_types is not None
+ and lvalue.def_var
+ and lvalue.def_var in self.inferred_attribute_types
+ and not is_same_type(self.inferred_attribute_types[lvalue.def_var], init_type)
+ ):
# Multiple, inconsistent types inferred for an attribute.
self.msg.need_annotation_for_var(name, context, self.options.python_version)
name.type = AnyType(TypeOfAny.from_error)
@@ -3198,19 +3738,26 @@ def infer_partial_type(self, name: Var, lvalue: Lvalue, init_type: Type) -> bool
elif isinstance(init_type, Instance):
fullname = init_type.type.fullname
is_ref = isinstance(lvalue, RefExpr)
- if (is_ref and
- (fullname == 'builtins.list' or
- fullname == 'builtins.set' or
- fullname == 'builtins.dict' or
- fullname == 'collections.OrderedDict') and
- all(isinstance(t, (NoneType, UninhabitedType))
- for t in get_proper_types(init_type.args))):
+ if (
+ is_ref
+ and (
+ fullname == "builtins.list"
+ or fullname == "builtins.set"
+ or fullname == "builtins.dict"
+ or fullname == "collections.OrderedDict"
+ )
+ and all(
+ isinstance(t, (NoneType, UninhabitedType))
+ for t in get_proper_types(init_type.args)
+ )
+ ):
partial_type = PartialType(init_type.type, name)
- elif is_ref and fullname == 'collections.defaultdict':
+ elif is_ref and fullname == "collections.defaultdict":
arg0 = get_proper_type(init_type.args[0])
arg1 = get_proper_type(init_type.args[1])
- if (isinstance(arg0, (NoneType, UninhabitedType)) and
- self.is_valid_defaultdict_partial_value_type(arg1)):
+ if isinstance(
+ arg0, (NoneType, UninhabitedType)
+ ) and self.is_valid_defaultdict_partial_value_type(arg1):
arg1 = erase_type(arg1)
assert isinstance(arg1, Instance)
partial_type = PartialType(init_type.type, name, arg1)
@@ -3286,33 +3833,88 @@ def inference_error_fallback_type(self, type: Type) -> Type:
# we therefore need to erase them.
return erase_typevars(fallback)
- def check_simple_assignment(self, lvalue_type: Optional[Type], rvalue: Expression,
- context: Context,
- msg: str = message_registry.INCOMPATIBLE_TYPES_IN_ASSIGNMENT,
- lvalue_name: str = 'variable',
- rvalue_name: str = 'expression', *,
- code: Optional[ErrorCode] = None) -> Type:
+ def simple_rvalue(self, rvalue: Expression) -> bool:
+ """Returns True for expressions for which inferred type should not depend on context.
+
+ Note that this function can still return False for some expressions where inferred type
+ does not depend on context. It only exists for performance optimizations.
+ """
+ if isinstance(rvalue, (IntExpr, StrExpr, BytesExpr, FloatExpr, RefExpr)):
+ return True
+ if isinstance(rvalue, CallExpr):
+ if isinstance(rvalue.callee, RefExpr) and isinstance(rvalue.callee.node, FuncBase):
+ typ = rvalue.callee.node.type
+ if isinstance(typ, CallableType):
+ return not typ.variables
+ elif isinstance(typ, Overloaded):
+ return not any(item.variables for item in typ.items)
+ return False
+
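# Sketch of what counts as "simple" here (assumed examples): literals and
# plain references are context-independent, while generic calls are not.
x = 1       # simple: a literal
y = x       # simple: a plain reference
z = dict()  # not simple: a generic call, so context can affect inference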
+ def check_simple_assignment(
+ self,
+ lvalue_type: Type | None,
+ rvalue: Expression,
+ context: Context,
+ msg: ErrorMessage = message_registry.INCOMPATIBLE_TYPES_IN_ASSIGNMENT,
+ lvalue_name: str = "variable",
+ rvalue_name: str = "expression",
+ *,
+ notes: list[str] | None = None,
+ ) -> Type:
if self.is_stub and isinstance(rvalue, EllipsisExpr):
# '...' is always a valid initializer in a stub.
return AnyType(TypeOfAny.special_form)
else:
- lvalue_type = get_proper_type(lvalue_type)
- always_allow_any = lvalue_type is not None and not isinstance(lvalue_type, AnyType)
- rvalue_type = self.expr_checker.accept(rvalue, lvalue_type,
- always_allow_any=always_allow_any)
- rvalue_type = get_proper_type(rvalue_type)
+ always_allow_any = lvalue_type is not None and not isinstance(
+ get_proper_type(lvalue_type), AnyType
+ )
+ rvalue_type = self.expr_checker.accept(
+ rvalue, lvalue_type, always_allow_any=always_allow_any
+ )
+ if (
+ isinstance(get_proper_type(lvalue_type), UnionType)
+ # Skip literal types, as they have special logic (for better errors).
+ and not isinstance(get_proper_type(rvalue_type), LiteralType)
+ and not self.simple_rvalue(rvalue)
+ ):
+ # Try re-inferring r.h.s. in empty context, and use that if it
+ # results in a narrower type. We don't always do this because it
+ # may have some performance impact, and we want to partially preserve
+ # the old behavior. This helps with various practical examples, see
+ # e.g. testOptionalTypeNarrowedByGenericCall.
+ with self.msg.filter_errors() as local_errors, self.local_type_map() as type_map:
+ alt_rvalue_type = self.expr_checker.accept(
+ rvalue, None, always_allow_any=always_allow_any
+ )
+ if (
+ not local_errors.has_new_errors()
+ # Skip Any type, since it is special cased in binder.
+ and not isinstance(get_proper_type(alt_rvalue_type), AnyType)
+ and is_valid_inferred_type(alt_rvalue_type)
+ and is_proper_subtype(alt_rvalue_type, rvalue_type)
+ ):
+ rvalue_type = alt_rvalue_type
+ self.store_types(type_map)
if isinstance(rvalue_type, DeletedType):
self.msg.deleted_as_rvalue(rvalue_type, context)
if isinstance(lvalue_type, DeletedType):
self.msg.deleted_as_lvalue(lvalue_type, context)
elif lvalue_type:
- self.check_subtype(rvalue_type, lvalue_type, context, msg,
- '{} has type'.format(rvalue_name),
- '{} has type'.format(lvalue_name), code=code)
+ self.check_subtype(
+ # Preserve original aliases for error messages when possible.
+ rvalue_type,
+ lvalue_type,
+ context,
+ msg,
+ f"{rvalue_name} has type",
+ f"{lvalue_name} has type",
+ notes=notes,
+ )
return rvalue_type
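# Hedged sketch of the union re-inference above, in the spirit of
# testOptionalTypeNarrowedByGenericCall (names assumed):
from __future__ import annotations
from typing import TypeVar

T = TypeVar("T")

def identity(x: T) -> T:
    return x

def f(x: int | None) -> None:
    if x is not None:
        # With "int | None" as context, the call would infer "int | None";
        # re-inference in an empty context yields the narrower "int", which
        # is a proper subtype and is therefore kept.
        x = identity(x)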
- def check_member_assignment(self, instance_type: Type, attribute_type: Type,
- rvalue: Expression, context: Context) -> Tuple[Type, Type, bool]:
+ def check_member_assignment(
+ self, instance_type: Type, attribute_type: Type, rvalue: Expression, context: Context
+ ) -> tuple[Type, Type, bool]:
"""Type member assignment.
This defers to check_simple_assignment, unless the member expression
@@ -3327,62 +3929,74 @@ def check_member_assignment(self, instance_type: Type, attribute_type: Type,
instance_type = get_proper_type(instance_type)
attribute_type = get_proper_type(attribute_type)
# Descriptors don't participate in class-attribute access
- if ((isinstance(instance_type, FunctionLike) and instance_type.is_type_obj()) or
- isinstance(instance_type, TypeType)):
- rvalue_type = self.check_simple_assignment(attribute_type, rvalue, context,
- code=codes.ASSIGNMENT)
+ if (isinstance(instance_type, FunctionLike) and instance_type.is_type_obj()) or isinstance(
+ instance_type, TypeType
+ ):
+ rvalue_type = self.check_simple_assignment(attribute_type, rvalue, context)
return rvalue_type, attribute_type, True
if not isinstance(attribute_type, Instance):
# TODO: support __set__() for union types.
- rvalue_type = self.check_simple_assignment(attribute_type, rvalue, context,
- code=codes.ASSIGNMENT)
+ rvalue_type = self.check_simple_assignment(attribute_type, rvalue, context)
return rvalue_type, attribute_type, True
mx = MemberContext(
- is_lvalue=False, is_super=False, is_operator=False,
- original_type=instance_type, context=context, self_type=None,
- msg=self.msg, chk=self,
+ is_lvalue=False,
+ is_super=False,
+ is_operator=False,
+ original_type=instance_type,
+ context=context,
+ self_type=None,
+ msg=self.msg,
+ chk=self,
)
get_type = analyze_descriptor_access(attribute_type, mx)
- if not attribute_type.type.has_readable_member('__set__'):
+ if not attribute_type.type.has_readable_member("__set__"):
# If there is no __set__, we type-check that the assigned value matches
            # the return type of __get__. This doesn't match the Python semantics
            # (which allow you to override the descriptor with any value), but it
            # preserves the type of accessing the attribute (even after the override).
- rvalue_type = self.check_simple_assignment(get_type, rvalue, context,
- code=codes.ASSIGNMENT)
+ rvalue_type = self.check_simple_assignment(get_type, rvalue, context)
return rvalue_type, get_type, True
- dunder_set = attribute_type.type.get_method('__set__')
+ dunder_set = attribute_type.type.get_method("__set__")
if dunder_set is None:
self.fail(message_registry.DESCRIPTOR_SET_NOT_CALLABLE.format(attribute_type), context)
return AnyType(TypeOfAny.from_error), get_type, False
bound_method = analyze_decorator_or_funcbase_access(
- defn=dunder_set, itype=attribute_type, info=attribute_type.type,
- self_type=attribute_type, name='__set__', mx=mx)
+ defn=dunder_set,
+ itype=attribute_type,
+ info=attribute_type.type,
+ self_type=attribute_type,
+ name="__set__",
+ mx=mx,
+ )
typ = map_instance_to_supertype(attribute_type, dunder_set.info)
dunder_set_type = expand_type_by_instance(bound_method, typ)
callable_name = self.expr_checker.method_fullname(attribute_type, "__set__")
dunder_set_type = self.expr_checker.transform_callee_type(
- callable_name, dunder_set_type,
+ callable_name,
+ dunder_set_type,
[TempNode(instance_type, context=context), rvalue],
[nodes.ARG_POS, nodes.ARG_POS],
- context, object_type=attribute_type,
+ context,
+ object_type=attribute_type,
)
# For non-overloaded setters, the result should be type-checked like a regular assignment.
# Hence, we first only try to infer the type by using the rvalue as type context.
type_context = rvalue
- with self.msg.disable_errors():
+ with self.msg.filter_errors():
_, inferred_dunder_set_type = self.expr_checker.check_call(
dunder_set_type,
[TempNode(instance_type, context=context), type_context],
[nodes.ARG_POS, nodes.ARG_POS],
- context, object_type=attribute_type,
- callable_name=callable_name)
+ context,
+ object_type=attribute_type,
+ callable_name=callable_name,
+ )
# And now we in fact type check the call, to show errors related to wrong arguments
# count, etc., replacing the type context for non-overloaded setters only.
@@ -3393,12 +4007,15 @@ def check_member_assignment(self, instance_type: Type, attribute_type: Type,
dunder_set_type,
[TempNode(instance_type, context=context), type_context],
[nodes.ARG_POS, nodes.ARG_POS],
- context, object_type=attribute_type,
- callable_name=callable_name)
+ context,
+ object_type=attribute_type,
+ callable_name=callable_name,
+ )
        # In the following cases, a message will already have been recorded in check_call.
- if ((not isinstance(inferred_dunder_set_type, CallableType)) or
- (len(inferred_dunder_set_type.arg_types) < 2)):
+ if (not isinstance(inferred_dunder_set_type, CallableType)) or (
+ len(inferred_dunder_set_type.arg_types) < 2
+ ):
return AnyType(TypeOfAny.from_error), get_type, False
set_type = inferred_dunder_set_type.arg_types[1]
@@ -3406,13 +4023,13 @@ def check_member_assignment(self, instance_type: Type, attribute_type: Type,
# and '__get__' type is narrower than '__set__', then we invoke the binder to narrow type
# by this assignment. Technically, this is not safe, but in practice this is
# what a user expects.
- rvalue_type = self.check_simple_assignment(set_type, rvalue, context,
- code=codes.ASSIGNMENT)
+ rvalue_type = self.check_simple_assignment(set_type, rvalue, context)
infer = is_subtype(rvalue_type, get_type) and is_subtype(get_type, set_type)
return rvalue_type if infer else set_type, get_type, infer
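# Condensed sketch (assumed example) of the descriptor handling above: the
# assignment is checked against the value argument of __set__, and the binder
# narrows to the __get__ type only when that is safe.
class Desc:
    def __get__(self, obj: object, owner: type) -> int:
        return 0

    def __set__(self, obj: object, value: "int | str") -> None:
        pass

class C:
    attr = Desc()

c = C()
c.attr = "ok"   # checked against __set__'s "int | str"
c.attr = b"no"  # error: "bytes" is incompatible with "int | str"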
- def check_indexed_assignment(self, lvalue: IndexExpr,
- rvalue: Expression, context: Context) -> None:
+ def check_indexed_assignment(
+ self, lvalue: IndexExpr, rvalue: Expression, context: Context
+ ) -> None:
"""Type check indexed assignment base[index] = rvalue.
The lvalue argument is the base[index] expression.
@@ -3420,15 +4037,22 @@ def check_indexed_assignment(self, lvalue: IndexExpr,
self.try_infer_partial_type_from_indexed_assignment(lvalue, rvalue)
basetype = get_proper_type(self.expr_checker.accept(lvalue.base))
method_type = self.expr_checker.analyze_external_member_access(
- '__setitem__', basetype, lvalue)
+ "__setitem__", basetype, lvalue
+ )
lvalue.method_type = method_type
self.expr_checker.check_method_call(
- '__setitem__', basetype, method_type, [lvalue.index, rvalue],
- [nodes.ARG_POS, nodes.ARG_POS], context)
+ "__setitem__",
+ basetype,
+ method_type,
+ [lvalue.index, rvalue],
+ [nodes.ARG_POS, nodes.ARG_POS],
+ context,
+ )
def try_infer_partial_type_from_indexed_assignment(
- self, lvalue: IndexExpr, rvalue: Expression) -> None:
+ self, lvalue: IndexExpr, rvalue: Expression
+ ) -> None:
# TODO: Should we share some of this with try_infer_partial_type?
var = None
if isinstance(lvalue.base, RefExpr) and isinstance(lvalue.base.node, Var):
@@ -3444,23 +4068,28 @@ def try_infer_partial_type_from_indexed_assignment(
if partial_types is None:
return
typename = type_type.fullname
- if (typename == 'builtins.dict'
- or typename == 'collections.OrderedDict'
- or typename == 'collections.defaultdict'):
+ if (
+ typename == "builtins.dict"
+ or typename == "collections.OrderedDict"
+ or typename == "collections.defaultdict"
+ ):
# TODO: Don't infer things twice.
key_type = self.expr_checker.accept(lvalue.index)
value_type = self.expr_checker.accept(rvalue)
- if (is_valid_inferred_type(key_type) and
- is_valid_inferred_type(value_type) and
- not self.current_node_deferred and
- not (typename == 'collections.defaultdict' and
- var.type.value_type is not None and
- not is_equivalent(value_type, var.type.value_type))):
- var.type = self.named_generic_type(typename,
- [key_type, value_type])
+ if (
+ is_valid_inferred_type(key_type)
+ and is_valid_inferred_type(value_type)
+ and not self.current_node_deferred
+ and not (
+ typename == "collections.defaultdict"
+ and var.type.value_type is not None
+ and not is_equivalent(value_type, var.type.value_type)
+ )
+ ):
+ var.type = self.named_generic_type(typename, [key_type, value_type])
del partial_types[var]
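
This is the helper behind the familiar empty-collection idiom; a minimal sketch:

```python
def f() -> None:
    d = {}          # partial dict type; not yet an error
    d["one"] = 1    # the indexed assignment completes the type
    reveal_type(d)  # "builtins.dict[builtins.str, builtins.int]"
```
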
- def type_requires_usage(self, typ: Type) -> Optional[Tuple[str, ErrorCode]]:
+ def type_requires_usage(self, typ: Type) -> tuple[str, ErrorCode] | None:
"""Some types require usage in all cases. The classic example is
an unused coroutine.
@@ -3496,8 +4125,9 @@ def check_return_stmt(self, s: ReturnStmt) -> None:
defn = self.scope.top_function()
if defn is not None:
if defn.is_generator:
- return_type = self.get_generator_return_type(self.return_types[-1],
- defn.is_coroutine)
+ return_type = self.get_generator_return_type(
+ self.return_types[-1], defn.is_coroutine
+ )
elif defn.is_coroutine:
return_type = self.get_coroutine_return_type(self.return_types[-1])
else:
@@ -3521,8 +4151,11 @@ def check_return_stmt(self, s: ReturnStmt) -> None:
allow_none_func_call = is_lambda or declared_none_return or declared_any_return
# Return with a value.
- typ = get_proper_type(self.expr_checker.accept(
- s.expr, return_type, allow_none_return=allow_none_func_call))
+ typ = get_proper_type(
+ self.expr_checker.accept(
+ s.expr, return_type, allow_none_return=allow_none_func_call
+ )
+ )
if defn.is_async_generator:
self.fail(message_registry.RETURN_IN_ASYNC_GENERATOR, s)
@@ -3531,13 +4164,19 @@ def check_return_stmt(self, s: ReturnStmt) -> None:
if isinstance(typ, AnyType):
# (Unless you asked to be warned in that case, and the
# function is not declared to return Any)
- if (self.options.warn_return_any
+ if (
+ self.options.warn_return_any
and not self.current_node_deferred
and not is_proper_subtype(AnyType(TypeOfAny.special_form), return_type)
- and not (defn.name in BINARY_MAGIC_METHODS and
- is_literal_not_implemented(s.expr))
- and not (isinstance(return_type, Instance) and
- return_type.type.fullname == 'builtins.object')):
+ and not (
+ defn.name in BINARY_MAGIC_METHODS
+ and is_literal_not_implemented(s.expr)
+ )
+ and not (
+ isinstance(return_type, Instance)
+ and return_type.type.fullname == "builtins.object"
+ )
+ ):
self.msg.incorrectly_returning_any(return_type, s)
return
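
For reference, the kind of return this branch flags under `--warn-return-any` (illustrative code):

```python
def untyped():  # implicitly returns Any
    ...

def f() -> int:
    return untyped()  # error: Returning Any from function declared to return "int"
```
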
@@ -3551,19 +4190,22 @@ def check_return_stmt(self, s: ReturnStmt) -> None:
self.fail(message_registry.NO_RETURN_VALUE_EXPECTED, s)
else:
self.check_subtype(
- subtype_label='got',
+ subtype_label="got",
subtype=typ,
- supertype_label='expected',
+ supertype_label="expected",
supertype=return_type,
context=s.expr,
outer_context=s,
msg=message_registry.INCOMPATIBLE_RETURN_VALUE_TYPE,
- code=codes.RETURN_VALUE)
+ )
else:
# Empty returns are valid in Generators with Any typed returns, but not in
# coroutines.
- if (defn.is_generator and not defn.is_coroutine and
- isinstance(return_type, AnyType)):
+ if (
+ defn.is_generator
+ and not defn.is_coroutine
+ and isinstance(return_type, AnyType)
+ ):
return
if isinstance(return_type, (NoneType, AnyType)):
@@ -3600,12 +4242,10 @@ def visit_if_stmt(self, s: IfStmt) -> None:
def visit_while_stmt(self, s: WhileStmt) -> None:
"""Type check a while statement."""
if_stmt = IfStmt([s.expr], [s.body], None)
- if_stmt.set_line(s.get_line(), s.get_column())
- self.accept_loop(if_stmt, s.else_body,
- exit_condition=s.expr)
+ if_stmt.set_line(s)
+ self.accept_loop(if_stmt, s.else_body, exit_condition=s.expr)
- def visit_operator_assignment_stmt(self,
- s: OperatorAssignmentStmt) -> None:
+ def visit_operator_assignment_stmt(self, s: OperatorAssignmentStmt) -> None:
"""Type check an operator assignment statement, e.g. x += 1."""
self.try_infer_partial_generic_type_from_assignment(s.lvalue, s.rvalue, s.op)
if isinstance(s.lvalue, MemberExpr):
@@ -3617,16 +4257,16 @@ def visit_operator_assignment_stmt(self,
inplace, method = infer_operator_assignment_method(lvalue_type, s.op)
if inplace:
# There is __ifoo__, treat as x = x.__ifoo__(y)
- rvalue_type, method_type = self.expr_checker.check_op(
- method, lvalue_type, s.rvalue, s)
+ rvalue_type, method_type = self.expr_checker.check_op(method, lvalue_type, s.rvalue, s)
if not is_subtype(rvalue_type, lvalue_type):
self.msg.incompatible_operator_assignment(s.op, s)
else:
# There is no __ifoo__, treat as x = x y
expr = OpExpr(s.op, s.lvalue, s.rvalue)
expr.set_line(s)
- self.check_assignment(lvalue=s.lvalue, rvalue=expr,
- infer_lvalue_type=True, new_syntax=False)
+ self.check_assignment(
+ lvalue=s.lvalue, rvalue=expr, infer_lvalue_type=True, new_syntax=False
+ )
self.check_final(s)
def visit_assert_stmt(self, s: AssertStmt) -> None:
@@ -3649,21 +4289,13 @@ def visit_raise_stmt(self, s: RaiseStmt) -> None:
self.type_check_raise(s.from_expr, s, optional=True)
self.binder.unreachable()
- def type_check_raise(self, e: Expression, s: RaiseStmt,
- optional: bool = False) -> None:
+ def type_check_raise(self, e: Expression, s: RaiseStmt, optional: bool = False) -> None:
typ = get_proper_type(self.expr_checker.accept(e))
if isinstance(typ, DeletedType):
self.msg.deleted_as_rvalue(typ, e)
return
- if self.options.python_version[0] == 2:
- # Since `raise` has very different rule on python2, we use a different helper.
- # https://github.com/python/mypy/pull/11289
- self._type_check_raise_python2(e, s, typ)
- return
-
- # Python3 case:
- exc_type = self.named_type('builtins.BaseException')
+ exc_type = self.named_type("builtins.BaseException")
expected_type_items = [exc_type, TypeType(exc_type)]
if optional:
# This is used for `x` part in a case like `raise e from x`,
@@ -3671,78 +4303,13 @@ def type_check_raise(self, e: Expression, s: RaiseStmt,
expected_type_items.append(NoneType())
self.check_subtype(
- typ, UnionType.make_union(expected_type_items), s,
- message_registry.INVALID_EXCEPTION,
+ typ, UnionType.make_union(expected_type_items), s, message_registry.INVALID_EXCEPTION
)
if isinstance(typ, FunctionLike):
# https://github.com/python/mypy/issues/11089
self.expr_checker.check_call(typ, [], [], e)
- def _type_check_raise_python2(self, e: Expression, s: RaiseStmt, typ: ProperType) -> None:
- # Python2 has two possible major cases:
- # 1. `raise expr`, where `expr` is some expression, it can be:
- # - Exception typ
- # - Exception instance
- # - Old style class (not supported)
- # - Tuple, where 0th item is exception type or instance
- # 2. `raise exc, msg, traceback`, where:
- # - `exc` is exception type (not instance!)
- # - `traceback` is `types.TracebackType | None`
- # Important note: `raise exc, msg` is not the same as `raise (exc, msg)`
- # We call `raise exc, msg, traceback` - legacy mode.
- exc_type = self.named_type('builtins.BaseException')
- exc_inst_or_type = UnionType([exc_type, TypeType(exc_type)])
-
- if (not s.legacy_mode and (isinstance(typ, TupleType) and typ.items
- or (isinstance(typ, Instance) and typ.args
- and typ.type.fullname == 'builtins.tuple'))):
- # `raise (exc, ...)` case:
- item = typ.items[0] if isinstance(typ, TupleType) else typ.args[0]
- self.check_subtype(
- item, exc_inst_or_type, s,
- 'When raising a tuple, first element must by derived from BaseException',
- )
- return
- elif s.legacy_mode:
- # `raise Exception, msg` case
- # `raise Exception, msg, traceback` case
- # https://docs.python.org/2/reference/simple_stmts.html#the-raise-statement
- assert isinstance(typ, TupleType) # Is set in fastparse2.py
- if (len(typ.items) >= 2
- and isinstance(get_proper_type(typ.items[1]), NoneType)):
- expected_type: Type = exc_inst_or_type
- else:
- expected_type = TypeType(exc_type)
- self.check_subtype(
- typ.items[0], expected_type, s,
- 'Argument 1 must be "{}" subtype'.format(expected_type),
- )
-
- # Typecheck `traceback` part:
- if len(typ.items) == 3:
- # Now, we typecheck `traceback` argument if it is present.
- # We do this after the main check for better error message
- # and better ordering: first about `BaseException` subtype,
- # then about `traceback` type.
- traceback_type = UnionType.make_union([
- self.named_type('types.TracebackType'),
- NoneType(),
- ])
- self.check_subtype(
- typ.items[2], traceback_type, s,
- 'Argument 3 must be "{}" subtype'.format(traceback_type),
- )
- else:
- expected_type_items = [
- # `raise Exception` and `raise Exception()` cases:
- exc_type, TypeType(exc_type),
- ]
- self.check_subtype(
- typ, UnionType.make_union(expected_type_items),
- s, message_registry.INVALID_EXCEPTION,
- )
-
def visit_try_stmt(self, s: TryStmt) -> None:
"""Type check a try statement."""
# Our enclosing frame will get the result if the try/except falls through.
@@ -3797,46 +4364,31 @@ def visit_try_without_finally(self, s: TryStmt, try_frame: bool) -> None:
with self.binder.frame_context(can_skip=True, fall_through=4):
typ = s.types[i]
if typ:
- t = self.check_except_handler_test(typ)
+ t = self.check_except_handler_test(typ, s.is_star)
var = s.vars[i]
if var:
# To support local variables, we make this a definition line,
# causing assignment to set the variable's type.
var.is_inferred_def = True
- # We also temporarily set current_node_deferred to False to
- # make sure the inference happens.
- # TODO: Use a better solution, e.g. a
- # separate Var for each except block.
- am_deferring = self.current_node_deferred
- self.current_node_deferred = False
self.check_assignment(var, self.temp_node(t, var))
- self.current_node_deferred = am_deferring
self.accept(s.handlers[i])
var = s.vars[i]
if var:
- # Exception variables are deleted in python 3 but not python 2.
- # But, since it's bad form in python 2 and the type checking
- # wouldn't work very well, we delete it anyway.
-
+ # Exception variables are deleted.
# Unfortunately, this doesn't let us detect usage before the
# try/except block.
- if self.options.python_version[0] >= 3:
- source = var.name
- else:
- source = ('(exception variable "{}", which we do not '
- 'accept outside except: blocks even in '
- 'python 2)'.format(var.name))
+ source = var.name
if isinstance(var.node, Var):
var.node.type = DeletedType(source=source)
self.binder.cleanse(var)
if s.else_body:
self.accept(s.else_body)
- def check_except_handler_test(self, n: Expression) -> Type:
+ def check_except_handler_test(self, n: Expression, is_star: bool) -> Type:
"""Type check an exception handler test clause."""
typ = self.expr_checker.accept(n)
- all_types: List[Type] = []
+ all_types: list[Type] = []
test_types = self.get_types_from_except_handler(typ, n)
for ttype in get_proper_types(test_types):
@@ -3848,23 +4400,48 @@ def check_except_handler_test(self, n: Expression) -> Type:
item = ttype.items[0]
if not item.is_type_obj():
self.fail(message_registry.INVALID_EXCEPTION_TYPE, n)
- return AnyType(TypeOfAny.from_error)
- exc_type = item.ret_type
+ return self.default_exception_type(is_star)
+ exc_type = erase_typevars(item.ret_type)
elif isinstance(ttype, TypeType):
exc_type = ttype.item
else:
self.fail(message_registry.INVALID_EXCEPTION_TYPE, n)
- return AnyType(TypeOfAny.from_error)
+ return self.default_exception_type(is_star)
- if not is_subtype(exc_type, self.named_type('builtins.BaseException')):
+ if not is_subtype(exc_type, self.named_type("builtins.BaseException")):
self.fail(message_registry.INVALID_EXCEPTION_TYPE, n)
- return AnyType(TypeOfAny.from_error)
+ return self.default_exception_type(is_star)
all_types.append(exc_type)
+ if is_star:
+ new_all_types: list[Type] = []
+ for typ in all_types:
+ if is_proper_subtype(typ, self.named_type("builtins.BaseExceptionGroup")):
+ self.fail(message_registry.INVALID_EXCEPTION_GROUP, n)
+ new_all_types.append(AnyType(TypeOfAny.from_error))
+ else:
+ new_all_types.append(typ)
+ return self.wrap_exception_group(new_all_types)
return make_simplified_union(all_types)
- def get_types_from_except_handler(self, typ: Type, n: Expression) -> List[Type]:
+ def default_exception_type(self, is_star: bool) -> Type:
+ """Exception type to return in case of a previous type error."""
+ any_type = AnyType(TypeOfAny.from_error)
+ if is_star:
+ return self.named_generic_type("builtins.ExceptionGroup", [any_type])
+ return any_type
+
+ def wrap_exception_group(self, types: Sequence[Type]) -> Type:
+ """Transform except* variable type into an appropriate exception group."""
+ arg = make_simplified_union(types)
+ if is_subtype(arg, self.named_type("builtins.Exception")):
+ base = "builtins.ExceptionGroup"
+ else:
+ base = "builtins.BaseExceptionGroup"
+ return self.named_generic_type(base, [arg])
+
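
Together these helpers give `except*` targets an exception-group type: subclasses of `Exception` are wrapped in `ExceptionGroup`, everything else in `BaseExceptionGroup`. A sketch:

```python
try:
    ...
except* ValueError as e:
    reveal_type(e)  # "builtins.ExceptionGroup[builtins.ValueError]"
except* KeyboardInterrupt as e2:
    reveal_type(e2)  # "builtins.BaseExceptionGroup[builtins.KeyboardInterrupt]"
```
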
+ def get_types_from_except_handler(self, typ: Type, n: Expression) -> list[Type]:
"""Helper for check_except_handler_test to retrieve handler types."""
typ = get_proper_type(typ)
if isinstance(typ, TupleType):
@@ -3875,7 +4452,7 @@ def get_types_from_except_handler(self, typ: Type, n: Expression) -> List[Type]:
for item in typ.relevant_items()
for union_typ in self.get_types_from_except_handler(item, n)
]
- elif isinstance(typ, Instance) and is_named_instance(typ, 'builtins.tuple'):
+ elif is_named_instance(typ, "builtins.tuple"):
# variadic tuple
return [typ.args[0]]
else:
@@ -3892,21 +4469,26 @@ def visit_for_stmt(self, s: ForStmt) -> None:
self.analyze_index_variables(s.index, item_type, s.index_type is None, s)
self.accept_loop(s.body, s.else_body)
- def analyze_async_iterable_item_type(self, expr: Expression) -> Tuple[Type, Type]:
+ def analyze_async_iterable_item_type(self, expr: Expression) -> tuple[Type, Type]:
"""Analyse async iterable expression and return iterator and iterator item types."""
echk = self.expr_checker
iterable = echk.accept(expr)
- iterator = echk.check_method_call_by_name('__aiter__', iterable, [], [], expr)[0]
- awaitable = echk.check_method_call_by_name('__anext__', iterator, [], [], expr)[0]
- item_type = echk.check_awaitable_expr(awaitable, expr,
- message_registry.INCOMPATIBLE_TYPES_IN_ASYNC_FOR)
+ iterator = echk.check_method_call_by_name("__aiter__", iterable, [], [], expr)[0]
+ awaitable = echk.check_method_call_by_name("__anext__", iterator, [], [], expr)[0]
+ item_type = echk.check_awaitable_expr(
+ awaitable, expr, message_registry.INCOMPATIBLE_TYPES_IN_ASYNC_FOR
+ )
return iterator, item_type
- def analyze_iterable_item_type(self, expr: Expression) -> Tuple[Type, Type]:
+ def analyze_iterable_item_type(self, expr: Expression) -> tuple[Type, Type]:
"""Analyse iterable expression and return iterator and iterator item types."""
echk = self.expr_checker
iterable = get_proper_type(echk.accept(expr))
- iterator = echk.check_method_call_by_name('__iter__', iterable, [], [], expr)[0]
+ iterator = echk.check_method_call_by_name("__iter__", iterable, [], [], expr)[0]
+
+ int_type = self.analyze_range_native_int_type(expr)
+ if int_type:
+ return iterator, int_type
if isinstance(iterable, TupleType):
joined: Type = UninhabitedType()
@@ -3915,27 +4497,53 @@ def analyze_iterable_item_type(self, expr: Expression) -> Tuple[Type, Type]:
return iterator, joined
else:
# Non-tuple iterable.
- if self.options.python_version[0] >= 3:
- nextmethod = '__next__'
- else:
- nextmethod = 'next'
- return iterator, echk.check_method_call_by_name(nextmethod, iterator, [], [], expr)[0]
+ return iterator, echk.check_method_call_by_name("__next__", iterator, [], [], expr)[0]
+
+ def analyze_range_native_int_type(self, expr: Expression) -> Type | None:
+ """Try to infer native int item type from arguments to range(...).
- def analyze_container_item_type(self, typ: Type) -> Optional[Type]:
+ For example, return i64 if the expression is "range(0, i64(n))".
+
+ Return None if unsuccessful.
+ """
+ if (
+ isinstance(expr, CallExpr)
+ and isinstance(expr.callee, RefExpr)
+ and expr.callee.fullname == "builtins.range"
+ and 1 <= len(expr.args) <= 3
+ and all(kind == ARG_POS for kind in expr.arg_kinds)
+ ):
+ native_int: Type | None = None
+ ok = True
+ for arg in expr.args:
+ argt = get_proper_type(self.lookup_type(arg))
+ if isinstance(argt, Instance) and argt.type.fullname in (
+ "mypy_extensions.i64",
+ "mypy_extensions.i32",
+ ):
+ if native_int is None:
+ native_int = argt
+ elif argt != native_int:
+ ok = False
+ if ok and native_int:
+ return native_int
+ return None
+
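
The docstring's example spelled out, assuming `i64` from `mypy_extensions`:

```python
from mypy_extensions import i64

def f(n: i64) -> None:
    for x in range(n):
        reveal_type(x)  # "mypy_extensions.i64"
```
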
+ def analyze_container_item_type(self, typ: Type) -> Type | None:
"""Check if a type is a nominal container of a union of such.
Return the corresponding container item type.
"""
typ = get_proper_type(typ)
if isinstance(typ, UnionType):
- types: List[Type] = []
+ types: list[Type] = []
for item in typ.items:
c_type = self.analyze_container_item_type(item)
if c_type:
types.append(c_type)
return UnionType.make_union(types)
- if isinstance(typ, Instance) and typ.type.has_base('typing.Container'):
- supertype = self.named_type('typing.Container').type
+ if isinstance(typ, Instance) and typ.type.has_base("typing.Container"):
+ supertype = self.named_type("typing.Container").type
super_instance = map_instance_to_supertype(typ, supertype)
assert len(super_instance.args) == 1
return super_instance.args[0]
@@ -3943,15 +4551,16 @@ def analyze_container_item_type(self, typ: Type) -> Optional[Type]:
return self.analyze_container_item_type(tuple_fallback(typ))
return None
- def analyze_index_variables(self, index: Expression, item_type: Type,
- infer_lvalue_type: bool, context: Context) -> None:
+ def analyze_index_variables(
+ self, index: Expression, item_type: Type, infer_lvalue_type: bool, context: Context
+ ) -> None:
"""Type check or infer for loop or list comprehension index vars."""
self.check_assignment(index, self.temp_node(item_type, context), infer_lvalue_type)
def visit_del_stmt(self, s: DelStmt) -> None:
if isinstance(s.expr, IndexExpr):
e = s.expr
- m = MemberExpr(e.base, '__delitem__')
+ m = MemberExpr(e.base, "__delitem__")
m.line = s.line
m.column = s.column
c = CallExpr(m, [e.index], [nodes.ARG_POS], [None])
@@ -3962,13 +4571,14 @@ def visit_del_stmt(self, s: DelStmt) -> None:
s.expr.accept(self.expr_checker)
for elt in flatten(s.expr):
if isinstance(elt, NameExpr):
- self.binder.assign_type(elt, DeletedType(source=elt.name),
- get_declaration(elt), False)
+ self.binder.assign_type(
+ elt, DeletedType(source=elt.name), get_declaration(elt), False
+ )
def visit_decorator(self, e: Decorator) -> None:
for d in e.decorators:
if isinstance(d, RefExpr):
- if d.fullname == 'typing.no_type_check':
+ if d.fullname == "typing.no_type_check":
e.var.type = AnyType(TypeOfAny.special_form)
e.var.is_ready = True
return
@@ -3981,45 +4591,49 @@ def visit_decorator(self, e: Decorator) -> None:
# may be different from the declared signature.
sig: Type = self.function_type(e.func)
for d in reversed(e.decorators):
- if refers_to_fullname(d, 'typing.overload'):
+ if refers_to_fullname(d, OVERLOAD_NAMES):
self.fail(message_registry.MULTIPLE_OVERLOADS_REQUIRED, e)
continue
dec = self.expr_checker.accept(d)
temp = self.temp_node(sig, context=e)
fullname = None
if isinstance(d, RefExpr):
- fullname = d.fullname
+ fullname = d.fullname or None
# if this is a expression like @b.a where b is an object, get the type of b
# so we can pass it the method hook in the plugins
- object_type: Optional[Type] = None
- if fullname is None and isinstance(d, MemberExpr) and d.expr in self.type_map:
- object_type = self.type_map[d.expr]
+ object_type: Type | None = None
+ if fullname is None and isinstance(d, MemberExpr) and self.has_type(d.expr):
+ object_type = self.lookup_type(d.expr)
fullname = self.expr_checker.method_fullname(object_type, d.name)
self.check_for_untyped_decorator(e.func, dec, d)
- sig, t2 = self.expr_checker.check_call(dec, [temp],
- [nodes.ARG_POS], e,
- callable_name=fullname,
- object_type=object_type)
+ sig, t2 = self.expr_checker.check_call(
+ dec, [temp], [nodes.ARG_POS], e, callable_name=fullname, object_type=object_type
+ )
self.check_untyped_after_decorator(sig, e.func)
sig = set_callable_name(sig, e.func)
e.var.type = sig
e.var.is_ready = True
if e.func.is_property:
+ if isinstance(sig, CallableType):
+ if len([k for k in sig.arg_kinds if k.is_required()]) > 1:
+ self.msg.fail("Too many arguments for property", e)
self.check_incompatible_property_override(e)
- if e.func.info and not e.func.is_dynamic():
+ # For overloaded functions we already checked override for overload as a whole.
+ if e.func.info and not e.func.is_dynamic() and not e.is_overload:
self.check_method_override(e)
- if e.func.info and e.func.name in ('__init__', '__new__'):
+ if e.func.info and e.func.name in ("__init__", "__new__"):
if e.type and not isinstance(get_proper_type(e.type), (FunctionLike, AnyType)):
self.fail(message_registry.BAD_CONSTRUCTOR_TYPE, e)
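
A sketch of what the new property arity check rejects:

```python
class C:
    @property
    def bad(self, extra: int) -> int:  # error: Too many arguments for property
        return extra
```
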
- def check_for_untyped_decorator(self,
- func: FuncDef,
- dec_type: Type,
- dec_expr: Expression) -> None:
- if (self.options.disallow_untyped_decorators and
- is_typed_callable(func.type) and
- is_untyped_decorator(dec_type)):
+ def check_for_untyped_decorator(
+ self, func: FuncDef, dec_type: Type, dec_expr: Expression
+ ) -> None:
+ if (
+ self.options.disallow_untyped_decorators
+ and is_typed_callable(func.type)
+ and is_untyped_decorator(dec_type)
+ ):
self.msg.typed_function_untyped_decorator(func.name, dec_expr)
def check_incompatible_property_override(self, e: Decorator) -> None:
@@ -4029,10 +4643,11 @@ def check_incompatible_property_override(self, e: Decorator) -> None:
base_attr = base.names.get(name)
if not base_attr:
continue
- if (isinstance(base_attr.node, OverloadedFuncDef) and
- base_attr.node.is_property and
- cast(Decorator,
- base_attr.node.items[0]).var.is_settable_property):
+ if (
+ isinstance(base_attr.node, OverloadedFuncDef)
+ and base_attr.node.is_property
+ and cast(Decorator, base_attr.node.items[0]).var.is_settable_property
+ ):
self.fail(message_registry.READ_ONLY_PROPERTY_OVERRIDES_READ_WRITE, e)
def visit_with_stmt(self, s: WithStmt) -> None:
@@ -4047,17 +4662,18 @@ def visit_with_stmt(self, s: WithStmt) -> None:
# exceptions or not. We determine this using a heuristic based on the
# return type of the __exit__ method -- see the discussion in
# https://github.com/python/mypy/issues/7214 and the section about context managers
- # in https://github.com/python/typeshed/blob/master/CONTRIBUTING.md#conventions
+ # in https://github.com/python/typeshed/blob/main/CONTRIBUTING.md#conventions
# for more details.
exit_ret_type = get_proper_type(exit_ret_type)
if is_literal_type(exit_ret_type, "builtins.bool", False):
continue
- if (is_literal_type(exit_ret_type, "builtins.bool", True)
- or (isinstance(exit_ret_type, Instance)
- and exit_ret_type.type.fullname == 'builtins.bool'
- and state.strict_optional)):
+ if is_literal_type(exit_ret_type, "builtins.bool", True) or (
+ isinstance(exit_ret_type, Instance)
+ and exit_ret_type.type.fullname == "builtins.bool"
+ and state.strict_optional
+ ):
# Note: if strict-optional is disabled, this bool instance
# could actually be an Optional[bool].
exceptions_maybe_suppressed = True
@@ -4078,55 +4694,39 @@ def check_untyped_after_decorator(self, typ: Type, func: FuncDef) -> None:
if mypy.checkexpr.has_any_type(typ):
self.msg.untyped_decorated_function(typ, func)
- def check_async_with_item(self, expr: Expression, target: Optional[Expression],
- infer_lvalue_type: bool) -> Type:
+ def check_async_with_item(
+ self, expr: Expression, target: Expression | None, infer_lvalue_type: bool
+ ) -> Type:
echk = self.expr_checker
ctx = echk.accept(expr)
- obj = echk.check_method_call_by_name('__aenter__', ctx, [], [], expr)[0]
+ obj = echk.check_method_call_by_name("__aenter__", ctx, [], [], expr)[0]
obj = echk.check_awaitable_expr(
- obj, expr, message_registry.INCOMPATIBLE_TYPES_IN_ASYNC_WITH_AENTER)
+ obj, expr, message_registry.INCOMPATIBLE_TYPES_IN_ASYNC_WITH_AENTER
+ )
if target:
self.check_assignment(target, self.temp_node(obj, expr), infer_lvalue_type)
arg = self.temp_node(AnyType(TypeOfAny.special_form), expr)
res, _ = echk.check_method_call_by_name(
- '__aexit__', ctx, [arg] * 3, [nodes.ARG_POS] * 3, expr)
+ "__aexit__", ctx, [arg] * 3, [nodes.ARG_POS] * 3, expr
+ )
return echk.check_awaitable_expr(
- res, expr, message_registry.INCOMPATIBLE_TYPES_IN_ASYNC_WITH_AEXIT)
+ res, expr, message_registry.INCOMPATIBLE_TYPES_IN_ASYNC_WITH_AEXIT
+ )
- def check_with_item(self, expr: Expression, target: Optional[Expression],
- infer_lvalue_type: bool) -> Type:
+ def check_with_item(
+ self, expr: Expression, target: Expression | None, infer_lvalue_type: bool
+ ) -> Type:
echk = self.expr_checker
ctx = echk.accept(expr)
- obj = echk.check_method_call_by_name('__enter__', ctx, [], [], expr)[0]
+ obj = echk.check_method_call_by_name("__enter__", ctx, [], [], expr)[0]
if target:
self.check_assignment(target, self.temp_node(obj, expr), infer_lvalue_type)
arg = self.temp_node(AnyType(TypeOfAny.special_form), expr)
res, _ = echk.check_method_call_by_name(
- '__exit__', ctx, [arg] * 3, [nodes.ARG_POS] * 3, expr)
+ "__exit__", ctx, [arg] * 3, [nodes.ARG_POS] * 3, expr
+ )
return res
- def visit_print_stmt(self, s: PrintStmt) -> None:
- for arg in s.args:
- self.expr_checker.accept(arg)
- if s.target:
- target_type = get_proper_type(self.expr_checker.accept(s.target))
- if not isinstance(target_type, NoneType):
- write_type = self.expr_checker.analyze_external_member_access(
- 'write', target_type, s.target)
- required_type = CallableType(
- arg_types=[self.named_type('builtins.str')],
- arg_kinds=[ARG_POS],
- arg_names=[None],
- ret_type=AnyType(TypeOfAny.implementation_artifact),
- fallback=self.named_type('builtins.function'),
- )
- # This has to be hard-coded, since it is a syntax pattern, not a function call.
- if not is_subtype(write_type, required_type):
- self.fail(message_registry.PYTHON2_PRINT_FILE_TYPE.format(
- write_type,
- required_type,
- ), s.target)
-
def visit_break_stmt(self, s: BreakStmt) -> None:
self.binder.handle_break()
@@ -4147,27 +4747,26 @@ def visit_match_stmt(self, s: MatchStmt) -> None:
# will be a union of all capture types). This pass ignores
# guard expressions.
pattern_types = [self.pattern_checker.accept(p, subject_type) for p in s.patterns]
- type_maps: List[TypeMap] = [t.captures for t in pattern_types]
+ type_maps: list[TypeMap] = [t.captures for t in pattern_types]
inferred_types = self.infer_variable_types_from_type_maps(type_maps)
# The second pass narrows down the types and type checks bodies.
for p, g, b in zip(s.patterns, s.guards, s.bodies):
- current_subject_type = self.expr_checker.narrow_type_from_binder(s.subject,
- subject_type)
+ current_subject_type = self.expr_checker.narrow_type_from_binder(
+ s.subject, subject_type
+ )
pattern_type = self.pattern_checker.accept(p, current_subject_type)
with self.binder.frame_context(can_skip=True, fall_through=2):
- if b.is_unreachable or isinstance(get_proper_type(pattern_type.type),
- UninhabitedType):
+ if b.is_unreachable or isinstance(
+ get_proper_type(pattern_type.type), UninhabitedType
+ ):
self.push_type_map(None)
else_map: TypeMap = {}
else:
pattern_map, else_map = conditional_types_to_typemaps(
- s.subject,
- pattern_type.type,
- pattern_type.rest_type
+ s.subject, pattern_type.type, pattern_type.rest_type
)
- self.remove_capture_conflicts(pattern_type.captures,
- inferred_types)
+ self.remove_capture_conflicts(pattern_type.captures, inferred_types)
self.push_type_map(pattern_map)
self.push_type_map(pattern_type.captures)
if g is not None:
@@ -4191,8 +4790,8 @@ def visit_match_stmt(self, s: MatchStmt) -> None:
with self.binder.frame_context(can_skip=False, fall_through=2):
pass
- def infer_variable_types_from_type_maps(self, type_maps: List[TypeMap]) -> Dict[Var, Type]:
- all_captures: Dict[Var, List[Tuple[NameExpr, Type]]] = defaultdict(list)
+ def infer_variable_types_from_type_maps(self, type_maps: list[TypeMap]) -> dict[Var, Type]:
+ all_captures: dict[Var, list[tuple[NameExpr, Type]]] = defaultdict(list)
for tm in type_maps:
if tm is not None:
for expr, typ in tm.items():
@@ -4201,20 +4800,24 @@ def infer_variable_types_from_type_maps(self, type_maps: List[TypeMap]) -> Dict[
assert isinstance(node, Var)
all_captures[node].append((expr, typ))
- inferred_types: Dict[Var, Type] = {}
+ inferred_types: dict[Var, Type] = {}
for var, captures in all_captures.items():
already_exists = False
- types: List[Type] = []
+ types: list[Type] = []
for expr, typ in captures:
types.append(typ)
previous_type, _, _ = self.check_lvalue(expr)
if previous_type is not None:
already_exists = True
- if self.check_subtype(typ, previous_type, expr,
- msg=message_registry.INCOMPATIBLE_TYPES_IN_CAPTURE,
- subtype_label="pattern captures type",
- supertype_label="variable has type"):
+ if self.check_subtype(
+ typ,
+ previous_type,
+ expr,
+ msg=message_registry.INCOMPATIBLE_TYPES_IN_CAPTURE,
+ subtype_label="pattern captures type",
+ supertype_label="variable has type",
+ ):
inferred_types[var] = previous_type
if not already_exists:
@@ -4225,7 +4828,7 @@ def infer_variable_types_from_type_maps(self, type_maps: List[TypeMap]) -> Dict[
self.infer_variable_type(var, first_occurrence, new_type, first_occurrence)
return inferred_types
- def remove_capture_conflicts(self, type_map: TypeMap, inferred_types: Dict[Var, Type]) -> None:
+ def remove_capture_conflicts(self, type_map: TypeMap, inferred_types: dict[Var, Type]) -> None:
if type_map:
for expr, typ in list(type_map.items()):
if isinstance(expr, NameExpr):
@@ -4234,12 +4837,13 @@ def remove_capture_conflicts(self, type_map: TypeMap, inferred_types: Dict[Var,
if node not in inferred_types or not is_subtype(typ, inferred_types[node]):
del type_map[expr]
- def make_fake_typeinfo(self,
- curr_module_fullname: str,
- class_gen_name: str,
- class_short_name: str,
- bases: List[Instance],
- ) -> Tuple[ClassDef, TypeInfo]:
+ def make_fake_typeinfo(
+ self,
+ curr_module_fullname: str,
+ class_gen_name: str,
+ class_short_name: str,
+ bases: list[Instance],
+ ) -> tuple[ClassDef, TypeInfo]:
# Build the fake ClassDef and TypeInfo together.
# The ClassDef is full of lies and doesn't actually contain a body.
# Use format_bare to generate a nice name for error messages.
@@ -4247,18 +4851,17 @@ def make_fake_typeinfo(self,
# should be irrelevant for a generated type like this:
# is_protocol, protocol_members, is_abstract
cdef = ClassDef(class_short_name, Block([]))
- cdef.fullname = curr_module_fullname + '.' + class_gen_name
+ cdef.fullname = curr_module_fullname + "." + class_gen_name
info = TypeInfo(SymbolTable(), cdef, curr_module_fullname)
cdef.info = info
info.bases = bases
calculate_mro(info)
- info.calculate_metaclass_type()
+ info.metaclass_type = info.calculate_metaclass_type()
return cdef, info
- def intersect_instances(self,
- instances: Tuple[Instance, Instance],
- ctx: Context,
- ) -> Optional[Instance]:
+ def intersect_instances(
+ self, instances: tuple[Instance, Instance], errors: list[tuple[str, str]]
+ ) -> Instance | None:
"""Try creating an ad-hoc intersection of the given instances.
Note that this function does *not* try and create a full-fledged
@@ -4284,7 +4887,18 @@ def intersect_instances(self,
curr_module = self.scope.stack[0]
assert isinstance(curr_module, MypyFile)
- def _get_base_classes(instances_: Tuple[Instance, Instance]) -> List[Instance]:
+ # First, retry narrowing while allowing promotions (they are disabled by default
+ # for isinstance() checks, etc). This way we will still type-check branches like
+ # x: complex = 1
+ # if isinstance(x, int):
+ # ...
+ left, right = instances
+ if is_proper_subtype(left, right, ignore_promotions=False):
+ return left
+ if is_proper_subtype(right, left, ignore_promotions=False):
+ return right
+
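
This retry makes the example from the comment check cleanly, since with promotions `int` is a proper subtype of `complex`:

```python
x: complex = 1
if isinstance(x, int):
    reveal_type(x)  # "builtins.int"
```
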
+ def _get_base_classes(instances_: tuple[Instance, Instance]) -> list[Instance]:
base_classes_ = []
for inst in instances_:
if inst.type.is_intersection:
@@ -4297,23 +4911,16 @@ def _get_base_classes(instances_: Tuple[Instance, Instance]) -> List[Instance]:
return base_classes_
def _make_fake_typeinfo_and_full_name(
- base_classes_: List[Instance],
- curr_module_: MypyFile,
- ) -> Tuple[TypeInfo, str]:
+ base_classes_: list[Instance], curr_module_: MypyFile
+ ) -> tuple[TypeInfo, str]:
names_list = pretty_seq([x.type.name for x in base_classes_], "and")
- short_name = '<subclass of {}>'.format(names_list)
+ short_name = f"<subclass of {names_list}>"
full_name_ = gen_unique_name(short_name, curr_module_.names)
cdef, info_ = self.make_fake_typeinfo(
- curr_module_.fullname,
- full_name_,
- short_name,
- base_classes_,
+ curr_module_.fullname, full_name_, short_name, base_classes_
)
return info_, full_name_
- old_msg = self.msg
- new_msg = old_msg.clean_copy()
- self.msg = new_msg
base_classes = _get_base_classes(instances)
# We use the pretty_names_list for error messages but can't
# use it for the real name that goes into the symbol table
@@ -4321,31 +4928,24 @@ def _make_fake_typeinfo_and_full_name(
pretty_names_list = pretty_seq(format_type_distinctly(*base_classes, bare=True), "and")
try:
info, full_name = _make_fake_typeinfo_and_full_name(base_classes, curr_module)
- self.check_multiple_inheritance(info)
- if new_msg.is_errors():
+ with self.msg.filter_errors() as local_errors:
+ self.check_multiple_inheritance(info)
+ if local_errors.has_new_errors():
# "class A(B, C)" unsafe, now check "class A(C, B)":
- new_msg = new_msg.clean_copy()
- self.msg = new_msg
base_classes = _get_base_classes(instances[::-1])
info, full_name = _make_fake_typeinfo_and_full_name(base_classes, curr_module)
- self.check_multiple_inheritance(info)
+ with self.msg.filter_errors() as local_errors:
+ self.check_multiple_inheritance(info)
info.is_intersection = True
except MroError:
- if self.should_report_unreachable_issues():
- old_msg.impossible_intersection(
- pretty_names_list, "inconsistent method resolution order", ctx)
+ errors.append((pretty_names_list, "inconsistent method resolution order"))
return None
- finally:
- self.msg = old_msg
-
- if new_msg.is_errors():
- if self.should_report_unreachable_issues():
- self.msg.impossible_intersection(
- pretty_names_list, "incompatible method signatures", ctx)
+ if local_errors.has_new_errors():
+ errors.append((pretty_names_list, "incompatible method signatures"))
return None
curr_module.names[full_name] = SymbolTableNode(GDEF, info)
- return Instance(info, [])
+ return Instance(info, [], extra_attrs=instances[0].extra_attrs or instances[1].extra_attrs)
def intersect_instance_callable(self, typ: Instance, callable_type: CallableType) -> Instance:
"""Creates a fake type that represents the intersection of an Instance and a CallableType.
@@ -4358,39 +4958,40 @@ def intersect_instance_callable(self, typ: Instance, callable_type: CallableType
# have a valid fullname and a corresponding entry in a symbol table. We generate
# a unique name inside the symbol table of the current module.
cur_module = cast(MypyFile, self.scope.stack[0])
- gen_name = gen_unique_name("".format(typ.type.name),
- cur_module.names)
+ gen_name = gen_unique_name(f"", cur_module.names)
# Synthesize a fake TypeInfo
short_name = format_type_bare(typ)
cdef, info = self.make_fake_typeinfo(cur_module.fullname, gen_name, short_name, [typ])
# Build up a fake FuncDef so we can populate the symbol table.
- func_def = FuncDef('__call__', [], Block([]), callable_type)
- func_def._fullname = cdef.fullname + '.__call__'
+ func_def = FuncDef("__call__", [], Block([]), callable_type)
+ func_def._fullname = cdef.fullname + ".__call__"
func_def.info = info
- info.names['__call__'] = SymbolTableNode(MDEF, func_def)
+ info.names["__call__"] = SymbolTableNode(MDEF, func_def)
cur_module.names[gen_name] = SymbolTableNode(GDEF, info)
- return Instance(info, [])
+ return Instance(info, [], extra_attrs=typ.extra_attrs)
def make_fake_callable(self, typ: Instance) -> Instance:
"""Produce a new type that makes type Callable with a generic callable type."""
- fallback = self.named_type('builtins.function')
- callable_type = CallableType([AnyType(TypeOfAny.explicit),
- AnyType(TypeOfAny.explicit)],
- [nodes.ARG_STAR, nodes.ARG_STAR2],
- [None, None],
- ret_type=AnyType(TypeOfAny.explicit),
- fallback=fallback,
- is_ellipsis_args=True)
+ fallback = self.named_type("builtins.function")
+ callable_type = CallableType(
+ [AnyType(TypeOfAny.explicit), AnyType(TypeOfAny.explicit)],
+ [nodes.ARG_STAR, nodes.ARG_STAR2],
+ [None, None],
+ ret_type=AnyType(TypeOfAny.explicit),
+ fallback=fallback,
+ is_ellipsis_args=True,
+ )
return self.intersect_instance_callable(typ, callable_type)
- def partition_by_callable(self, typ: Type,
- unsound_partition: bool) -> Tuple[List[Type], List[Type]]:
+ def partition_by_callable(
+ self, typ: Type, unsound_partition: bool
+ ) -> tuple[list[Type], list[Type]]:
"""Partitions a type into callable subtypes and uncallable subtypes.
Thus, given:
@@ -4422,8 +5023,9 @@ def partition_by_callable(self, typ: Type,
for subtype in typ.items:
# Use unsound_partition when handling unions in order to
# allow the expected type discrimination.
- subcallables, subuncallables = self.partition_by_callable(subtype,
- unsound_partition=True)
+ subcallables, subuncallables = self.partition_by_callable(
+ subtype, unsound_partition=True
+ )
callables.extend(subcallables)
uncallables.extend(subuncallables)
return callables, uncallables
@@ -4437,8 +5039,9 @@ def partition_by_callable(self, typ: Type,
# do better.
# If it is possible for the false branch to execute, return the original
# type to avoid losing type information.
- callables, uncallables = self.partition_by_callable(erase_to_union_or_bound(typ),
- unsound_partition)
+ callables, uncallables = self.partition_by_callable(
+ erase_to_union_or_bound(typ), unsound_partition
+ )
uncallables = [typ] if len(uncallables) else []
return callables, uncallables
@@ -4449,10 +5052,11 @@ def partition_by_callable(self, typ: Type,
ityp = tuple_fallback(typ)
if isinstance(ityp, Instance):
- method = ityp.type.get_method('__call__')
+ method = ityp.type.get_method("__call__")
if method and method.type:
- callables, uncallables = self.partition_by_callable(method.type,
- unsound_partition=False)
+ callables, uncallables = self.partition_by_callable(
+ method.type, unsound_partition=False
+ )
if len(callables) and not len(uncallables):
# Only consider the type callable if its __call__ method is
# definitely callable.
@@ -4471,9 +5075,9 @@ def partition_by_callable(self, typ: Type,
# We don't know how properly make the type callable.
return [typ], [typ]
- def conditional_callable_type_map(self, expr: Expression,
- current_type: Optional[Type],
- ) -> Tuple[TypeMap, TypeMap]:
+ def conditional_callable_type_map(
+ self, expr: Expression, current_type: Type | None
+ ) -> tuple[TypeMap, TypeMap]:
"""Takes in an expression and the current type of the expression.
Returns a 2-tuple: The first element is a map from the expression to
@@ -4487,13 +5091,13 @@ def conditional_callable_type_map(self, expr: Expression,
if isinstance(get_proper_type(current_type), AnyType):
return {}, {}
- callables, uncallables = self.partition_by_callable(current_type,
- unsound_partition=False)
+ callables, uncallables = self.partition_by_callable(current_type, unsound_partition=False)
if len(callables) and len(uncallables):
callable_map = {expr: UnionType.make_union(callables)} if len(callables) else None
- uncallable_map = {
- expr: UnionType.make_union(uncallables)} if len(uncallables) else None
+ uncallable_map = (
+ {expr: UnionType.make_union(uncallables)} if len(uncallables) else None
+ )
return callable_map, uncallable_map
elif len(callables):
@@ -4501,18 +5105,58 @@ def conditional_callable_type_map(self, expr: Expression,
return None, {}
+ def conditional_types_for_iterable(
+ self, item_type: Type, iterable_type: Type
+ ) -> tuple[Type | None, Type | None]:
+ """
+ Narrows the type of `iterable_type` based on the type of `item_type`.
+ For now, we only support narrowing unions of TypedDicts when the left operand is a literal string (or a union of literal strings).
+ """
+ if_types: list[Type] = []
+ else_types: list[Type] = []
+
+ iterable_type = get_proper_type(iterable_type)
+ if isinstance(iterable_type, UnionType):
+ possible_iterable_types = get_proper_types(iterable_type.relevant_items())
+ else:
+ possible_iterable_types = [iterable_type]
+
+ item_str_literals = try_getting_str_literals_from_type(item_type)
+
+ for possible_iterable_type in possible_iterable_types:
+ if item_str_literals and isinstance(possible_iterable_type, TypedDictType):
+ for key in item_str_literals:
+ if key in possible_iterable_type.required_keys:
+ if_types.append(possible_iterable_type)
+ elif (
+ key in possible_iterable_type.items or not possible_iterable_type.is_final
+ ):
+ if_types.append(possible_iterable_type)
+ else_types.append(possible_iterable_type)
+ else:
+ else_types.append(possible_iterable_type)
+ else:
+ if_types.append(possible_iterable_type)
+ else_types.append(possible_iterable_type)
+
+ return (
+ UnionType.make_union(if_types) if if_types else None,
+ UnionType.make_union(else_types) if else_types else None,
+ )
+
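
A sketch of the `in`-based narrowing this enables, with illustrative TypedDicts; the exact revealed form may vary:

```python
from __future__ import annotations

from typing import TypedDict

class Movie(TypedDict):
    title: str

class Book(TypedDict):
    author: str

def f(item: Movie | Book) -> None:
    if "title" in item:
        ...  # item may be Movie (or a non-final Book carrying extra keys)
    else:
        reveal_type(item)  # roughly "TypedDict('Book', {'author': builtins.str})"
```
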
def _is_truthy_type(self, t: ProperType) -> bool:
return (
(
- isinstance(t, Instance) and
- bool(t.type) and
- not t.type.has_readable_member('__bool__') and
- not t.type.has_readable_member('__len__')
+ isinstance(t, Instance)
+ and bool(t.type)
+ and not t.type.has_readable_member("__bool__")
+ and not t.type.has_readable_member("__len__")
+ and t.type.fullname != "builtins.object"
)
or isinstance(t, FunctionLike)
or (
- isinstance(t, UnionType) and
- all(self._is_truthy_type(t) for t in get_proper_types(t.items))
+ isinstance(t, UnionType)
+ and all(self._is_truthy_type(t) for t in get_proper_types(t.items))
)
)
@@ -4535,20 +5179,28 @@ def format_expr_type() -> str:
return f'"{expr.callee.name}" returns {typ}'
elif isinstance(expr.callee, RefExpr) and expr.callee.fullname:
return f'"{expr.callee.fullname}" returns {typ}'
- return f'Call returns {typ}'
+ return f"Call returns {typ}"
else:
- return f'Expression has type {typ}'
+ return f"Expression has type {typ}"
if isinstance(t, FunctionLike):
self.fail(message_registry.FUNCTION_ALWAYS_TRUE.format(format_type(t)), expr)
elif isinstance(t, UnionType):
- self.fail(message_registry.TYPE_ALWAYS_TRUE_UNIONTYPE.format(format_expr_type()),
- expr)
+ self.fail(message_registry.TYPE_ALWAYS_TRUE_UNIONTYPE.format(format_expr_type()), expr)
+ elif isinstance(t, Instance) and t.type.fullname == "typing.Iterable":
+ _, info = self.make_fake_typeinfo("typing", "Collection", "Collection", [])
+ self.fail(
+ message_registry.ITERABLE_ALWAYS_TRUE.format(
+ format_expr_type(), format_type(Instance(info, t.args))
+ ),
+ expr,
+ )
else:
self.fail(message_registry.TYPE_ALWAYS_TRUE.format(format_expr_type()), expr)
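
For context, the kind of always-true condition these messages report (with the truthy checks enabled):

```python
def f() -> None: ...

if f:  # error: Function "f" could always be true in boolean context
    ...
```
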
- def find_type_equals_check(self, node: ComparisonExpr, expr_indices: List[int]
- ) -> Tuple[TypeMap, TypeMap]:
+ def find_type_equals_check(
+ self, node: ComparisonExpr, expr_indices: list[int]
+ ) -> tuple[TypeMap, TypeMap]:
"""Narrow types based on any checks of the type ``type(x) == T``
Args:
@@ -4556,17 +5208,15 @@ def find_type_equals_check(self, node: ComparisonExpr, expr_indices: List[int]
expr_indices: The list of indices of expressions in ``node`` that are being
compared
"""
- type_map = self.type_map
def is_type_call(expr: CallExpr) -> bool:
"""Is expr a call to type with one argument?"""
- return (refers_to_fullname(expr.callee, 'builtins.type')
- and len(expr.args) == 1)
+ return refers_to_fullname(expr.callee, "builtins.type") and len(expr.args) == 1
# exprs that are being passed into type
- exprs_in_type_calls: List[Expression] = []
+ exprs_in_type_calls: list[Expression] = []
# type that is being compared to type(expr)
- type_being_compared: Optional[List[TypeRange]] = None
+ type_being_compared: list[TypeRange] | None = None
# whether the type being compared to is final
is_final = False
@@ -4576,7 +5226,7 @@ def is_type_call(expr: CallExpr) -> bool:
if isinstance(expr, CallExpr) and is_type_call(expr):
exprs_in_type_calls.append(expr.args[0])
else:
- current_type = get_isinstance_type(expr, type_map)
+ current_type = self.get_isinstance_type(expr)
if current_type is None:
continue
if type_being_compared is not None:
@@ -4595,21 +5245,19 @@ def is_type_call(expr: CallExpr) -> bool:
if not exprs_in_type_calls:
return {}, {}
- if_maps: List[TypeMap] = []
- else_maps: List[TypeMap] = []
+ if_maps: list[TypeMap] = []
+ else_maps: list[TypeMap] = []
for expr in exprs_in_type_calls:
current_if_type, current_else_type = self.conditional_types_with_intersection(
- type_map[expr],
- type_being_compared,
- expr
+ self.lookup_type(expr), type_being_compared, expr
+ )
+ current_if_map, current_else_map = conditional_types_to_typemaps(
+ expr, current_if_type, current_else_type
)
- current_if_map, current_else_map = conditional_types_to_typemaps(expr,
- current_if_type,
- current_else_type)
if_maps.append(current_if_map)
else_maps.append(current_else_map)
- def combine_maps(list_maps: List[TypeMap]) -> TypeMap:
+ def combine_maps(list_maps: list[TypeMap]) -> TypeMap:
"""Combine all typemaps in list_maps into one typemap"""
result_map = {}
for d in list_maps:
@@ -4628,8 +5276,7 @@ def combine_maps(list_maps: List[TypeMap]) -> TypeMap:
else_map = {}
return if_map, else_map
- def find_isinstance_check(self, node: Expression
- ) -> Tuple[TypeMap, TypeMap]:
+ def find_isinstance_check(self, node: Expression) -> tuple[TypeMap, TypeMap]:
"""Find any isinstance checks (within a chain of ands). Includes
implicit and explicit checks for None and calls to callable.
Also includes TypeGuard functions.
@@ -4644,12 +5291,11 @@ def find_isinstance_check(self, node: Expression
Can return None, None in situations involving NoReturn.
"""
if_map, else_map = self.find_isinstance_check_helper(node)
- new_if_map = self.propagate_up_typemap_info(self.type_map, if_map)
- new_else_map = self.propagate_up_typemap_info(self.type_map, else_map)
+ new_if_map = self.propagate_up_typemap_info(if_map)
+ new_else_map = self.propagate_up_typemap_info(else_map)
return new_if_map, new_else_map
- def find_isinstance_check_helper(self, node: Expression) -> Tuple[TypeMap, TypeMap]:
- type_map = self.type_map
+ def find_isinstance_check_helper(self, node: Expression) -> tuple[TypeMap, TypeMap]:
if is_true_literal(node):
return {}, None
if is_false_literal(node):
@@ -4657,29 +5303,33 @@ def find_isinstance_check_helper(self, node: Expression) -> Tuple[TypeMap, TypeM
if isinstance(node, CallExpr) and len(node.args) != 0:
expr = collapse_walrus(node.args[0])
- if refers_to_fullname(node.callee, 'builtins.isinstance'):
+ if refers_to_fullname(node.callee, "builtins.isinstance"):
if len(node.args) != 2: # the error will be reported elsewhere
return {}, {}
if literal(expr) == LITERAL_TYPE:
return conditional_types_to_typemaps(
expr,
*self.conditional_types_with_intersection(
- type_map[expr],
- get_isinstance_type(node.args[1], type_map),
- expr
- )
+ self.lookup_type(expr), self.get_isinstance_type(node.args[1]), expr
+ ),
)
- elif refers_to_fullname(node.callee, 'builtins.issubclass'):
+ elif refers_to_fullname(node.callee, "builtins.issubclass"):
if len(node.args) != 2: # the error will be reported elsewhere
return {}, {}
if literal(expr) == LITERAL_TYPE:
- return self.infer_issubclass_maps(node, expr, type_map)
- elif refers_to_fullname(node.callee, 'builtins.callable'):
+ return self.infer_issubclass_maps(node, expr)
+ elif refers_to_fullname(node.callee, "builtins.callable"):
if len(node.args) != 1: # the error will be reported elsewhere
return {}, {}
if literal(expr) == LITERAL_TYPE:
- vartype = type_map[expr]
+ vartype = self.lookup_type(expr)
return self.conditional_callable_type_map(expr, vartype)
+ elif refers_to_fullname(node.callee, "builtins.hasattr"):
+ if len(node.args) != 2: # the error will be reported elsewhere
+ return {}, {}
+ attr = try_getting_str_literals(node.args[1], self.lookup_type(node.args[1]))
+ if literal(expr) == LITERAL_TYPE and attr and len(attr) == 1:
+ return self.hasattr_type_maps(expr, self.lookup_type(expr), attr[0])
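
A sketch of the new `hasattr` narrowing; in the true branch the checked attribute becomes accessible:

```python
import types

def f(mod: types.ModuleType) -> None:
    if hasattr(mod, "debug_level"):
        print(mod.debug_level)  # no attr-defined error in this branch
```
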
elif isinstance(node.callee, RefExpr):
if node.callee.type_guard is not None:
# TODO: Follow keyword args or *args, **kwargs
@@ -4702,14 +5352,16 @@ def find_isinstance_check_helper(self, node: Expression) -> Tuple[TypeMap, TypeM
operand_types = []
narrowable_operand_index_to_hash = {}
for i, expr in enumerate(operands):
- if expr not in type_map:
+ if not self.has_type(expr):
return {}, {}
- expr_type = type_map[expr]
+ expr_type = self.lookup_type(expr)
operand_types.append(expr_type)
- if (literal(expr) == LITERAL_TYPE
- and not is_literal_none(expr)
- and not is_literal_enum(type_map, expr)):
+ if (
+ literal(expr) == LITERAL_TYPE
+ and not is_literal_none(expr)
+ and not self.is_literal_enum(expr)
+ ):
h = literal_hash(expr)
if h is not None:
narrowable_operand_index_to_hash[i] = h
@@ -4731,9 +5383,7 @@ def find_isinstance_check_helper(self, node: Expression) -> Tuple[TypeMap, TypeM
# in practice.
simplified_operator_list = group_comparison_operands(
- node.pairwise(),
- narrowable_operand_index_to_hash,
- {'==', 'is'},
+ node.pairwise(), narrowable_operand_index_to_hash, {"==", "is"}
)
# Step 3: Analyze each group and infer more precise type maps for each
@@ -4742,7 +5392,7 @@ def find_isinstance_check_helper(self, node: Expression) -> Tuple[TypeMap, TypeM
partial_type_maps = []
for operator, expr_indices in simplified_operator_list:
- if operator in {'is', 'is not', '==', '!='}:
+ if operator in {"is", "is not", "==", "!="}:
# is_valid_target:
# Controls which types we're allowed to narrow exprs to. Note that
# we cannot use 'is_literal_type_like' in both cases since doing
@@ -4759,17 +5409,19 @@ def find_isinstance_check_helper(self, node: Expression) -> Tuple[TypeMap, TypeM
# should_narrow_by_identity:
# Set to 'false' only if the user defines custom __eq__ or __ne__ methods
# that could cause identity-based narrowing to produce invalid results.
- if operator in {'is', 'is not'}:
+ if operator in {"is", "is not"}:
is_valid_target: Callable[[Type], bool] = is_singleton_type
coerce_only_in_literal_context = False
should_narrow_by_identity = True
else:
+
def is_exactly_literal_type(t: Type) -> bool:
return isinstance(get_proper_type(t), LiteralType)
def has_no_custom_eq_checks(t: Type) -> bool:
- return (not custom_special_method(t, '__eq__', check_all=False)
- and not custom_special_method(t, '__ne__', check_all=False))
+ return not custom_special_method(
+ t, "__eq__", check_all=False
+ ) and not custom_special_method(t, "__ne__", check_all=False)
is_valid_target = is_exactly_literal_type
coerce_only_in_literal_context = True
@@ -4805,34 +5457,50 @@ def has_no_custom_eq_checks(t: Type) -> bool:
# explicit type(x) == some_type check
if if_map == {} and else_map == {}:
if_map, else_map = self.find_type_equals_check(node, expr_indices)
- elif operator in {'in', 'not in'}:
+ elif operator in {"in", "not in"}:
assert len(expr_indices) == 2
left_index, right_index = expr_indices
- if left_index not in narrowable_operand_index_to_hash:
- continue
-
item_type = operand_types[left_index]
- collection_type = operand_types[right_index]
-
- # We only try and narrow away 'None' for now
- if not is_optional(item_type):
- continue
+ iterable_type = operand_types[right_index]
+
+ if_map, else_map = {}, {}
+
+ if left_index in narrowable_operand_index_to_hash:
+ # We only try and narrow away 'None' for now
+ if is_optional(item_type):
+ collection_item_type = get_proper_type(
+ builtin_item_type(iterable_type)
+ )
+ if (
+ collection_item_type is not None
+ and not is_optional(collection_item_type)
+ and not (
+ isinstance(collection_item_type, Instance)
+ and collection_item_type.type.fullname == "builtins.object"
+ )
+ and is_overlapping_erased_types(item_type, collection_item_type)
+ ):
+ if_map[operands[left_index]] = remove_optional(item_type)
+
+ if right_index in narrowable_operand_index_to_hash:
+ if_type, else_type = self.conditional_types_for_iterable(
+ item_type, iterable_type
+ )
+ expr = operands[right_index]
+ if if_type is None:
+ if_map = None
+ else:
+ if_map[expr] = if_type
+ if else_type is None:
+ else_map = None
+ else:
+ else_map[expr] = else_type
- collection_item_type = get_proper_type(builtin_item_type(collection_type))
- if collection_item_type is None or is_optional(collection_item_type):
- continue
- if (isinstance(collection_item_type, Instance)
- and collection_item_type.type.fullname == 'builtins.object'):
- continue
- if is_overlapping_erased_types(item_type, collection_item_type):
- if_map, else_map = {operands[left_index]: remove_optional(item_type)}, {}
- else:
- continue
else:
if_map = {}
else_map = {}
- if operator in {'is not', '!=', 'not in'}:
+ if operator in {"is not", "!=", "not in"}:
if_map, else_map = else_map, if_map
partial_type_maps.append((if_map, else_map))
@@ -4860,49 +5528,43 @@ def has_no_custom_eq_checks(t: Type) -> bool:
(None if if_assignment_map is None or if_condition_map is None else if_map),
(None if else_assignment_map is None or else_condition_map is None else else_map),
)
- elif isinstance(node, OpExpr) and node.op == 'and':
+ elif isinstance(node, OpExpr) and node.op == "and":
left_if_vars, left_else_vars = self.find_isinstance_check(node.left)
right_if_vars, right_else_vars = self.find_isinstance_check(node.right)
# (e1 and e2) is true if both e1 and e2 are true,
# and false if at least one of e1 and e2 is false.
- return (and_conditional_maps(left_if_vars, right_if_vars),
- or_conditional_maps(left_else_vars, right_else_vars))
- elif isinstance(node, OpExpr) and node.op == 'or':
+ return (
+ and_conditional_maps(left_if_vars, right_if_vars),
+ or_conditional_maps(left_else_vars, right_else_vars),
+ )
+ elif isinstance(node, OpExpr) and node.op == "or":
left_if_vars, left_else_vars = self.find_isinstance_check(node.left)
right_if_vars, right_else_vars = self.find_isinstance_check(node.right)
# (e1 or e2) is true if at least one of e1 or e2 is true,
# and false if both e1 and e2 are false.
- return (or_conditional_maps(left_if_vars, right_if_vars),
- and_conditional_maps(left_else_vars, right_else_vars))
- elif isinstance(node, UnaryExpr) and node.op == 'not':
+ return (
+ or_conditional_maps(left_if_vars, right_if_vars),
+ and_conditional_maps(left_else_vars, right_else_vars),
+ )
+ elif isinstance(node, UnaryExpr) and node.op == "not":
left, right = self.find_isinstance_check(node.expr)
return right, left
# Restrict the type of the variable to True-ish/False-ish in the if and else branches
# respectively
- original_vartype = type_map[node]
+ original_vartype = self.lookup_type(node)
self._check_for_truthy_type(original_vartype, node)
vartype = try_expanding_sum_type_to_union(original_vartype, "builtins.bool")
if_type = true_only(vartype)
else_type = false_only(vartype)
- if_map = (
- {node: if_type}
- if not isinstance(if_type, UninhabitedType)
- else None
- )
- else_map = (
- {node: else_type}
- if not isinstance(else_type, UninhabitedType)
- else None
- )
+ if_map = {node: if_type} if not isinstance(if_type, UninhabitedType) else None
+ else_map = {node: else_type} if not isinstance(else_type, UninhabitedType) else None
return if_map, else_map
- def propagate_up_typemap_info(self,
- existing_types: Mapping[Expression, Type],
- new_types: TypeMap) -> TypeMap:
+ def propagate_up_typemap_info(self, new_types: TypeMap) -> TypeMap:
"""Attempts refining parent expressions of any MemberExpr or IndexExprs in new_types.
Specifically, this function accepts two mappings of expression to original types:
@@ -4935,7 +5597,7 @@ def propagate_up_typemap_info(self,
output_map[expr] = expr_type
# Next, try using this information to refine the parent types, if applicable.
- new_mapping = self.refine_parent_types(existing_types, expr, expr_type)
+ new_mapping = self.refine_parent_types(expr, expr_type)
for parent_expr, proposed_parent_type in new_mapping.items():
# We don't try inferring anything if we've already inferred something for
# the parent expression.
@@ -4945,10 +5607,7 @@ def propagate_up_typemap_info(self,
output_map[parent_expr] = proposed_parent_type
return output_map
- def refine_parent_types(self,
- existing_types: Mapping[Expression, Type],
- expr: Expression,
- expr_type: Type) -> Mapping[Expression, Type]:
+ def refine_parent_types(self, expr: Expression, expr_type: Type) -> Mapping[Expression, Type]:
"""Checks if the given expr is a 'lookup operation' into a union and iteratively refines
the parent types based on the 'expr_type'.
@@ -4958,7 +5617,7 @@ def refine_parent_types(self,
For more details about what a 'lookup operation' is and how we use the expr_type to refine
the parent types of lookup_expr, see the docstring in 'propagate_up_typemap_info'.
"""
- output: Dict[Expression, Type] = {}
+ output: dict[Expression, Type] = {}
# Note: parent_expr and parent_type are progressively refined as we crawl up the
# parent lookup chain.
@@ -4968,33 +5627,34 @@ def refine_parent_types(self,
# and create function that will try replaying the same lookup
# operation against arbitrary types.
if isinstance(expr, MemberExpr):
- parent_expr = expr.expr
- parent_type = existing_types.get(parent_expr)
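+ # collapse_walrus unwraps an assignment expression (x := ...) so the
+ # refinement targets the underlying name rather than the walrus node.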
+ parent_expr = collapse_walrus(expr.expr)
+ parent_type = self.lookup_type_or_none(parent_expr)
member_name = expr.name
- def replay_lookup(new_parent_type: ProperType) -> Optional[Type]:
- msg_copy = self.msg.clean_copy()
- member_type = analyze_member_access(
- name=member_name,
- typ=new_parent_type,
- context=parent_expr,
- is_lvalue=False,
- is_super=False,
- is_operator=False,
- msg=msg_copy,
- original_type=new_parent_type,
- chk=self,
- in_literal_context=False,
- )
- if msg_copy.is_errors():
+ def replay_lookup(new_parent_type: ProperType) -> Type | None:
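+ # Replay the member access against a candidate parent type while
+ # filtering errors; any new errors mean the candidate is incompatible.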
+ with self.msg.filter_errors() as w:
+ member_type = analyze_member_access(
+ name=member_name,
+ typ=new_parent_type,
+ context=parent_expr,
+ is_lvalue=False,
+ is_super=False,
+ is_operator=False,
+ msg=self.msg,
+ original_type=new_parent_type,
+ chk=self,
+ in_literal_context=False,
+ )
+ if w.has_new_errors():
return None
else:
return member_type
+
elif isinstance(expr, IndexExpr):
- parent_expr = expr.base
- parent_type = existing_types.get(parent_expr)
+ parent_expr = collapse_walrus(expr.base)
+ parent_type = self.lookup_type_or_none(parent_expr)
- index_type = existing_types.get(expr.index)
+ index_type = self.lookup_type_or_none(expr.index)
if index_type is None:
return output
@@ -5003,7 +5663,7 @@ def replay_lookup(new_parent_type: ProperType) -> Optional[Type]:
# Refactoring these two indexing replay functions is surprisingly
# tricky -- see https://github.com/python/mypy/pull/7917, which
# was blocked by https://github.com/mypyc/mypyc/issues/586
- def replay_lookup(new_parent_type: ProperType) -> Optional[Type]:
+ def replay_lookup(new_parent_type: ProperType) -> Type | None:
if not isinstance(new_parent_type, TypedDictType):
return None
try:
@@ -5012,10 +5672,12 @@ def replay_lookup(new_parent_type: ProperType) -> Optional[Type]:
except KeyError:
return None
return make_simplified_union(member_types)
+
else:
int_literals = try_getting_int_literals_from_type(index_type)
if int_literals is not None:
- def replay_lookup(new_parent_type: ProperType) -> Optional[Type]:
+
+ def replay_lookup(new_parent_type: ProperType) -> Type | None:
if not isinstance(new_parent_type, TupleType):
return None
try:
@@ -5024,6 +5686,7 @@ def replay_lookup(new_parent_type: ProperType) -> Optional[Type]:
except IndexError:
return None
return make_simplified_union(member_types)
+
else:
return output
else:
@@ -5045,8 +5708,8 @@ def replay_lookup(new_parent_type: ProperType) -> Optional[Type]:
# Take each element in the parent union and replay the original lookup procedure
# to figure out which parents are compatible.
new_parent_types = []
- for item in union_items(parent_type):
- member_type = replay_lookup(item)
+ for item in flatten_nested_unions(parent_type.items):
+ member_type = replay_lookup(get_proper_type(item))
if member_type is None:
# We were unable to obtain the member type. So, we give up on refining this
# parent type entirely and abort.
@@ -5064,14 +5727,15 @@ def replay_lookup(new_parent_type: ProperType) -> Optional[Type]:
expr = parent_expr
expr_type = output[parent_expr] = make_simplified_union(new_parent_types)
- def refine_identity_comparison_expression(self,
- operands: List[Expression],
- operand_types: List[Type],
- chain_indices: List[int],
- narrowable_operand_indices: AbstractSet[int],
- is_valid_target: Callable[[ProperType], bool],
- coerce_only_in_literal_context: bool,
- ) -> Tuple[TypeMap, TypeMap]:
+ def refine_identity_comparison_expression(
+ self,
+ operands: list[Expression],
+ operand_types: list[Type],
+ chain_indices: list[int],
+ narrowable_operand_indices: AbstractSet[int],
+ is_valid_target: Callable[[ProperType], bool],
+ coerce_only_in_literal_context: bool,
+ ) -> tuple[TypeMap, TypeMap]:
"""Produce conditional type maps refining expressions by an identity/equality comparison.
The 'operands' and 'operand_types' lists should be the full list of operands used
@@ -5102,7 +5766,7 @@ def refine_identity_comparison_expression(self,
if coerce_only_in_literal_context:
should_coerce = any(is_literal_type_like(operand_types[i]) for i in chain_indices)
- target: Optional[Type] = None
+ target: Type | None = None
possible_target_indices = []
for i in chain_indices:
expr_type = operand_types[i]
@@ -5159,8 +5823,9 @@ def refine_identity_comparison_expression(self,
sum_type_name = None
target = get_proper_type(target)
- if (isinstance(target, LiteralType) and
- (target.is_enum_literal() or isinstance(target.value, bool))):
+ if isinstance(target, LiteralType) and (
+ target.is_enum_literal() or isinstance(target.value, bool)
+ ):
sum_type_name = target.fallback.type.fullname
target_type = [TypeRange(target, is_upper_bound=False)]
@@ -5193,12 +5858,13 @@ def refine_identity_comparison_expression(self,
return reduce_conditional_maps(partial_type_maps)
- def refine_away_none_in_comparison(self,
- operands: List[Expression],
- operand_types: List[Type],
- chain_indices: List[int],
- narrowable_operand_indices: AbstractSet[int],
- ) -> Tuple[TypeMap, TypeMap]:
+ def refine_away_none_in_comparison(
+ self,
+ operands: list[Expression],
+ operand_types: list[Type],
+ chain_indices: list[int],
+ narrowable_operand_indices: AbstractSet[int],
+ ) -> tuple[TypeMap, TypeMap]:
"""Produces conditional type maps refining away None in an identity/equality chain.
For more details about what the different arguments mean, see the
@@ -5227,82 +5893,171 @@ def refine_away_none_in_comparison(self,
#
# Helpers
#
+ @overload
+ def check_subtype(
+ self,
+ subtype: Type,
+ supertype: Type,
+ context: Context,
+ msg: str,
+ subtype_label: str | None = None,
+ supertype_label: str | None = None,
+ *,
+ notes: list[str] | None = None,
+ code: ErrorCode | None = None,
+ outer_context: Context | None = None,
+ ) -> bool:
+ ...
- def check_subtype(self,
- subtype: Type,
- supertype: Type,
- context: Context,
- msg: Union[str, ErrorMessage] = message_registry.INCOMPATIBLE_TYPES,
- subtype_label: Optional[str] = None,
- supertype_label: Optional[str] = None,
- *,
- code: Optional[ErrorCode] = None,
- outer_context: Optional[Context] = None) -> bool:
+ @overload
+ def check_subtype(
+ self,
+ subtype: Type,
+ supertype: Type,
+ context: Context,
+ msg: ErrorMessage,
+ subtype_label: str | None = None,
+ supertype_label: str | None = None,
+ *,
+ notes: list[str] | None = None,
+ outer_context: Context | None = None,
+ ) -> bool:
+ ...
+
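+ # Note: the str overload accepts an explicit error `code`, while an
+ # ErrorMessage already carries its own code, hence the two signatures.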
+ def check_subtype(
+ self,
+ subtype: Type,
+ supertype: Type,
+ context: Context,
+ msg: str | ErrorMessage,
+ subtype_label: str | None = None,
+ supertype_label: str | None = None,
+ *,
+ notes: list[str] | None = None,
+ code: ErrorCode | None = None,
+ outer_context: Context | None = None,
+ ) -> bool:
"""Generate an error if the subtype is not compatible with supertype."""
if is_subtype(subtype, supertype, options=self.options):
return True
- if isinstance(msg, ErrorMessage):
- msg_text = msg.value
- code = msg.code
- else:
- msg_text = msg
+ if isinstance(msg, str):
+ msg = ErrorMessage(msg, code=code)
+
+ if self.msg.prefer_simple_messages():
+ self.fail(msg, context) # Fast path -- skip all fancy logic
+ return False
+
+ orig_subtype = subtype
subtype = get_proper_type(subtype)
+ orig_supertype = supertype
supertype = get_proper_type(supertype)
- if self.msg.try_report_long_tuple_assignment_error(subtype, supertype, context, msg_text,
- subtype_label, supertype_label, code=code):
- return False
- if self.should_suppress_optional_error([subtype]):
+ if self.msg.try_report_long_tuple_assignment_error(
+ subtype, supertype, context, msg, subtype_label, supertype_label
+ ):
return False
- extra_info: List[str] = []
- note_msg = ''
- notes: List[str] = []
+ extra_info: list[str] = []
+ note_msg = ""
+ notes = notes or []
if subtype_label is not None or supertype_label is not None:
- subtype_str, supertype_str = format_type_distinctly(subtype, supertype)
+ subtype_str, supertype_str = format_type_distinctly(orig_subtype, orig_supertype)
if subtype_label is not None:
- extra_info.append(subtype_label + ' ' + subtype_str)
+ extra_info.append(subtype_label + " " + subtype_str)
if supertype_label is not None:
- extra_info.append(supertype_label + ' ' + supertype_str)
- note_msg = make_inferred_type_note(outer_context or context, subtype,
- supertype, supertype_str)
+ extra_info.append(supertype_label + " " + supertype_str)
+ note_msg = make_inferred_type_note(
+ outer_context or context, subtype, supertype, supertype_str
+ )
if isinstance(subtype, Instance) and isinstance(supertype, Instance):
- notes = append_invariance_notes([], subtype, supertype)
+ notes = append_invariance_notes(notes, subtype, supertype)
if extra_info:
- msg_text += ' (' + ', '.join(extra_info) + ')'
+ msg = msg.with_additional_msg(" (" + ", ".join(extra_info) + ")")
- self.fail(ErrorMessage(msg_text, code=code), context)
+ self.fail(msg, context)
for note in notes:
- self.msg.note(note, context, code=code)
+ self.msg.note(note, context, code=msg.code)
if note_msg:
- self.note(note_msg, context, code=code)
- self.msg.maybe_note_concatenate_pos_args(subtype, supertype, context, code=code)
- if (isinstance(supertype, Instance) and supertype.type.is_protocol and
- isinstance(subtype, (Instance, TupleType, TypedDictType))):
- self.msg.report_protocol_problems(subtype, supertype, context, code=code)
+ self.note(note_msg, context, code=msg.code)
+ self.msg.maybe_note_concatenate_pos_args(subtype, supertype, context, code=msg.code)
+ if (
+ isinstance(supertype, Instance)
+ and supertype.type.is_protocol
+ and isinstance(subtype, (CallableType, Instance, TupleType, TypedDictType))
+ ):
+ self.msg.report_protocol_problems(subtype, supertype, context, code=msg.code)
if isinstance(supertype, CallableType) and isinstance(subtype, Instance):
- call = find_member('__call__', subtype, subtype, is_operator=True)
+ call = find_member("__call__", subtype, subtype, is_operator=True)
if call:
- self.msg.note_call(subtype, call, context, code=code)
+ self.msg.note_call(subtype, call, context, code=msg.code)
if isinstance(subtype, (CallableType, Overloaded)) and isinstance(supertype, Instance):
- if supertype.type.is_protocol and supertype.type.protocol_members == ['__call__']:
- call = find_member('__call__', supertype, subtype, is_operator=True)
+ if supertype.type.is_protocol and "__call__" in supertype.type.protocol_members:
+ call = find_member("__call__", supertype, subtype, is_operator=True)
assert call is not None
- self.msg.note_call(supertype, call, context, code=code)
+ if not is_subtype(subtype, call, options=self.options):
+ self.msg.note_call(supertype, call, context, code=msg.code)
+ self.check_possible_missing_await(subtype, supertype, context)
return False
+ def get_precise_awaitable_type(self, typ: Type, local_errors: ErrorWatcher) -> Type | None:
+ """If type implements Awaitable[X] with non-Any X, return X.
+
+ In all other cases return None. This method must be called in context
+ of local_errors.
+ """
+ if isinstance(get_proper_type(typ), PartialType):
+ # Partial types are special, ignore them here.
+ return None
+ try:
+ aw_type = self.expr_checker.check_awaitable_expr(
+ typ, Context(), "", ignore_binder=True
+ )
+ except KeyError:
+ # This is a hack to speed up tests by not including Awaitable in all typing stubs.
+ return None
+ if local_errors.has_new_errors():
+ return None
+ if isinstance(get_proper_type(aw_type), (AnyType, UnboundType)):
+ return None
+ return aw_type
+
+ @contextmanager
+ def checking_await_set(self) -> Iterator[None]:
+ self.checking_missing_await = True
+ try:
+ yield
+ finally:
+ self.checking_missing_await = False
+
+ def check_possible_missing_await(
+ self, subtype: Type, supertype: Type, context: Context
+ ) -> None:
+ """Check if the given type becomes a subtype when awaited."""
+ if self.checking_missing_await:
+ # Avoid infinite recursion.
+ return
+ with self.checking_await_set(), self.msg.filter_errors() as local_errors:
+ aw_type = self.get_precise_awaitable_type(subtype, local_errors)
+ if aw_type is None:
+ return
+ if not self.check_subtype(
+ aw_type, supertype, context, msg=message_registry.INCOMPATIBLE_TYPES
+ ):
+ return
+ self.msg.possible_missing_await(context)
+
def contains_none(self, t: Type) -> bool:
t = get_proper_type(t)
return (
- isinstance(t, NoneType) or
- (isinstance(t, UnionType) and any(self.contains_none(ut) for ut in t.items)) or
- (isinstance(t, TupleType) and any(self.contains_none(tt) for tt in t.items)) or
- (isinstance(t, Instance) and bool(t.args)
- and any(self.contains_none(it) for it in t.args))
+ isinstance(t, NoneType)
+ or (isinstance(t, UnionType) and any(self.contains_none(ut) for ut in t.items))
+ or (isinstance(t, TupleType) and any(self.contains_none(tt) for tt in t.items))
+ or (
+ isinstance(t, Instance)
+ and bool(t.args)
+ and any(self.contains_none(it) for it in t.args)
+ )
)
- def should_suppress_optional_error(self, related_types: List[Type]) -> bool:
- return self.suppress_none_errors and any(self.contains_none(t) for t in related_types)
-
def named_type(self, name: str) -> Instance:
"""Return an instance type with given name and implicit Any type args.
@@ -5312,13 +6067,13 @@ def named_type(self, name: str) -> Instance:
sym = self.lookup_qualified(name)
node = sym.node
if isinstance(node, TypeAlias):
- assert isinstance(node.target, Instance) # type: ignore
+ assert isinstance(node.target, Instance) # type: ignore[misc]
node = node.target.type
assert isinstance(node, TypeInfo)
any_type = AnyType(TypeOfAny.from_omitted_generics)
return Instance(node, [any_type] * len(node.defn.type_vars))
- def named_generic_type(self, name: str, args: List[Type]) -> Instance:
+ def named_generic_type(self, name: str, args: list[Type]) -> Instance:
"""Return an instance with the given name and type arguments.
Assume that the number of arguments is correct. Assume that
@@ -5338,15 +6093,46 @@ def lookup_typeinfo(self, fullname: str) -> TypeInfo:
def type_type(self) -> Instance:
"""Return instance type 'type'."""
- return self.named_type('builtins.type')
+ return self.named_type("builtins.type")
def str_type(self) -> Instance:
"""Return instance type 'str'."""
- return self.named_type('builtins.str')
+ return self.named_type("builtins.str")
def store_type(self, node: Expression, typ: Type) -> None:
"""Store the type of a node in the type map."""
- self.type_map[node] = typ
+ self._type_maps[-1][node] = typ
+
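+ # The maps form a stack: the lookups below search from the innermost
+ # (most recent) map outwards, mirroring local_type_map() nesting.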
+ def has_type(self, node: Expression) -> bool:
+ return any(node in m for m in reversed(self._type_maps))
+
+ def lookup_type_or_none(self, node: Expression) -> Type | None:
+ for m in reversed(self._type_maps):
+ if node in m:
+ return m[node]
+ return None
+
+ def lookup_type(self, node: Expression) -> Type:
+ for m in reversed(self._type_maps):
+ t = m.get(node)
+ if t is not None:
+ return t
+ raise KeyError(node)
+
+ def store_types(self, d: dict[Expression, Type]) -> None:
+ self._type_maps[-1].update(d)
+
+ @contextmanager
+ def local_type_map(self) -> Iterator[dict[Expression, Type]]:
+ """Store inferred types into a temporary type map (returned).
+
+ This can be used to perform type checking "experiments" without
+ affecting exported types (which are used by mypyc).
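+
+ Hypothetical usage sketch:
+
+ with self.local_type_map() as tmap:
+ ... # speculative checking; inferred types land in tmap
+ # tmap is dropped here unless merged back via store_types(tmap)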
+ """
+ temp_type_map: dict[Expression, Type] = {}
+ self._type_maps.append(temp_type_map)
+ yield temp_type_map
+ self._type_maps.pop()
def in_checked_function(self) -> bool:
"""Should we type-check the current function?
@@ -5356,28 +6142,27 @@ def in_checked_function(self) -> bool:
- Yes in annotated functions.
- No otherwise.
"""
- return (self.options.check_untyped_defs
- or not self.dynamic_funcs
- or not self.dynamic_funcs[-1])
+ return (
+ self.options.check_untyped_defs or not self.dynamic_funcs or not self.dynamic_funcs[-1]
+ )
def lookup(self, name: str) -> SymbolTableNode:
- """Look up a definition from the symbol table with the given name.
- """
+ """Look up a definition from the symbol table with the given name."""
if name in self.globals:
return self.globals[name]
else:
- b = self.globals.get('__builtins__', None)
+ b = self.globals.get("__builtins__", None)
if b:
table = cast(MypyFile, b.node).names
if name in table:
return table[name]
- raise KeyError('Failed lookup: {}'.format(name))
+ raise KeyError(f"Failed lookup: {name}")
def lookup_qualified(self, name: str) -> SymbolTableNode:
- if '.' not in name:
+ if "." not in name:
return self.lookup(name)
else:
- parts = name.split('.')
+ parts = name.split(".")
n = self.modules[parts[0]]
for i in range(1, len(parts) - 1):
sym = n.names.get(parts[i])
@@ -5386,23 +6171,27 @@ def lookup_qualified(self, name: str) -> SymbolTableNode:
last = parts[-1]
if last in n.names:
return n.names[last]
- elif len(parts) == 2 and parts[0] == 'builtins':
- fullname = 'builtins.' + last
+ elif len(parts) == 2 and parts[0] in ("builtins", "typing"):
+ fullname = ".".join(parts)
if fullname in SUGGESTED_TEST_FIXTURES:
- suggestion = ", e.g. add '[builtins fixtures/{}]' to your test".format(
- SUGGESTED_TEST_FIXTURES[fullname])
+ suggestion = ", e.g. add '[{} fixtures/{}]' to your test".format(
+ parts[0], SUGGESTED_TEST_FIXTURES[fullname]
+ )
else:
- suggestion = ''
- raise KeyError("Could not find builtin symbol '{}' (If you are running a "
- "test case, use a fixture that "
- "defines this symbol{})".format(last, suggestion))
+ suggestion = ""
+ raise KeyError(
+ "Could not find builtin symbol '{}' (If you are running a "
+ "test case, use a fixture that "
+ "defines this symbol{})".format(last, suggestion)
+ )
else:
msg = "Failed qualified lookup: '{}' (fullname = '{}')."
raise KeyError(msg.format(last, name))
@contextmanager
- def enter_partial_types(self, *, is_function: bool = False,
- is_class: bool = False) -> Iterator[None]:
+ def enter_partial_types(
+ self, *, is_function: bool = False, is_class: bool = False
+ ) -> Iterator[None]:
"""Enter a new scope for collecting partial types.
Also report errors for (some) variables which still have partial
@@ -5416,9 +6205,7 @@ def enter_partial_types(self, *, is_function: bool = False,
# at the toplevel (with allow_untyped_globals) or if it is in an
# untyped function being checked with check_untyped_defs.
permissive = (self.options.allow_untyped_globals and not is_local) or (
- self.options.check_untyped_defs
- and self.dynamic_funcs
- and self.dynamic_funcs[-1]
+ self.options.check_untyped_defs and self.dynamic_funcs and self.dynamic_funcs[-1]
)
partial_types, _, _ = self.partial_types.pop()
@@ -5437,23 +6224,30 @@ def enter_partial_types(self, *, is_function: bool = False,
# checked for compatibility with base classes elsewhere. Without this exception
# mypy could require an annotation for an attribute that already has been
# declared in a base class, which would be bad.
- allow_none = (not self.options.local_partial_types
- or is_function
- or (is_class and self.is_defined_in_base_class(var)))
- if (allow_none
- and isinstance(var.type, PartialType)
- and var.type.type is None
- and not permissive):
+ allow_none = (
+ not self.options.local_partial_types
+ or is_function
+ or (is_class and self.is_defined_in_base_class(var))
+ )
+ if (
+ allow_none
+ and isinstance(var.type, PartialType)
+ and var.type.type is None
+ and not permissive
+ ):
var.type = NoneType()
else:
if var not in self.partial_reported and not permissive:
self.msg.need_annotation_for_var(var, context, self.options.python_version)
self.partial_reported.add(var)
if var.type:
- var.type = self.fixup_partial_type(var.type)
+ fixed = fixup_partial_type(var.type)
+ var.invalid_partial_type = fixed != var.type
+ var.type = fixed
def handle_partial_var_type(
- self, typ: PartialType, is_lvalue: bool, node: Var, context: Context) -> Type:
+ self, typ: PartialType, is_lvalue: bool, node: Var, context: Context
+ ) -> Type:
"""Handle a reference to a partial type through a var.
(Used by checkexpr and checkmember.)
@@ -5471,39 +6265,23 @@ def handle_partial_var_type(
if in_scope:
context = partial_types[node]
if is_local or not self.options.allow_untyped_globals:
- self.msg.need_annotation_for_var(node, context,
- self.options.python_version)
+ self.msg.need_annotation_for_var(
+ node, context, self.options.python_version
+ )
self.partial_reported.add(node)
else:
# Defer the node -- we might get a better type in the outer scope
self.handle_cannot_determine_type(node.name, context)
- return self.fixup_partial_type(typ)
-
- def fixup_partial_type(self, typ: Type) -> Type:
- """Convert a partial type that we couldn't resolve into something concrete.
-
- This means, for None we make it Optional[Any], and for anything else we
- fill in all of the type arguments with Any.
- """
- if not isinstance(typ, PartialType):
- return typ
- if typ.type is None:
- return UnionType.make_union([AnyType(TypeOfAny.unannotated), NoneType()])
- else:
- return Instance(
- typ.type,
- [AnyType(TypeOfAny.unannotated)] * len(typ.type.type_vars))
+ return fixup_partial_type(typ)
def is_defined_in_base_class(self, var: Var) -> bool:
- if var.info:
- for base in var.info.mro[1:]:
- if base.get(var.name) is not None:
- return True
- if var.info.fallback_to_any:
- return True
- return False
+ if not var.info:
+ return False
+ return var.info.fallback_to_any or any(
+ base.get(var.name) is not None for base in var.info.mro[1:]
+ )
- def find_partial_types(self, var: Var) -> Optional[Dict[Var, Context]]:
+ def find_partial_types(self, var: Var) -> dict[Var, Context] | None:
"""Look for an active partial type scope containing variable.
A scope is active if assignments in the current context can refine a partial
@@ -5516,7 +6294,8 @@ def find_partial_types(self, var: Var) -> Optional[Dict[Var, Context]]:
return None
def find_partial_types_in_all_scopes(
- self, var: Var) -> Tuple[bool, bool, Optional[Dict[Var, Context]]]:
+ self, var: Var
+ ) -> tuple[bool, bool, dict[Var, Context] | None]:
"""Look for partial type scope containing variable.
Return tuple (is the scope active, is the scope a local scope, scope).
@@ -5533,68 +6312,71 @@ def find_partial_types_in_all_scopes(
# as if --local-partial-types is always on (because it used to be like this).
disallow_other_scopes = True
- scope_active = (not disallow_other_scopes
- or scope.is_local == self.partial_types[-1].is_local)
+ scope_active = (
+ not disallow_other_scopes or scope.is_local == self.partial_types[-1].is_local
+ )
return scope_active, scope.is_local, scope.map
return False, False, None
- def temp_node(self, t: Type, context: Optional[Context] = None) -> TempNode:
+ def temp_node(self, t: Type, context: Context | None = None) -> TempNode:
"""Create a temporary node with the given, fixed type."""
return TempNode(t, context=context)
- def fail(self, msg: Union[str, ErrorMessage], context: Context, *,
- code: Optional[ErrorCode] = None) -> None:
+ def fail(
+ self, msg: str | ErrorMessage, context: Context, *, code: ErrorCode | None = None
+ ) -> None:
"""Produce an error message."""
if isinstance(msg, ErrorMessage):
self.msg.fail(msg.value, context, code=msg.code)
return
self.msg.fail(msg, context, code=code)
- def note(self,
- msg: str,
- context: Context,
- offset: int = 0,
- *,
- code: Optional[ErrorCode] = None) -> None:
+ def note(
+ self,
+ msg: str | ErrorMessage,
+ context: Context,
+ offset: int = 0,
+ *,
+ code: ErrorCode | None = None,
+ ) -> None:
"""Produce a note."""
+ if isinstance(msg, ErrorMessage):
+ self.msg.note(msg.value, context, code=msg.code)
+ return
self.msg.note(msg, context, offset=offset, code=code)
def iterable_item_type(self, instance: Instance) -> Type:
- iterable = map_instance_to_supertype(
- instance,
- self.lookup_typeinfo('typing.Iterable'))
+ iterable = map_instance_to_supertype(instance, self.lookup_typeinfo("typing.Iterable"))
item_type = iterable.args[0]
if not isinstance(get_proper_type(item_type), AnyType):
# This relies on 'map_instance_to_supertype' returning 'Iterable[Any]'
# in case there is no explicit base class.
return item_type
# Try also structural typing.
- iter_type = get_proper_type(find_member('__iter__', instance, instance, is_operator=True))
+ iter_type = get_proper_type(find_member("__iter__", instance, instance, is_operator=True))
if iter_type and isinstance(iter_type, CallableType):
ret_type = get_proper_type(iter_type.ret_type)
if isinstance(ret_type, Instance):
- iterator = map_instance_to_supertype(ret_type,
- self.lookup_typeinfo('typing.Iterator'))
+ iterator = map_instance_to_supertype(
+ ret_type, self.lookup_typeinfo("typing.Iterator")
+ )
item_type = iterator.args[0]
return item_type
def function_type(self, func: FuncBase) -> FunctionLike:
- return function_type(func, self.named_type('builtins.function'))
+ return function_type(func, self.named_type("builtins.function"))
- def push_type_map(self, type_map: 'TypeMap') -> None:
+ def push_type_map(self, type_map: TypeMap) -> None:
if type_map is None:
self.binder.unreachable()
else:
for expr, type in type_map.items():
self.binder.put(expr, type)
- def infer_issubclass_maps(self, node: CallExpr,
- expr: Expression,
- type_map: Dict[Expression, Type]
- ) -> Tuple[TypeMap, TypeMap]:
+ def infer_issubclass_maps(self, node: CallExpr, expr: Expression) -> tuple[TypeMap, TypeMap]:
"""Infer type restrictions for an expression in issubclass call."""
- vartype = type_map[expr]
- type = get_isinstance_type(node.args[1], type_map)
+ vartype = self.lookup_type(expr)
+ type = self.get_isinstance_type(node.args[1])
if isinstance(vartype, TypeVarType):
vartype = vartype.upper_bound
vartype = get_proper_type(vartype)
@@ -5610,9 +6392,8 @@ def infer_issubclass_maps(self, node: CallExpr,
vartype = UnionType(union_list)
elif isinstance(vartype, TypeType):
vartype = vartype.item
- elif (isinstance(vartype, Instance) and
- vartype.type.fullname == 'builtins.type'):
- vartype = self.named_type('builtins.object')
+ elif isinstance(vartype, Instance) and vartype.type.is_metaclass():
+ vartype = self.named_type("builtins.object")
else:
# Any other object whose type we don't know precisely
# for example, Any or a custom metaclass.
@@ -5623,32 +6404,33 @@ def infer_issubclass_maps(self, node: CallExpr,
return yes_map, no_map
@overload
- def conditional_types_with_intersection(self,
- expr_type: Type,
- type_ranges: Optional[List[TypeRange]],
- ctx: Context,
- default: None = None
- ) -> Tuple[Optional[Type], Optional[Type]]: ...
+ def conditional_types_with_intersection(
+ self,
+ expr_type: Type,
+ type_ranges: list[TypeRange] | None,
+ ctx: Context,
+ default: None = None,
+ ) -> tuple[Type | None, Type | None]:
+ ...
@overload
- def conditional_types_with_intersection(self,
- expr_type: Type,
- type_ranges: Optional[List[TypeRange]],
- ctx: Context,
- default: Type
- ) -> Tuple[Type, Type]: ...
-
- def conditional_types_with_intersection(self,
- expr_type: Type,
- type_ranges: Optional[List[TypeRange]],
- ctx: Context,
- default: Optional[Type] = None
- ) -> Tuple[Optional[Type], Optional[Type]]:
+ def conditional_types_with_intersection(
+ self, expr_type: Type, type_ranges: list[TypeRange] | None, ctx: Context, default: Type
+ ) -> tuple[Type, Type]:
+ ...
+
+ def conditional_types_with_intersection(
+ self,
+ expr_type: Type,
+ type_ranges: list[TypeRange] | None,
+ ctx: Context,
+ default: Type | None = None,
+ ) -> tuple[Type | None, Type | None]:
initial_types = conditional_types(expr_type, type_ranges, default)
# For some reason, doing "yes_map, no_map = conditional_types_to_typemaps(...)"
# doesn't work: mypyc will decide that 'yes_map' is of type None if we try.
- yes_type: Optional[Type] = initial_types[0]
- no_type: Optional[Type] = initial_types[1]
+ yes_type: Type | None = initial_types[0]
+ no_type: Type | None = initial_types[1]
if not isinstance(get_proper_type(yes_type), UninhabitedType) or type_ranges is None:
return yes_type, no_type
@@ -5671,15 +6453,20 @@ def conditional_types_with_intersection(self,
possible_target_types.append(item)
out = []
+ errors: list[tuple[str, str]] = []
for v in possible_expr_types:
if not isinstance(v, Instance):
return yes_type, no_type
for t in possible_target_types:
- intersection = self.intersect_instances((v, t), ctx)
+ intersection = self.intersect_instances((v, t), errors)
if intersection is None:
continue
out.append(intersection)
if len(out) == 0:
+ # Only report errors if no element in the union worked.
+ if self.should_report_unreachable_issues():
+ for types, reason in errors:
+ self.msg.impossible_intersection(types, reason, ctx)
return UninhabitedType(), expr_type
new_yes_type = make_simplified_union(out)
return new_yes_type, expr_type
@@ -5687,32 +6474,209 @@ def conditional_types_with_intersection(self,
def is_writable_attribute(self, node: Node) -> bool:
"""Check if an attribute is writable"""
if isinstance(node, Var):
+ if node.is_property and not node.is_settable_property:
+ return False
return True
elif isinstance(node, OverloadedFuncDef) and node.is_property:
first_item = cast(Decorator, node.items[0])
return first_item.var.is_settable_property
- else:
+ return False
+
+ def get_isinstance_type(self, expr: Expression) -> list[TypeRange] | None:
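+ # PEP 604 unions, e.g. isinstance(x, A | B), are handled by combining
+ # the ranges computed for each side.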
+ if isinstance(expr, OpExpr) and expr.op == "|":
+ left = self.get_isinstance_type(expr.left)
+ right = self.get_isinstance_type(expr.right)
+ if left is None or right is None:
+ return None
+ return left + right
+ all_types = get_proper_types(flatten_types(self.lookup_type(expr)))
+ types: list[TypeRange] = []
+ for typ in all_types:
+ if isinstance(typ, FunctionLike) and typ.is_type_obj():
+ # Type variables may be present -- erase them, which is the best
+ # we can do (outside disallowing them here).
+ erased_type = erase_typevars(typ.items[0].ret_type)
+ types.append(TypeRange(erased_type, is_upper_bound=False))
+ elif isinstance(typ, TypeType):
+ # Type[A] means "any type that is a subtype of A" rather than "precisely type A"
+ # we indicate this by setting is_upper_bound flag
+ types.append(TypeRange(typ.item, is_upper_bound=True))
+ elif isinstance(typ, Instance) and typ.type.fullname == "builtins.type":
+ object_type = Instance(typ.type.mro[-1], [])
+ types.append(TypeRange(object_type, is_upper_bound=True))
+ elif isinstance(typ, AnyType):
+ types.append(TypeRange(typ, is_upper_bound=False))
+ else: # we didn't see an actual type, but rather a variable with unknown value
+ return None
+ if not types:
+ # this can happen if someone has an empty tuple as the 2nd argument to isinstance
+ # strictly speaking, we should return UninhabitedType but for simplicity we will simply
+ # refuse to do any type inference for now
+ return None
+ return types
+
+ def is_literal_enum(self, n: Expression) -> bool:
+ """Returns true if this expression (with the given type context) is an Enum literal.
+
+ For example, if we had an enum:
+
+ class Foo(Enum):
+ A = 1
+ B = 2
+
+ ...and if the expression 'Foo' referred to that enum within the current type context,
+ then the expression 'Foo.A' would be a literal enum. However, if we did 'a = Foo.A',
+ then the variable 'a' would *not* be a literal enum.
+
+ We occasionally special-case expressions like 'Foo.A' and treat them as a single primitive
+ unit for the same reasons we sometimes treat 'True', 'False', or 'None' as a single
+ primitive unit.
+ """
+ if not isinstance(n, MemberExpr) or not isinstance(n.expr, NameExpr):
+ return False
+
+ parent_type = self.lookup_type_or_none(n.expr)
+ member_type = self.lookup_type_or_none(n)
+ if member_type is None or parent_type is None:
+ return False
+
+ parent_type = get_proper_type(parent_type)
+ member_type = get_proper_type(coerce_to_literal(member_type))
+ if not isinstance(parent_type, FunctionLike) or not isinstance(member_type, LiteralType):
+ return False
+
+ if not parent_type.is_type_obj():
+ return False
+
+ return (
+ member_type.is_enum_literal()
+ and member_type.fallback.type == parent_type.type_object()
+ )
+
+ def add_any_attribute_to_type(self, typ: Type, name: str) -> Type:
+ """Inject an extra attribute with Any type using fallbacks."""
+ orig_typ = typ
+ typ = get_proper_type(typ)
+ any_type = AnyType(TypeOfAny.unannotated)
+ if isinstance(typ, Instance):
+ result = typ.copy_with_extra_attr(name, any_type)
+ # For instances, we erase the possible module name, so that restrictions
+ # become anonymous types.ModuleType instances, allowing hasattr() to
+ # have effect on modules.
+ assert result.extra_attrs is not None
+ result.extra_attrs.mod_name = None
+ return result
+ if isinstance(typ, TupleType):
+ fallback = typ.partial_fallback.copy_with_extra_attr(name, any_type)
+ return typ.copy_modified(fallback=fallback)
+ if isinstance(typ, CallableType):
+ fallback = typ.fallback.copy_with_extra_attr(name, any_type)
+ return typ.copy_modified(fallback=fallback)
+ if isinstance(typ, TypeType) and isinstance(typ.item, Instance):
+ return TypeType.make_normalized(self.add_any_attribute_to_type(typ.item, name))
+ if isinstance(typ, TypeVarType):
+ return typ.copy_modified(
+ upper_bound=self.add_any_attribute_to_type(typ.upper_bound, name),
+ values=[self.add_any_attribute_to_type(v, name) for v in typ.values],
+ )
+ if isinstance(typ, UnionType):
+ with_attr, without_attr = self.partition_union_by_attr(typ, name)
+ return make_simplified_union(
+ with_attr + [self.add_any_attribute_to_type(typ, name) for typ in without_attr]
+ )
+ return orig_typ
+
+ def hasattr_type_maps(
+ self, expr: Expression, source_type: Type, name: str
+ ) -> tuple[TypeMap, TypeMap]:
+ """Simple support for hasattr() checks.
+
+ Essentially the logic is as follows:
+ * In the if branch, keep types that already have a valid attribute as is,
+ and for the others inject an attribute with `Any` type.
+ * In the else branch, remove types that already have a valid attribute,
+ while keeping the rest.
+ """
+ if self.has_valid_attribute(source_type, name):
+ return {expr: source_type}, {}
+
+ source_type = get_proper_type(source_type)
+ if isinstance(source_type, UnionType):
+ _, without_attr = self.partition_union_by_attr(source_type, name)
+ yes_map = {expr: self.add_any_attribute_to_type(source_type, name)}
+ return yes_map, {expr: make_simplified_union(without_attr)}
+
+ type_with_attr = self.add_any_attribute_to_type(source_type, name)
+ if type_with_attr != source_type:
+ return {expr: type_with_attr}, {}
+ return {}, {}
+
+ def partition_union_by_attr(
+ self, source_type: UnionType, name: str
+ ) -> tuple[list[Type], list[Type]]:
+ with_attr = []
+ without_attr = []
+ for item in source_type.items:
+ if self.has_valid_attribute(item, name):
+ with_attr.append(item)
+ else:
+ without_attr.append(item)
+ return with_attr, without_attr
+
+ def has_valid_attribute(self, typ: Type, name: str) -> bool:
+ p_typ = get_proper_type(typ)
+ if isinstance(p_typ, AnyType):
return False
+ if isinstance(p_typ, Instance) and p_typ.extra_attrs and p_typ.extra_attrs.mod_name:
+ # Presence of module_symbol_table means this check will skip ModuleType.__getattr__
+ module_symbol_table = p_typ.type.names
+ else:
+ module_symbol_table = None
+ with self.msg.filter_errors() as watcher:
+ analyze_member_access(
+ name,
+ typ,
+ TempNode(AnyType(TypeOfAny.special_form)),
+ False,
+ False,
+ False,
+ self.msg,
+ original_type=typ,
+ chk=self,
+ # This is not a real attribute lookup so don't mess with deferring nodes.
+ no_deferral=True,
+ module_symbol_table=module_symbol_table,
+ )
+ return not watcher.has_new_errors()
+
+
+class CollectArgTypeVarTypes(TypeTraverserVisitor):
+ """Collects the non-nested argument types in a set."""
+
+ def __init__(self) -> None:
+ self.arg_types: set[TypeVarType] = set()
+
+ def visit_type_var(self, t: TypeVarType) -> None:
+ self.arg_types.add(t)
@overload
-def conditional_types(current_type: Type,
- proposed_type_ranges: Optional[List[TypeRange]],
- default: None = None
- ) -> Tuple[Optional[Type], Optional[Type]]: ...
+def conditional_types(
+ current_type: Type, proposed_type_ranges: list[TypeRange] | None, default: None = None
+) -> tuple[Type | None, Type | None]:
+ ...
@overload
-def conditional_types(current_type: Type,
- proposed_type_ranges: Optional[List[TypeRange]],
- default: Type
- ) -> Tuple[Type, Type]: ...
+def conditional_types(
+ current_type: Type, proposed_type_ranges: list[TypeRange] | None, default: Type
+) -> tuple[Type, Type]:
+ ...
-def conditional_types(current_type: Type,
- proposed_type_ranges: Optional[List[TypeRange]],
- default: Optional[Type] = None
- ) -> Tuple[Optional[Type], Optional[Type]]:
+def conditional_types(
+ current_type: Type, proposed_type_ranges: list[TypeRange] | None, default: Type | None = None
+) -> tuple[Type | None, Type | None]:
"""Takes in the current type and a proposed type of an expression.
Returns a 2-tuple: The first element is the proposed type, if the expression
@@ -5724,11 +6688,11 @@ def conditional_types(current_type: Type,
if len(proposed_type_ranges) == 1:
target = proposed_type_ranges[0].item
target = get_proper_type(target)
- if isinstance(target, LiteralType) and (target.is_enum_literal()
- or isinstance(target.value, bool)):
+ if isinstance(target, LiteralType) and (
+ target.is_enum_literal() or isinstance(target.value, bool)
+ ):
enum_name = target.fallback.type.fullname
- current_type = try_expanding_sum_type_to_union(current_type,
- enum_name)
+ current_type = try_expanding_sum_type_to_union(current_type, enum_name)
proposed_items = [type_range.item for type_range in proposed_type_ranges]
proposed_type = make_simplified_union(proposed_items)
if isinstance(proposed_type, AnyType):
@@ -5736,19 +6700,25 @@ def conditional_types(current_type: Type,
# attempt to narrow anything. Instead, we broaden the expr to Any to
# avoid false positives
return proposed_type, default
- elif (not any(type_range.is_upper_bound for type_range in proposed_type_ranges)
- and is_proper_subtype(current_type, proposed_type)):
+ elif not any(
+ type_range.is_upper_bound for type_range in proposed_type_ranges
+ ) and is_proper_subtype(current_type, proposed_type, ignore_promotions=True):
# Expression is always of one of the types in proposed_type_ranges
return default, UninhabitedType()
- elif not is_overlapping_types(current_type, proposed_type,
- prohibit_none_typevar_overlap=True):
+ elif not is_overlapping_types(
+ current_type, proposed_type, prohibit_none_typevar_overlap=True, ignore_promotions=True
+ ):
# Expression is never of any type in proposed_type_ranges
return UninhabitedType(), default
else:
# we can only restrict when the type is precise, not bounded
- proposed_precise_type = UnionType.make_union([type_range.item
- for type_range in proposed_type_ranges
- if not type_range.is_upper_bound])
+ proposed_precise_type = UnionType.make_union(
+ [
+ type_range.item
+ for type_range in proposed_type_ranges
+ if not type_range.is_upper_bound
+ ]
+ )
remaining_type = restrict_subtype_away(current_type, proposed_precise_type)
return proposed_type, remaining_type
else:
@@ -5756,11 +6726,10 @@ def conditional_types(current_type: Type,
return current_type, default
-def conditional_types_to_typemaps(expr: Expression,
- yes_type: Optional[Type],
- no_type: Optional[Type]
- ) -> Tuple[TypeMap, TypeMap]:
- maps: List[TypeMap] = []
+def conditional_types_to_typemaps(
+ expr: Expression, yes_type: Type | None, no_type: Type | None
+) -> tuple[TypeMap, TypeMap]:
+ maps: list[TypeMap] = []
for typ in (yes_type, no_type):
proper_type = get_proper_type(typ)
if isinstance(proper_type, UninhabitedType):
@@ -5786,62 +6755,24 @@ def gen_unique_name(base: str, table: SymbolTable) -> str:
def is_true_literal(n: Expression) -> bool:
"""Returns true if this expression is the 'True' literal/keyword."""
- return (refers_to_fullname(n, 'builtins.True')
- or isinstance(n, IntExpr) and n.value != 0)
+ return refers_to_fullname(n, "builtins.True") or isinstance(n, IntExpr) and n.value != 0
def is_false_literal(n: Expression) -> bool:
"""Returns true if this expression is the 'False' literal/keyword."""
- return (refers_to_fullname(n, 'builtins.False')
- or isinstance(n, IntExpr) and n.value == 0)
-
-
-def is_literal_enum(type_map: Mapping[Expression, Type], n: Expression) -> bool:
- """Returns true if this expression (with the given type context) is an Enum literal.
-
- For example, if we had an enum:
-
- class Foo(Enum):
- A = 1
- B = 2
-
- ...and if the expression 'Foo' referred to that enum within the current type context,
- then the expression 'Foo.A' would be a literal enum. However, if we did 'a = Foo.A',
- then the variable 'a' would *not* be a literal enum.
-
- We occasionally special-case expressions like 'Foo.A' and treat them as a single primitive
- unit for the same reasons we sometimes treat 'True', 'False', or 'None' as a single
- primitive unit.
- """
- if not isinstance(n, MemberExpr) or not isinstance(n.expr, NameExpr):
- return False
-
- parent_type = type_map.get(n.expr)
- member_type = type_map.get(n)
- if member_type is None or parent_type is None:
- return False
-
- parent_type = get_proper_type(parent_type)
- member_type = get_proper_type(coerce_to_literal(member_type))
- if not isinstance(parent_type, FunctionLike) or not isinstance(member_type, LiteralType):
- return False
-
- if not parent_type.is_type_obj():
- return False
-
- return member_type.is_enum_literal() and member_type.fallback.type == parent_type.type_object()
+ return refers_to_fullname(n, "builtins.False") or isinstance(n, IntExpr) and n.value == 0
def is_literal_none(n: Expression) -> bool:
"""Returns true if this expression is the 'None' literal/keyword."""
- return isinstance(n, NameExpr) and n.fullname == 'builtins.None'
+ return isinstance(n, NameExpr) and n.fullname == "builtins.None"
def is_literal_not_implemented(n: Expression) -> bool:
- return isinstance(n, NameExpr) and n.fullname == 'builtins.NotImplemented'
+ return isinstance(n, NameExpr) and n.fullname == "builtins.NotImplemented"
-def builtin_item_type(tp: Type) -> Optional[Type]:
+def builtin_item_type(tp: Type) -> Type | None:
"""Get the item type of a builtin container.
If 'tp' is not one of the builtin containers (these include NamedTuple and TypedDict)
@@ -5859,24 +6790,30 @@ def builtin_item_type(tp: Type) -> Optional[Type]:
if isinstance(tp, Instance):
if tp.type.fullname in [
- 'builtins.list', 'builtins.tuple', 'builtins.dict',
- 'builtins.set', 'builtins.frozenset',
+ "builtins.list",
+ "builtins.tuple",
+ "builtins.dict",
+ "builtins.set",
+ "builtins.frozenset",
+ "_collections_abc.dict_keys",
+ "typing.KeysView",
]:
if not tp.args:
# TODO: fix tuple in lib-stub/builtins.pyi (it should be generic).
return None
if not isinstance(get_proper_type(tp.args[0]), AnyType):
return tp.args[0]
- elif isinstance(tp, TupleType) and all(not isinstance(it, AnyType)
- for it in get_proper_types(tp.items)):
+ elif isinstance(tp, TupleType) and all(
+ not isinstance(it, AnyType) for it in get_proper_types(tp.items)
+ ):
return make_simplified_union(tp.items) # this type is not externally visible
elif isinstance(tp, TypedDictType):
# TypedDict always has non-optional string keys. Find the key type from the Mapping
# base class.
for base in tp.fallback.type.mro:
- if base.fullname == 'typing.Mapping':
+ if base.fullname == "typing.Mapping":
return map_instance_to_supertype(tp.fallback, base).args[0]
- assert False, 'No Mapping base class found for TypedDict fallback'
+ assert False, "No Mapping base class found for TypedDict fallback"
return None
@@ -5895,7 +6832,7 @@ def and_conditional_maps(m1: TypeMap, m2: TypeMap) -> TypeMap:
# arbitrarily give precedence to m2. (In the future, we could use
# an intersection type.)
result = m2.copy()
- m2_keys = set(literal_hash(n2) for n2 in m2)
+ m2_keys = {literal_hash(n2) for n2 in m2}
for n1 in m1:
if literal_hash(n1) not in m2_keys:
result[n1] = m1[n1]
@@ -5916,7 +6853,7 @@ def or_conditional_maps(m1: TypeMap, m2: TypeMap) -> TypeMap:
# expressions whose type is refined by both conditions. (We do not
# learn anything about expressions whose type is refined by only
# one condition.)
- result: Dict[Expression, Type] = {}
+ result: dict[Expression, Type] = {}
for n1 in m1:
for n2 in m2:
if literal_hash(n1) == literal_hash(n2):
@@ -5924,8 +6861,7 @@ def or_conditional_maps(m1: TypeMap, m2: TypeMap) -> TypeMap:
return result
-def reduce_conditional_maps(type_maps: List[Tuple[TypeMap, TypeMap]],
- ) -> Tuple[TypeMap, TypeMap]:
+def reduce_conditional_maps(type_maps: list[tuple[TypeMap, TypeMap]]) -> tuple[TypeMap, TypeMap]:
"""Reduces a list containing pairs of if/else TypeMaps into a single pair.
We "and" together all of the if TypeMaps and "or" together the else TypeMaps. So
@@ -5963,7 +6899,7 @@ def reduce_conditional_maps(type_maps: List[Tuple[TypeMap, TypeMap]],
def convert_to_typetype(type_map: TypeMap) -> TypeMap:
- converted_type_map: Dict[Expression, Type] = {}
+ converted_type_map: dict[Expression, Type] = {}
if type_map is None:
return None
for expr, typ in type_map.items():
@@ -5978,7 +6914,7 @@ def convert_to_typetype(type_map: TypeMap) -> TypeMap:
return converted_type_map
-def flatten(t: Expression) -> List[Expression]:
+def flatten(t: Expression) -> list[Expression]:
"""Flatten a nested sequence of tuples/lists into one list of nodes."""
if isinstance(t, TupleExpr) or isinstance(t, ListExpr):
return [b for a in t.items for b in flatten(a)]
@@ -5988,7 +6924,7 @@ def flatten(t: Expression) -> List[Expression]:
return [t]
-def flatten_types(t: Type) -> List[Type]:
+def flatten_types(t: Type) -> list[Type]:
"""Flatten a nested sequence of tuples into one list of nodes."""
t = get_proper_type(t)
if isinstance(t, TupleType):
@@ -5997,50 +6933,15 @@ def flatten_types(t: Type) -> List[Type]:
return [t]
-def get_isinstance_type(expr: Expression,
- type_map: Dict[Expression, Type]) -> Optional[List[TypeRange]]:
- if isinstance(expr, OpExpr) and expr.op == '|':
- left = get_isinstance_type(expr.left, type_map)
- right = get_isinstance_type(expr.right, type_map)
- if left is None or right is None:
- return None
- return left + right
- all_types = get_proper_types(flatten_types(type_map[expr]))
- types: List[TypeRange] = []
- for typ in all_types:
- if isinstance(typ, FunctionLike) and typ.is_type_obj():
- # Type variables may be present -- erase them, which is the best
- # we can do (outside disallowing them here).
- erased_type = erase_typevars(typ.items[0].ret_type)
- types.append(TypeRange(erased_type, is_upper_bound=False))
- elif isinstance(typ, TypeType):
- # Type[A] means "any type that is a subtype of A" rather than "precisely type A"
- # we indicate this by setting is_upper_bound flag
- types.append(TypeRange(typ.item, is_upper_bound=True))
- elif isinstance(typ, Instance) and typ.type.fullname == 'builtins.type':
- object_type = Instance(typ.type.mro[-1], [])
- types.append(TypeRange(object_type, is_upper_bound=True))
- elif isinstance(typ, AnyType):
- types.append(TypeRange(typ, is_upper_bound=False))
- else: # we didn't see an actual type, but rather a variable whose value is unknown to us
- return None
- if not types:
- # this can happen if someone has empty tuple as 2nd argument to isinstance
- # strictly speaking, we should return UninhabitedType but for simplicity we will simply
- # refuse to do any type inference for now
- return None
- return types
-
-
-def expand_func(defn: FuncItem, map: Dict[TypeVarId, Type]) -> FuncItem:
+def expand_func(defn: FuncItem, map: dict[TypeVarId, Type]) -> FuncItem:
visitor = TypeTransformVisitor(map)
- ret = defn.accept(visitor)
+ ret = visitor.node(defn)
assert isinstance(ret, FuncItem)
return ret
class TypeTransformVisitor(TransformVisitor):
- def __init__(self, map: Dict[TypeVarId, Type]) -> None:
+ def __init__(self, map: dict[TypeVarId, Type]) -> None:
super().__init__()
self.map = map
@@ -6049,15 +6950,15 @@ def type(self, type: Type) -> Type:
def are_argument_counts_overlapping(t: CallableType, s: CallableType) -> bool:
- """Can a single call match both t and s, based just on positional argument counts?
- """
+ """Can a single call match both t and s, based just on positional argument counts?"""
min_args = max(t.min_args, s.min_args)
max_args = min(t.max_possible_positional_args(), s.max_possible_positional_args())
return min_args <= max_args
-def is_unsafe_overlapping_overload_signatures(signature: CallableType,
- other: CallableType) -> bool:
+def is_unsafe_overlapping_overload_signatures(
+ signature: CallableType, other: CallableType
+) -> bool:
"""Check if two overloaded signatures are unsafely overlapping or partially overlapping.
We consider two functions 's' and 't' to be unsafely overlapping if both
@@ -6088,18 +6989,23 @@ def is_unsafe_overlapping_overload_signatures(signature: CallableType,
#
# This discrepancy is unfortunately difficult to get rid of, so we repeat the
# checks twice in both directions for now.
- return (is_callable_compatible(signature, other,
- is_compat=is_overlapping_types_no_promote,
- is_compat_return=lambda l, r: not is_subtype_no_promote(l, r),
- ignore_return=False,
- check_args_covariantly=True,
- allow_partial_overlap=True) or
- is_callable_compatible(other, signature,
- is_compat=is_overlapping_types_no_promote,
- is_compat_return=lambda l, r: not is_subtype_no_promote(r, l),
- ignore_return=False,
- check_args_covariantly=False,
- allow_partial_overlap=True))
+ return is_callable_compatible(
+ signature,
+ other,
+ is_compat=is_overlapping_types_no_promote_no_uninhabited,
+ is_compat_return=lambda l, r: not is_subtype_no_promote(l, r),
+ ignore_return=False,
+ check_args_covariantly=True,
+ allow_partial_overlap=True,
+ ) or is_callable_compatible(
+ other,
+ signature,
+ is_compat=is_overlapping_types_no_promote_no_uninhabited,
+ is_compat_return=lambda l, r: not is_subtype_no_promote(r, l),
+ ignore_return=False,
+ check_args_covariantly=False,
+ allow_partial_overlap=True,
+ )
def detach_callable(typ: CallableType) -> CallableType:
@@ -6116,7 +7022,7 @@ def detach_callable(typ: CallableType) -> CallableType:
from a class or not."""
type_list = typ.arg_types + [typ.ret_type]
- appear_map: Dict[str, List[int]] = {}
+ appear_map: dict[str, list[int]] = {}
for i, inner_type in enumerate(type_list):
typevars_available = get_type_vars(inner_type)
for var in typevars_available:
@@ -6133,18 +7039,18 @@ def detach_callable(typ: CallableType) -> CallableType:
for var in set(all_type_vars):
if var.fullname not in used_type_var_names:
continue
- new_variables.append(TypeVarType(
- name=var.name,
- fullname=var.fullname,
- id=var.id,
- values=var.values,
- upper_bound=var.upper_bound,
- variance=var.variance,
- ))
+ new_variables.append(
+ TypeVarType(
+ name=var.name,
+ fullname=var.fullname,
+ id=var.id,
+ values=var.values,
+ upper_bound=var.upper_bound,
+ variance=var.variance,
+ )
+ )
out = typ.copy_modified(
- variables=new_variables,
- arg_types=type_list[:-1],
- ret_type=type_list[-1],
+ variables=new_variables, arg_types=type_list[:-1], ret_type=type_list[-1]
)
return out
@@ -6164,13 +7070,13 @@ def overload_can_never_match(signature: CallableType, other: CallableType) -> bo
# the below subtype check and (surprisingly?) `is_proper_subtype(Any, Any)`
# returns `True`.
# TODO: find a cleaner solution instead of this ad-hoc erasure.
- exp_signature = expand_type(signature, {tvar.id: erase_def_to_union_or_bound(tvar)
- for tvar in signature.variables})
- assert isinstance(exp_signature, ProperType)
+ exp_signature = expand_type(
+ signature, {tvar.id: erase_def_to_union_or_bound(tvar) for tvar in signature.variables}
+ )
assert isinstance(exp_signature, CallableType)
- return is_callable_compatible(exp_signature, other,
- is_compat=is_more_precise,
- ignore_return=True)
+ return is_callable_compatible(
+ exp_signature, other, is_compat=is_more_precise, ignore_return=True
+ )
def is_more_general_arg_prefix(t: FunctionLike, s: FunctionLike) -> bool:
@@ -6179,26 +7085,28 @@ def is_more_general_arg_prefix(t: FunctionLike, s: FunctionLike) -> bool:
# general than one with fewer items (or just one item)?
if isinstance(t, CallableType):
if isinstance(s, CallableType):
- return is_callable_compatible(t, s,
- is_compat=is_proper_subtype,
- ignore_return=True)
+ return is_callable_compatible(t, s, is_compat=is_proper_subtype, ignore_return=True)
elif isinstance(t, FunctionLike):
if isinstance(s, FunctionLike):
if len(t.items) == len(s.items):
- return all(is_same_arg_prefix(items, itemt)
- for items, itemt in zip(t.items, s.items))
+ return all(
+ is_same_arg_prefix(items, itemt) for items, itemt in zip(t.items, s.items)
+ )
return False
def is_same_arg_prefix(t: CallableType, s: CallableType) -> bool:
- return is_callable_compatible(t, s,
- is_compat=is_same_type,
- ignore_return=True,
- check_args_covariantly=True,
- ignore_pos_arg_names=True)
+ return is_callable_compatible(
+ t,
+ s,
+ is_compat=is_same_type,
+ ignore_return=True,
+ check_args_covariantly=True,
+ ignore_pos_arg_names=True,
+ )
-def infer_operator_assignment_method(typ: Type, operator: str) -> Tuple[bool, str]:
+def infer_operator_assignment_method(typ: Type, operator: str) -> tuple[bool, str]:
"""Determine if operator assignment on given value type is in-place, and the method name.
For example, if operator is '+', return (True, '__iadd__') or (False, '__add__')
@@ -6208,39 +7116,54 @@ def infer_operator_assignment_method(typ: Type, operator: str) -> Tuple[bool, st
method = operators.op_methods[operator]
if isinstance(typ, Instance):
if operator in operators.ops_with_inplace_method:
- inplace_method = '__i' + method[2:]
+ inplace_method = "__i" + method[2:]
if typ.type.has_readable_member(inplace_method):
return True, inplace_method
return False, method
-def is_valid_inferred_type(typ: Type) -> bool:
- """Is an inferred type valid?
+def is_valid_inferred_type(typ: Type, is_lvalue_final: bool = False) -> bool:
+ """Is an inferred type valid and needs no further refinement?
- Examples of invalid types include the None type or List[<uninhabited>].
+ Examples of invalid types include the None type (when we are not assigning
+ None to a final lvalue) or List[<uninhabited>].
When not doing strict Optional checking, all types containing None are
invalid. When doing strict Optional checking, only None and types that are
incompletely defined (i.e. contain UninhabitedType) are invalid.
"""
- if isinstance(get_proper_type(typ), (NoneType, UninhabitedType)):
- # With strict Optional checking, we *may* eventually infer NoneType when
- # the initializer is None, but we only do that if we can't infer a
- # specific Optional type. This resolution happens in
- # leave_partial_types when we pop a partial types scope.
+ proper_type = get_proper_type(typ)
+ if isinstance(proper_type, NoneType):
+ # If the lvalue is final, we may immediately infer NoneType when the
+ # initializer is None.
+ #
+ # If not, we want to defer making this decision. The final inferred
+ # type could either be NoneType or an Optional type, depending on
+ # the context. This resolution happens in leave_partial_types when
+ # we pop a partial types scope.
+ return is_lvalue_final
+ elif isinstance(proper_type, UninhabitedType):
return False
- return not typ.accept(NothingSeeker())
+ return not typ.accept(InvalidInferredTypes())
-class NothingSeeker(TypeQuery[bool]):
- """Find any types resulting from failed (ambiguous) type inference."""
+class InvalidInferredTypes(BoolTypeQuery):
+ """Find type components that are not valid for an inferred type.
+
+ These include <Erased> type, and any <Nothing> types resulting from failed
+ (ambiguous) type inference.
+ """
def __init__(self) -> None:
- super().__init__(any)
+ super().__init__(ANY_STRATEGY)
def visit_uninhabited_type(self, t: UninhabitedType) -> bool:
return t.ambiguous
+ def visit_erased_type(self, t: ErasedType) -> bool:
+ # This can happen inside a lambda.
+ return True
+
class SetNothingToAny(TypeTranslator):
"""Replace all ambiguous types with Any (to avoid spurious extra errors)."""
@@ -6251,12 +7174,12 @@ def visit_uninhabited_type(self, t: UninhabitedType) -> Type:
return t
def visit_type_alias_type(self, t: TypeAliasType) -> Type:
- # Target of the alias cannot by an ambiguous <nothing>, so we just
+ # Target of the alias cannot be an ambiguous <nothing>, so we just
# replace the arguments.
return t.copy_modified(args=[a.accept(self) for a in t.args])
-def is_node_static(node: Optional[Node]) -> Optional[bool]:
+def is_node_static(node: Node | None) -> bool | None:
"""Find out if a node describes a static function method."""
if isinstance(node, FuncDef):
@@ -6270,29 +7193,29 @@ def is_node_static(node: Optional[Node]) -> Optional[bool]:
class CheckerScope:
# We keep two stacks combined, to maintain the relative order
- stack: List[Union[TypeInfo, FuncItem, MypyFile]]
+ stack: list[TypeInfo | FuncItem | MypyFile]
def __init__(self, module: MypyFile) -> None:
self.stack = [module]
- def top_function(self) -> Optional[FuncItem]:
+ def top_function(self) -> FuncItem | None:
for e in reversed(self.stack):
if isinstance(e, FuncItem):
return e
return None
- def top_non_lambda_function(self) -> Optional[FuncItem]:
+ def top_non_lambda_function(self) -> FuncItem | None:
for e in reversed(self.stack):
if isinstance(e, FuncItem) and not isinstance(e, LambdaExpr):
return e
return None
- def active_class(self) -> Optional[TypeInfo]:
+ def active_class(self) -> TypeInfo | None:
if isinstance(self.stack[-1], TypeInfo):
return self.stack[-1]
return None
- def enclosing_class(self) -> Optional[TypeInfo]:
+ def enclosing_class(self) -> TypeInfo | None:
"""Is there a class *directly* enclosing this function?"""
top = self.top_function()
assert top, "This method must be called from inside a function"
@@ -6303,7 +7226,7 @@ def enclosing_class(self) -> Optional[TypeInfo]:
return enclosing
return None
- def active_self_type(self) -> Optional[Union[Instance, TupleType]]:
+ def active_self_type(self) -> Instance | TupleType | None:
"""An instance or tuple type representing the current class.
This returns None unless we are in class body or in a method.
@@ -6329,8 +7252,8 @@ def push_class(self, info: TypeInfo) -> Iterator[None]:
self.stack.pop()
-TKey = TypeVar('TKey')
-TValue = TypeVar('TValue')
+TKey = TypeVar("TKey")
+TValue = TypeVar("TValue")
class DisjointDict(Generic[TKey, TValue]):
@@ -6359,19 +7282,20 @@ class DisjointDict(Generic[TKey, TValue]):
    tree of height log_2(n). This makes root lookups no longer amortized constant time when we
finally call 'items()'.
"""
+
def __init__(self) -> None:
# Each key maps to a unique ID
- self._key_to_id: Dict[TKey, int] = {}
+ self._key_to_id: dict[TKey, int] = {}
# Each id points to the parent id, forming a forest of upwards-pointing trees. If the
# current id already is the root, it points to itself. We gradually flatten these trees
        # as we perform root lookups: eventually all nodes point directly to their root.
- self._id_to_parent_id: Dict[int, int] = {}
+ self._id_to_parent_id: dict[int, int] = {}
# Each root id in turn maps to the set of values.
- self._root_id_to_values: Dict[int, Set[TValue]] = {}
+ self._root_id_to_values: dict[int, set[TValue]] = {}
- def add_mapping(self, keys: Set[TKey], values: Set[TValue]) -> None:
+ def add_mapping(self, keys: set[TKey], values: set[TValue]) -> None:
"""Adds a 'Set[TKey] -> Set[TValue]' mapping. If there already exists a mapping
containing one or more of the given keys, we merge the input mapping with the old one.
@@ -6391,9 +7315,9 @@ def add_mapping(self, keys: Set[TKey], values: Set[TValue]) -> None:
self._id_to_parent_id[subtree_root] = new_root
root_values.update(self._root_id_to_values.pop(subtree_root))
- def items(self) -> List[Tuple[Set[TKey], Set[TValue]]]:
+ def items(self) -> list[tuple[set[TKey], set[TValue]]]:
"""Returns all disjoint mappings in key-value pairs."""
- root_id_to_keys: Dict[int, Set[TKey]] = {}
+ root_id_to_keys: dict[int, set[TKey]] = {}
for key in self._key_to_id:
root_id = self._lookup_root_id(key)
if root_id not in root_id_to_keys:
@@ -6427,10 +7351,11 @@ def _lookup_root_id(self, key: TKey) -> int:
return i
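# Minimal usage sketch for DisjointDict, assuming it is importable from
# mypy.checker (where it is defined above):
from mypy.checker import DisjointDict

d: DisjointDict[str, int] = DisjointDict()
d.add_mapping({"a", "b"}, {1})
d.add_mapping({"b", "c"}, {2})  # shares "b": merged into the first group
d.add_mapping({"x"}, {3})       # disjoint keys: kept as a separate group

# items() -> [({"a", "b", "c"}, {1, 2}), ({"x"}, {3})] (group order may vary)
print(d.items())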
-def group_comparison_operands(pairwise_comparisons: Iterable[Tuple[str, Expression, Expression]],
- operand_to_literal_hash: Mapping[int, Key],
- operators_to_group: Set[str],
- ) -> List[Tuple[str, List[int]]]:
+def group_comparison_operands(
+ pairwise_comparisons: Iterable[tuple[str, Expression, Expression]],
+ operand_to_literal_hash: Mapping[int, Key],
+ operators_to_group: set[str],
+) -> list[tuple[str, list[int]]]:
"""Group a series of comparison operands together chained by any operand
in the 'operators_to_group' set. All other pairwise operands are kept in
groups of size 2.
@@ -6474,12 +7399,12 @@ def group_comparison_operands(pairwise_comparisons: Iterable[Tuple[str, Expressi
This function is currently only used to assist with type-narrowing refinements
and is extracted out to a helper function so we can unit test it.
"""
- groups: Dict[str, DisjointDict[Key, int]] = {op: DisjointDict() for op in operators_to_group}
+ groups: dict[str, DisjointDict[Key, int]] = {op: DisjointDict() for op in operators_to_group}
- simplified_operator_list: List[Tuple[str, List[int]]] = []
- last_operator: Optional[str] = None
- current_indices: Set[int] = set()
- current_hashes: Set[Key] = set()
+ simplified_operator_list: list[tuple[str, list[int]]] = []
+ last_operator: str | None = None
+ current_indices: set[int] = set()
+ current_hashes: set[Key] = set()
for i, (operator, left_expr, right_expr) in enumerate(pairwise_comparisons):
if last_operator is None:
last_operator = operator
@@ -6526,27 +7451,28 @@ def group_comparison_operands(pairwise_comparisons: Iterable[Tuple[str, Expressi
return simplified_operator_list
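# Hedged sketch of the grouping, in the unit-test style the docstring
# mentions; the literal-hash tuples here are simplified stand-ins.
from mypy.checker import group_comparison_operands
from mypy.nodes import NameExpr

x, y, z, w = NameExpr("x"), NameExpr("y"), NameExpr("z"), NameExpr("w")
pairwise = [("==", x, y), ("==", y, z), ("<", z, w)]  # i.e. x == y == z < w
operand_hashes = {0: ("x",), 1: ("y",), 2: ("z",), 3: ("w",)}

groups = group_comparison_operands(pairwise, operand_hashes, {"=="})
print(groups)  # expected: [("==", [0, 1, 2]), ("<", [2, 3])]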
-def is_typed_callable(c: Optional[Type]) -> bool:
+def is_typed_callable(c: Type | None) -> bool:
c = get_proper_type(c)
if not c or not isinstance(c, CallableType):
return False
- return not all(isinstance(t, AnyType) and t.type_of_any == TypeOfAny.unannotated
- for t in get_proper_types(c.arg_types + [c.ret_type]))
+ return not all(
+ isinstance(t, AnyType) and t.type_of_any == TypeOfAny.unannotated
+ for t in get_proper_types(c.arg_types + [c.ret_type])
+ )
-def is_untyped_decorator(typ: Optional[Type]) -> bool:
+def is_untyped_decorator(typ: Type | None) -> bool:
typ = get_proper_type(typ)
if not typ:
return True
elif isinstance(typ, CallableType):
return not is_typed_callable(typ)
elif isinstance(typ, Instance):
- method = typ.type.get_method('__call__')
+ method = typ.type.get_method("__call__")
if method:
if isinstance(method, Decorator):
- return (
- is_untyped_decorator(method.func.type)
- or is_untyped_decorator(method.var.type)
+ return is_untyped_decorator(method.func.type) or is_untyped_decorator(
+ method.var.type
)
if isinstance(method.type, Overloaded):
@@ -6560,25 +7486,46 @@ def is_untyped_decorator(typ: Optional[Type]) -> bool:
return True
-def is_static(func: Union[FuncBase, Decorator]) -> bool:
+def is_static(func: FuncBase | Decorator) -> bool:
if isinstance(func, Decorator):
return is_static(func.func)
elif isinstance(func, FuncBase):
return func.is_static
- assert False, "Unexpected func type: {}".format(type(func))
+ assert False, f"Unexpected func type: {type(func)}"
+
+
+def is_property(defn: SymbolNode) -> bool:
+ if isinstance(defn, Decorator):
+ return defn.func.is_property
+ if isinstance(defn, OverloadedFuncDef):
+ if defn.items and isinstance(defn.items[0], Decorator):
+ return defn.items[0].func.is_property
+ return False
+
+
+def get_property_type(t: ProperType) -> ProperType:
+ if isinstance(t, CallableType):
+ return get_proper_type(t.ret_type)
+ if isinstance(t, Overloaded):
+ return get_proper_type(t.items[0].ret_type)
+ return t
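# User-level sketch of what these helpers classify: a @property becomes a
# Decorator node (or an OverloadedFuncDef whose first item is the getter,
# for a getter/setter pair), and get_property_type() extracts the getter's
# return type from the synthesized callable.
class PointView:
    @property
    def x(self) -> int:  # is_property(...) -> True; property type is int
        return 0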
def is_subtype_no_promote(left: Type, right: Type) -> bool:
return is_subtype(left, right, ignore_promotions=True)
-def is_overlapping_types_no_promote(left: Type, right: Type) -> bool:
- return is_overlapping_types(left, right, ignore_promotions=True)
+def is_overlapping_types_no_promote_no_uninhabited(left: Type, right: Type) -> bool:
+    # For the purpose of unsafe overload checks we consider list[<nothing>] and list[int]
+    # non-overlapping. This is consistent with how we treat list[int] and list[str] as
+    # non-overlapping, even though [] belongs to both. This also prevents false positives
+    # for failed type inference during unification.
+ return is_overlapping_types(left, right, ignore_promotions=True, ignore_uninhabited=True)
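# The overload case this guards, as user code (assumes Python 3.10+ syntax):
# without ignore_uninhabited, the empty list literal type list[<nothing>]
# would overlap both variants and trigger a spurious unsafe-overload error.
from typing import overload

@overload
def first(xs: list[int]) -> int: ...
@overload
def first(xs: list[str]) -> str: ...
def first(xs: list[int] | list[str]) -> int | str:
    return xs[0]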
def is_private(node_name: str) -> bool:
"""Check if node is private to class definition."""
- return node_name.startswith('__') and not node_name.endswith('__')
+ return node_name.startswith("__") and not node_name.endswith("__")
def is_string_literal(typ: Type) -> bool:
@@ -6588,11 +7535,10 @@ def is_string_literal(typ: Type) -> bool:
def has_bool_item(typ: ProperType) -> bool:
"""Return True if type is 'bool' or a union with a 'bool' item."""
- if is_named_instance(typ, 'builtins.bool'):
+ if is_named_instance(typ, "builtins.bool"):
return True
if isinstance(typ, UnionType):
- return any(is_named_instance(item, 'builtins.bool')
- for item in typ.items)
+ return any(is_named_instance(item, "builtins.bool") for item in typ.items)
return False
diff --git a/mypy/checkexpr.py b/mypy/checkexpr.py
index 45d5818d4eeb..35371de155d7 100644
--- a/mypy/checkexpr.py
+++ b/mypy/checkexpr.py
@@ -1,95 +1,177 @@
"""Expression type checker. This file is conceptually part of TypeChecker."""
-from mypy.backports import OrderedDict, nullcontext
-from contextlib import contextmanager
+from __future__ import annotations
+
import itertools
-from typing import (
- cast, Dict, Set, List, Tuple, Callable, Union, Optional, Sequence, Iterator
-)
-from typing_extensions import ClassVar, Final, overload, TypeAlias as _TypeAlias
+import time
+from contextlib import contextmanager
+from typing import Callable, ClassVar, Iterator, List, Optional, Sequence, cast
+from typing_extensions import Final, TypeAlias as _TypeAlias, overload
-from mypy.errors import report_internal_error
-from mypy.typeanal import (
- has_any_from_unimported_type, check_for_explicit_any, set_any_tvars, expand_type_alias,
- make_optional_type,
-)
-from mypy.semanal_enum import ENUM_BASES
-from mypy.types import (
- Type, AnyType, CallableType, Overloaded, NoneType, TypeVarType,
- TupleType, TypedDictType, Instance, ErasedType, UnionType,
- PartialType, DeletedType, UninhabitedType, TypeType, TypeOfAny, LiteralType, LiteralValue,
- is_named_instance, FunctionLike, ParamSpecType, ParamSpecFlavor,
- StarType, is_optional, remove_optional, is_generic_instance, get_proper_type, ProperType,
- get_proper_types, flatten_nested_unions, LITERAL_TYPE_NAMES,
-)
-from mypy.nodes import (
- NameExpr, RefExpr, Var, FuncDef, OverloadedFuncDef, TypeInfo, CallExpr,
- MemberExpr, IntExpr, StrExpr, BytesExpr, UnicodeExpr, FloatExpr,
- OpExpr, UnaryExpr, IndexExpr, CastExpr, RevealExpr, TypeApplication, ListExpr,
- TupleExpr, DictExpr, LambdaExpr, SuperExpr, SliceExpr, Context, Expression,
- ListComprehension, GeneratorExpr, SetExpr, MypyFile, Decorator,
- ConditionalExpr, ComparisonExpr, TempNode, SetComprehension, AssignmentExpr,
- DictionaryComprehension, ComplexExpr, EllipsisExpr, StarExpr, AwaitExpr, YieldExpr,
- YieldFromExpr, TypedDictExpr, PromoteExpr, NewTypeExpr, NamedTupleExpr, TypeVarExpr,
- TypeAliasExpr, BackquoteExpr, EnumCallExpr, TypeAlias, SymbolNode, PlaceholderNode,
- ParamSpecExpr,
- ArgKind, ARG_POS, ARG_NAMED, ARG_STAR, ARG_STAR2, LITERAL_TYPE, REVEAL_TYPE,
-)
-from mypy.literals import literal
-from mypy import nodes
-from mypy import operators
import mypy.checker
-from mypy import types
-from mypy.sametypes import is_same_type
-from mypy.erasetype import replace_meta_vars, erase_type, remove_instance_last_known_values
-from mypy.maptype import map_instance_to_supertype
-from mypy.messages import MessageBuilder
-from mypy import message_registry
-from mypy.infer import (
- ArgumentInferContext, infer_type_arguments, infer_function_type_arguments,
-)
-from mypy import join
-from mypy.meet import narrow_declared_type, is_overlapping_types
-from mypy.subtypes import is_subtype, is_proper_subtype, is_equivalent, non_method_protocol_members
-from mypy import applytype
-from mypy import erasetype
-from mypy.checkmember import analyze_member_access, type_object_type
+import mypy.errorcodes as codes
+from mypy import applytype, erasetype, join, message_registry, nodes, operators, types
from mypy.argmap import ArgTypeExpander, map_actuals_to_formals, map_formals_to_actuals
+from mypy.checkmember import analyze_member_access, type_object_type
from mypy.checkstrformat import StringFormatterChecker
+from mypy.erasetype import erase_type, remove_instance_last_known_values, replace_meta_vars
+from mypy.errors import ErrorWatcher, report_internal_error
from mypy.expandtype import expand_type, expand_type_by_instance, freshen_function_type_vars
-from mypy.util import split_module_names
-from mypy.typevars import fill_typevars
-from mypy.visitor import ExpressionVisitor
+from mypy.infer import ArgumentInferContext, infer_function_type_arguments, infer_type_arguments
+from mypy.literals import literal
+from mypy.maptype import map_instance_to_supertype
+from mypy.meet import is_overlapping_types, narrow_declared_type
+from mypy.message_registry import ErrorMessage
+from mypy.messages import MessageBuilder
+from mypy.nodes import (
+ ARG_NAMED,
+ ARG_POS,
+ ARG_STAR,
+ ARG_STAR2,
+ IMPLICITLY_ABSTRACT,
+ LITERAL_TYPE,
+ REVEAL_TYPE,
+ ArgKind,
+ AssertTypeExpr,
+ AssignmentExpr,
+ AwaitExpr,
+ BytesExpr,
+ CallExpr,
+ CastExpr,
+ ComparisonExpr,
+ ComplexExpr,
+ ConditionalExpr,
+ Context,
+ Decorator,
+ DictExpr,
+ DictionaryComprehension,
+ EllipsisExpr,
+ EnumCallExpr,
+ Expression,
+ FloatExpr,
+ FuncDef,
+ GeneratorExpr,
+ IndexExpr,
+ IntExpr,
+ LambdaExpr,
+ ListComprehension,
+ ListExpr,
+ MemberExpr,
+ MypyFile,
+ NamedTupleExpr,
+ NameExpr,
+ NewTypeExpr,
+ OpExpr,
+ OverloadedFuncDef,
+ ParamSpecExpr,
+ PlaceholderNode,
+ PromoteExpr,
+ RefExpr,
+ RevealExpr,
+ SetComprehension,
+ SetExpr,
+ SliceExpr,
+ StarExpr,
+ StrExpr,
+ SuperExpr,
+ SymbolNode,
+ TempNode,
+ TupleExpr,
+ TypeAlias,
+ TypeAliasExpr,
+ TypeApplication,
+ TypedDictExpr,
+ TypeInfo,
+ TypeVarExpr,
+ TypeVarTupleExpr,
+ UnaryExpr,
+ Var,
+ YieldExpr,
+ YieldFromExpr,
+)
from mypy.plugin import (
+ FunctionContext,
+ FunctionSigContext,
+ MethodContext,
+ MethodSigContext,
Plugin,
- MethodContext, MethodSigContext,
- FunctionContext, FunctionSigContext,
+)
+from mypy.semanal_enum import ENUM_BASES
+from mypy.state import state
+from mypy.subtypes import is_equivalent, is_same_type, is_subtype, non_method_protocol_members
+from mypy.traverser import has_await_expression
+from mypy.typeanal import (
+ check_for_explicit_any,
+ expand_type_alias,
+ has_any_from_unimported_type,
+ make_optional_type,
+ set_any_tvars,
)
from mypy.typeops import (
- try_expanding_sum_type_to_union, tuple_fallback, make_simplified_union,
- true_only, false_only, erase_to_union_or_bound, function_type,
- callable_type, try_getting_str_literals, custom_special_method,
+ callable_type,
+ custom_special_method,
+ erase_to_union_or_bound,
+ false_only,
+ fixup_partial_type,
+ function_type,
is_literal_type_like,
+ make_simplified_union,
+ simple_literal_type,
+ true_only,
+ try_expanding_sum_type_to_union,
+ try_getting_str_literals,
+ tuple_fallback,
)
-from mypy.message_registry import ErrorMessage
-import mypy.errorcodes as codes
+from mypy.types import (
+ LITERAL_TYPE_NAMES,
+ TUPLE_LIKE_INSTANCE_NAMES,
+ AnyType,
+ CallableType,
+ DeletedType,
+ ErasedType,
+ ExtraAttrs,
+ FunctionLike,
+ Instance,
+ LiteralType,
+ LiteralValue,
+ NoneType,
+ Overloaded,
+ ParamSpecFlavor,
+ ParamSpecType,
+ PartialType,
+ ProperType,
+ StarType,
+ TupleType,
+ Type,
+ TypeAliasType,
+ TypedDictType,
+ TypeOfAny,
+ TypeType,
+ TypeVarTupleType,
+ TypeVarType,
+ UninhabitedType,
+ UnionType,
+ UnpackType,
+ flatten_nested_unions,
+ get_proper_type,
+ get_proper_types,
+ has_recursive_types,
+ is_generic_instance,
+ is_named_instance,
+ is_optional,
+ is_self_type_like,
+ remove_optional,
+)
+from mypy.typestate import type_state
+from mypy.typevars import fill_typevars
+from mypy.typevartuples import find_unpack_in_list
+from mypy.util import split_module_names
+from mypy.visitor import ExpressionVisitor
# Type of callback user for checking individual function arguments. See
# check_args() below for details.
-ArgChecker: _TypeAlias = Callable[[
- Type,
- Type,
- ArgKind,
- Type,
- int,
- int,
- CallableType,
- Optional[Type],
- Context,
- Context,
- MessageBuilder,
- ],
- None,
+ArgChecker: _TypeAlias = Callable[
+    [Type, Type, ArgKind, Type, int, int, CallableType, Optional[Type], Context, Context], None
]
# Maximum nesting level for math union in overloads, setting this to large values
@@ -119,14 +201,23 @@ class TooManyUnions(Exception):
"""
-def extract_refexpr_names(expr: RefExpr) -> Set[str]:
+def allow_fast_container_literal(t: Type) -> bool:
+ if isinstance(t, TypeAliasType) and t.is_recursive:
+ return False
+ t = get_proper_type(t)
+ return isinstance(t, Instance) or (
+ isinstance(t, TupleType) and all(allow_fast_container_literal(it) for it in t.items)
+ )
+
+
+def extract_refexpr_names(expr: RefExpr) -> set[str]:
"""Recursively extracts all module references from a reference expression.
Note that currently, the only two subclasses of RefExpr are NameExpr and
MemberExpr."""
- output: Set[str] = set()
- while isinstance(expr.node, MypyFile) or expr.fullname is not None:
- if isinstance(expr.node, MypyFile) and expr.fullname is not None:
+ output: set[str] = set()
+ while isinstance(expr.node, MypyFile) or expr.fullname:
+ if isinstance(expr.node, MypyFile) and expr.fullname:
# If it's None, something's wrong (perhaps due to an
# import cycle or a suppressed error). For now we just
# skip it.
@@ -137,9 +228,9 @@ def extract_refexpr_names(expr: RefExpr) -> Set[str]:
if isinstance(expr.node, TypeInfo):
# Reference to a class or a nested class
output.update(split_module_names(expr.node.module_name))
- elif expr.fullname is not None and '.' in expr.fullname and not is_suppressed_import:
+ elif "." in expr.fullname and not is_suppressed_import:
# Everything else (that is not a silenced import within a class)
- output.add(expr.fullname.rsplit('.', 1)[0])
+ output.add(expr.fullname.rsplit(".", 1)[0])
break
elif isinstance(expr, MemberExpr):
if isinstance(expr.expr, RefExpr):
@@ -147,7 +238,7 @@ def extract_refexpr_names(expr: RefExpr) -> Set[str]:
else:
break
else:
- raise AssertionError("Unknown RefExpr subclass: {}".format(type(expr)))
+ raise AssertionError(f"Unknown RefExpr subclass: {type(expr)}")
return output
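# A standalone analogue of the traversal above (parent_modules is a
# hypothetical helper, not a mypy API): collect the enclosing module names
# of a dotted fullname the way split_module_names() does per visited node.
def parent_modules(fullname: str) -> set[str]:
    out = set()
    while "." in fullname:
        fullname = fullname.rsplit(".", 1)[0]
        out.add(fullname)
    return out

assert parent_modules("collections.abc.Mapping") == {"collections", "collections.abc"}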
@@ -162,32 +253,54 @@ class ExpressionChecker(ExpressionVisitor[Type]):
"""
# Some services are provided by a TypeChecker instance.
- chk: "mypy.checker.TypeChecker"
+ chk: mypy.checker.TypeChecker
# This is shared with TypeChecker, but stored also here for convenience.
msg: MessageBuilder
# Type context for type inference
- type_context: List[Optional[Type]]
+ type_context: list[Type | None]
+
+ # cache resolved types in some cases
+ resolved_type: dict[Expression, ProperType]
strfrm_checker: StringFormatterChecker
plugin: Plugin
- def __init__(self,
- chk: 'mypy.checker.TypeChecker',
- msg: MessageBuilder,
- plugin: Plugin) -> None:
+ def __init__(
+ self,
+ chk: mypy.checker.TypeChecker,
+ msg: MessageBuilder,
+ plugin: Plugin,
+ per_line_checking_time_ns: dict[int, int],
+ ) -> None:
"""Construct an expression type checker."""
self.chk = chk
self.msg = msg
self.plugin = plugin
+ self.per_line_checking_time_ns = per_line_checking_time_ns
+ self.collect_line_checking_stats = chk.options.line_checking_stats is not None
+ # Are we already visiting some expression? This is used to avoid double counting
+ # time for nested expressions.
+ self.in_expression = False
self.type_context = [None]
# Temporary overrides for expression types. This is currently
# used by the union math in overloads.
# TODO: refactor this to use a pattern similar to one in
# multiassign_from_union, or maybe even combine the two?
- self.type_overrides: Dict[Expression, Type] = {}
+ self.type_overrides: dict[Expression, Type] = {}
self.strfrm_checker = StringFormatterChecker(self, self.chk, self.msg)
+ self.resolved_type = {}
+
+ # Callee in a call expression is in some sense both runtime context and
+ # type context, because we support things like C[int](...). Store information
+    # on whether the current expression is a callee, to give better error messages
+ # related to type context.
+ self.is_callee = False
+
+ def reset(self) -> None:
+ self.resolved_type = {}
+
def visit_name_expr(self, e: NameExpr) -> Type:
"""Type check a name expression.
@@ -198,7 +311,7 @@ def visit_name_expr(self, e: NameExpr) -> Type:
return self.narrow_type_from_binder(e, result)
def analyze_ref_expr(self, e: RefExpr, lvalue: bool = False) -> Type:
- result: Optional[Type] = None
+ result: Type | None = None
node = e.node
if isinstance(e, NameExpr) and e.is_special_form:
@@ -212,7 +325,7 @@ def analyze_ref_expr(self, e: RefExpr, lvalue: bool = False) -> Type:
result = self.chk.handle_partial_var_type(result, lvalue, node, e)
elif isinstance(node, FuncDef):
# Reference to a global function.
- result = function_type(node, self.named_type('builtins.function'))
+ result = function_type(node, self.named_type("builtins.function"))
elif isinstance(node, OverloadedFuncDef) and node.type is not None:
# node.type is None when there are multiple definitions of a function
# and it's decorated by something that is not typing.overload
@@ -221,9 +334,14 @@ def analyze_ref_expr(self, e: RefExpr, lvalue: bool = False) -> Type:
result = node.type
elif isinstance(node, TypeInfo):
# Reference to a type object.
- result = type_object_type(node, self.named_type)
- if (isinstance(result, CallableType) and
- isinstance(result.ret_type, Instance)): # type: ignore
+ if node.typeddict_type:
+ # We special-case TypedDict, because they don't define any constructor.
+ result = self.typeddict_callable(node)
+ else:
+ result = type_object_type(node, self.named_type)
+ if isinstance(result, CallableType) and isinstance( # type: ignore[misc]
+ result.ret_type, Instance
+ ):
# We need to set correct line and column
# TODO: always do this in type_object_type by passing the original context
result.ret_type.line = e.line
@@ -234,27 +352,21 @@ def analyze_ref_expr(self, e: RefExpr, lvalue: bool = False) -> Type:
result = erasetype.erase_typevars(result)
elif isinstance(node, MypyFile):
# Reference to a module object.
- try:
- result = self.named_type('types.ModuleType')
- except KeyError:
- # In test cases might 'types' may not be available.
- # Fall back to a dummy 'object' type instead to
- # avoid a crash.
- result = self.named_type('builtins.object')
+ result = self.module_type(node)
elif isinstance(node, Decorator):
result = self.analyze_var_ref(node.var, e)
elif isinstance(node, TypeAlias):
# Something that refers to a type alias appears in runtime context.
# Note that we suppress bogus errors for alias redefinitions,
# they are already reported in semanal.py.
- result = self.alias_type_in_runtime_context(node, node.no_args, e,
- alias_definition=e.is_alias_rvalue
- or lvalue)
+ result = self.alias_type_in_runtime_context(
+ node, ctx=e, alias_definition=e.is_alias_rvalue or lvalue
+ )
elif isinstance(node, (TypeVarExpr, ParamSpecExpr)):
result = self.object_type()
else:
if isinstance(node, PlaceholderNode):
- assert False, 'PlaceholderNode %r leaked to checker' % node.fullname
+ assert False, f"PlaceholderNode {node.fullname!r} leaked to checker"
# Unknown reference; use any type implicitly to avoid
# generating extra type errors.
result = AnyType(TypeOfAny.from_error)
@@ -267,8 +379,8 @@ def analyze_var_ref(self, var: Var, context: Context) -> Type:
if isinstance(var_type, Instance):
if self.is_literal_context() and var_type.last_known_value is not None:
return var_type.last_known_value
- if var.name in {'True', 'False'}:
- return self.infer_literal_expr_type(var.name == 'True', 'builtins.bool')
+ if var.name in {"True", "False"}:
+ return self.infer_literal_expr_type(var.name == "True", "builtins.bool")
return var.type
else:
if not var.is_ready and self.chk.in_checked_function():
@@ -276,6 +388,31 @@ def analyze_var_ref(self, var: Var, context: Context) -> Type:
# Implicit 'Any' type.
return AnyType(TypeOfAny.special_form)
+ def module_type(self, node: MypyFile) -> Instance:
+ try:
+ result = self.named_type("types.ModuleType")
+ except KeyError:
+            # In test cases 'types' may not be available.
+ # Fall back to a dummy 'object' type instead to
+ # avoid a crash.
+ result = self.named_type("builtins.object")
+ module_attrs = {}
+ immutable = set()
+ for name, n in node.names.items():
+ if not n.module_public:
+ continue
+ if isinstance(n.node, Var) and n.node.is_final:
+ immutable.add(name)
+ typ = self.chk.determine_type_of_member(n)
+ if typ:
+ module_attrs[name] = typ
+ else:
+ # TODO: what to do about nested module references?
+ # They are non-trivial because there may be import cycles.
+ module_attrs[name] = AnyType(TypeOfAny.special_form)
+ result.extra_attrs = ExtraAttrs(module_attrs, immutable, node.fullname)
+ return result
+
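# What module_type() buys in user code (a sketch): a module object keeps
# precise per-attribute types through ExtraAttrs instead of collapsing to a
# bare types.ModuleType whose attributes are all Any.
import json

m = json         # typed as types.ModuleType carrying json's public names
s = m.dumps({})  # attribute access stays precisely typed via the extra attrs
assert s == "{}"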
def visit_call_expr(self, e: CallExpr, allow_none_return: bool = False) -> Type:
"""Type check a call expression."""
if e.analyzed:
@@ -287,15 +424,34 @@ def visit_call_expr(self, e: CallExpr, allow_none_return: bool = False) -> Type:
return self.accept(e.analyzed, self.type_context[-1])
return self.visit_call_expr_inner(e, allow_none_return=allow_none_return)
+ def refers_to_typeddict(self, base: Expression) -> bool:
+ if not isinstance(base, RefExpr):
+ return False
+ if isinstance(base.node, TypeInfo) and base.node.typeddict_type is not None:
+ # Direct reference.
+ return True
+ return isinstance(base.node, TypeAlias) and isinstance(
+ get_proper_type(base.node.target), TypedDictType
+ )
+
def visit_call_expr_inner(self, e: CallExpr, allow_none_return: bool = False) -> Type:
- if isinstance(e.callee, RefExpr) and isinstance(e.callee.node, TypeInfo) and \
- e.callee.node.typeddict_type is not None:
- # Use named fallback for better error messages.
- typeddict_type = e.callee.node.typeddict_type.copy_modified(
- fallback=Instance(e.callee.node, []))
- return self.check_typeddict_call(typeddict_type, e.arg_kinds, e.arg_names, e.args, e)
- if (isinstance(e.callee, NameExpr) and e.callee.name in ('isinstance', 'issubclass')
- and len(e.args) == 2):
+ if (
+ self.refers_to_typeddict(e.callee)
+ or isinstance(e.callee, IndexExpr)
+ and self.refers_to_typeddict(e.callee.base)
+ ):
+ typeddict_callable = get_proper_type(self.accept(e.callee, is_callee=True))
+ if isinstance(typeddict_callable, CallableType):
+ typeddict_type = get_proper_type(typeddict_callable.ret_type)
+ assert isinstance(typeddict_type, TypedDictType)
+ return self.check_typeddict_call(
+ typeddict_type, e.arg_kinds, e.arg_names, e.args, e, typeddict_callable
+ )
+ if (
+ isinstance(e.callee, NameExpr)
+ and e.callee.name in ("isinstance", "issubclass")
+ and len(e.args) == 2
+ ):
for typ in mypy.checker.flatten(e.args[1]):
node = None
if isinstance(typ, NameExpr):
@@ -307,14 +463,22 @@ def visit_call_expr_inner(self, e: CallExpr, allow_none_return: bool = False) ->
if is_expr_literal_type(typ):
self.msg.cannot_use_function_with_type(e.callee.name, "Literal", e)
continue
- if (node and isinstance(node.node, TypeAlias)
- and isinstance(get_proper_type(node.node.target), AnyType)):
+ if (
+ node
+ and isinstance(node.node, TypeAlias)
+ and isinstance(get_proper_type(node.node.target), AnyType)
+ ):
self.msg.cannot_use_function_with_type(e.callee.name, "Any", e)
continue
- if ((isinstance(typ, IndexExpr)
- and isinstance(typ.analyzed, (TypeApplication, TypeAliasExpr)))
- or (isinstance(typ, NameExpr) and node and
- isinstance(node.node, TypeAlias) and not node.node.no_args)):
+ if (
+ isinstance(typ, IndexExpr)
+ and isinstance(typ.analyzed, (TypeApplication, TypeAliasExpr))
+ ) or (
+ isinstance(typ, NameExpr)
+ and node
+ and isinstance(node.node, TypeAlias)
+ and not node.node.no_args
+ ):
self.msg.type_arguments_not_allowed(e)
if isinstance(typ, RefExpr) and isinstance(typ.node, TypeInfo):
if typ.node.typeddict_type:
@@ -325,25 +489,33 @@ def visit_call_expr_inner(self, e: CallExpr, allow_none_return: bool = False) ->
type_context = None
if isinstance(e.callee, LambdaExpr):
formal_to_actual = map_actuals_to_formals(
- e.arg_kinds, e.arg_names,
- e.callee.arg_kinds, e.callee.arg_names,
- lambda i: self.accept(e.args[i]))
-
- arg_types = [join.join_type_list([self.accept(e.args[j]) for j in formal_to_actual[i]])
- for i in range(len(e.callee.arg_kinds))]
- type_context = CallableType(arg_types, e.callee.arg_kinds, e.callee.arg_names,
- ret_type=self.object_type(),
- fallback=self.named_type('builtins.function'))
- callee_type = get_proper_type(self.accept(e.callee, type_context, always_allow_any=True))
- if (isinstance(e.callee, RefExpr)
- and isinstance(callee_type, CallableType)
- and callee_type.type_guard is not None):
- # Cache it for find_isinstance_check()
- e.callee.type_guard = callee_type.type_guard
- if (self.chk.options.disallow_untyped_calls and
- self.chk.in_checked_function() and
- isinstance(callee_type, CallableType)
- and callee_type.implicit):
+ e.arg_kinds,
+ e.arg_names,
+ e.callee.arg_kinds,
+ e.callee.arg_names,
+ lambda i: self.accept(e.args[i]),
+ )
+
+ arg_types = [
+ join.join_type_list([self.accept(e.args[j]) for j in formal_to_actual[i]])
+ for i in range(len(e.callee.arg_kinds))
+ ]
+ type_context = CallableType(
+ arg_types,
+ e.callee.arg_kinds,
+ e.callee.arg_names,
+ ret_type=self.object_type(),
+ fallback=self.named_type("builtins.function"),
+ )
+ callee_type = get_proper_type(
+ self.accept(e.callee, type_context, always_allow_any=True, is_callee=True)
+ )
+ if (
+ self.chk.options.disallow_untyped_calls
+ and self.chk.in_checked_function()
+ and isinstance(callee_type, CallableType)
+ and callee_type.implicit
+ ):
self.msg.untyped_function_call(callee_type, e)
# Figure out the full name of the callee for plugin lookup.
@@ -354,7 +526,7 @@ def visit_call_expr_inner(self, e: CallExpr, allow_none_return: bool = False) ->
# There are two special cases where plugins might act:
# * A "static" reference/alias to a class or function;
# get_function_hook() will be invoked for these.
- fullname = e.callee.fullname
+ fullname = e.callee.fullname or None
if isinstance(e.callee.node, TypeAlias):
target = get_proper_type(e.callee.node.target)
if isinstance(target, Instance):
@@ -363,19 +535,22 @@ def visit_call_expr_inner(self, e: CallExpr, allow_none_return: bool = False) ->
# method_fullname() for details on supported objects);
# get_method_hook() and get_method_signature_hook() will
# be invoked for these.
- if (fullname is None
- and isinstance(e.callee, MemberExpr)
- and e.callee.expr in self.chk.type_map):
+ if (
+ not fullname
+ and isinstance(e.callee, MemberExpr)
+ and self.chk.has_type(e.callee.expr)
+ ):
member = e.callee.name
- object_type = self.chk.type_map[e.callee.expr]
- ret_type = self.check_call_expr_with_callee_type(callee_type, e, fullname,
- object_type, member)
+ object_type = self.chk.lookup_type(e.callee.expr)
+ ret_type = self.check_call_expr_with_callee_type(
+ callee_type, e, fullname, object_type, member
+ )
if isinstance(e.callee, RefExpr) and len(e.args) == 2:
- if e.callee.fullname in ('builtins.isinstance', 'builtins.issubclass'):
+ if e.callee.fullname in ("builtins.isinstance", "builtins.issubclass"):
self.check_runtime_protocol_test(e)
- if e.callee.fullname == 'builtins.issubclass':
+ if e.callee.fullname == "builtins.issubclass":
self.check_protocol_issubclass(e)
- if isinstance(e.callee, MemberExpr) and e.callee.name == 'format':
+ if isinstance(e.callee, MemberExpr) and e.callee.name == "format":
self.check_str_format_call(e)
ret_type = get_proper_type(ret_type)
if isinstance(ret_type, UnionType):
@@ -385,8 +560,11 @@ def visit_call_expr_inner(self, e: CallExpr, allow_none_return: bool = False) ->
# Warn on calls to functions that always return None. The check
# of ret_type is both a common-case optimization and prevents reporting
# the error in dynamic functions (where it will be Any).
- if (not allow_none_return and isinstance(ret_type, NoneType)
- and self.always_returns_none(e.callee)):
+ if (
+ not allow_none_return
+ and isinstance(ret_type, NoneType)
+ and self.always_returns_none(e.callee)
+ ):
self.chk.msg.does_not_return_value(callee_type, e)
return AnyType(TypeOfAny.from_error)
return ret_type
@@ -395,16 +573,16 @@ def check_str_format_call(self, e: CallExpr) -> None:
"""More precise type checking for str.format() calls on literals."""
assert isinstance(e.callee, MemberExpr)
format_value = None
- if isinstance(e.callee.expr, (StrExpr, UnicodeExpr)):
+ if isinstance(e.callee.expr, StrExpr):
format_value = e.callee.expr.value
- elif e.callee.expr in self.chk.type_map:
- base_typ = try_getting_literal(self.chk.type_map[e.callee.expr])
+ elif self.chk.has_type(e.callee.expr):
+ base_typ = try_getting_literal(self.chk.lookup_type(e.callee.expr))
if isinstance(base_typ, LiteralType) and isinstance(base_typ.value, str):
format_value = base_typ.value
if format_value is not None:
self.strfrm_checker.check_str_format_call(e, format_value)
- def method_fullname(self, object_type: Type, method_name: str) -> Optional[str]:
+ def method_fullname(self, object_type: Type, method_name: str) -> str | None:
"""Convert a method name to a fully qualified name, based on the type of the object that
it is invoked on. Return `None` if the name of `object_type` cannot be determined.
"""
@@ -427,8 +605,8 @@ def method_fullname(self, object_type: Type, method_name: str) -> Optional[str]:
elif isinstance(object_type, TupleType):
type_name = tuple_fallback(object_type).type.fullname
- if type_name is not None:
- return '{}.{}'.format(type_name, method_name)
+ if type_name:
+ return f"{type_name}.{method_name}"
else:
return None
@@ -438,7 +616,7 @@ def always_returns_none(self, node: Expression) -> bool:
if self.defn_returns_none(node.node):
return True
if isinstance(node, MemberExpr) and node.node is None: # instance or class attribute
- typ = get_proper_type(self.chk.type_map.get(node.expr))
+ typ = get_proper_type(self.chk.lookup_type(node.expr))
if isinstance(typ, Instance):
info = typ.type
elif isinstance(typ, CallableType) and typ.is_type_obj():
@@ -454,74 +632,86 @@ def always_returns_none(self, node: Expression) -> bool:
return True
return False
- def defn_returns_none(self, defn: Optional[SymbolNode]) -> bool:
+ def defn_returns_none(self, defn: SymbolNode | None) -> bool:
"""Check if `defn` can _only_ return None."""
if isinstance(defn, FuncDef):
- return (isinstance(defn.type, CallableType) and
- isinstance(get_proper_type(defn.type.ret_type), NoneType))
+ return isinstance(defn.type, CallableType) and isinstance(
+ get_proper_type(defn.type.ret_type), NoneType
+ )
if isinstance(defn, OverloadedFuncDef):
return all(self.defn_returns_none(item) for item in defn.items)
if isinstance(defn, Var):
typ = get_proper_type(defn.type)
- if (not defn.is_inferred and isinstance(typ, CallableType) and
- isinstance(get_proper_type(typ.ret_type), NoneType)):
+ if (
+ not defn.is_inferred
+ and isinstance(typ, CallableType)
+ and isinstance(get_proper_type(typ.ret_type), NoneType)
+ ):
return True
if isinstance(typ, Instance):
- sym = typ.type.get('__call__')
+ sym = typ.type.get("__call__")
if sym and self.defn_returns_none(sym.node):
return True
return False
def check_runtime_protocol_test(self, e: CallExpr) -> None:
for expr in mypy.checker.flatten(e.args[1]):
- tp = get_proper_type(self.chk.type_map[expr])
- if (isinstance(tp, CallableType) and tp.is_type_obj() and
- tp.type_object().is_protocol and
- not tp.type_object().runtime_protocol):
+ tp = get_proper_type(self.chk.lookup_type(expr))
+ if (
+ isinstance(tp, CallableType)
+ and tp.is_type_obj()
+ and tp.type_object().is_protocol
+ and not tp.type_object().runtime_protocol
+ ):
self.chk.fail(message_registry.RUNTIME_PROTOCOL_EXPECTED, e)
def check_protocol_issubclass(self, e: CallExpr) -> None:
for expr in mypy.checker.flatten(e.args[1]):
- tp = get_proper_type(self.chk.type_map[expr])
- if (isinstance(tp, CallableType) and tp.is_type_obj() and
- tp.type_object().is_protocol):
+ tp = get_proper_type(self.chk.lookup_type(expr))
+ if isinstance(tp, CallableType) and tp.is_type_obj() and tp.type_object().is_protocol:
attr_members = non_method_protocol_members(tp.type_object())
if attr_members:
- self.chk.msg.report_non_method_protocol(tp.type_object(),
- attr_members, e)
-
- def check_typeddict_call(self, callee: TypedDictType,
- arg_kinds: List[ArgKind],
- arg_names: Sequence[Optional[str]],
- args: List[Expression],
- context: Context) -> Type:
+ self.chk.msg.report_non_method_protocol(tp.type_object(), attr_members, e)
+
+ def check_typeddict_call(
+ self,
+ callee: TypedDictType,
+ arg_kinds: list[ArgKind],
+ arg_names: Sequence[str | None],
+ args: list[Expression],
+ context: Context,
+ orig_callee: Type | None,
+ ) -> Type:
if len(args) >= 1 and all([ak == ARG_NAMED for ak in arg_kinds]):
# ex: Point(x=42, y=1337)
assert all(arg_name is not None for arg_name in arg_names)
item_names = cast(List[str], arg_names)
item_args = args
return self.check_typeddict_call_with_kwargs(
- callee, OrderedDict(zip(item_names, item_args)), context)
+ callee, dict(zip(item_names, item_args)), context, orig_callee
+ )
if len(args) == 1 and arg_kinds[0] == ARG_POS:
unique_arg = args[0]
if isinstance(unique_arg, DictExpr):
# ex: Point({'x': 42, 'y': 1337})
- return self.check_typeddict_call_with_dict(callee, unique_arg, context)
+ return self.check_typeddict_call_with_dict(
+ callee, unique_arg, context, orig_callee
+ )
if isinstance(unique_arg, CallExpr) and isinstance(unique_arg.analyzed, DictExpr):
# ex: Point(dict(x=42, y=1337))
- return self.check_typeddict_call_with_dict(callee, unique_arg.analyzed, context)
+ return self.check_typeddict_call_with_dict(
+ callee, unique_arg.analyzed, context, orig_callee
+ )
if len(args) == 0:
# ex: EmptyDict()
- return self.check_typeddict_call_with_kwargs(
- callee, OrderedDict(), context)
+ return self.check_typeddict_call_with_kwargs(callee, {}, context, orig_callee)
self.chk.fail(message_registry.INVALID_TYPEDDICT_ARGS, context)
return AnyType(TypeOfAny.from_error)
- def validate_typeddict_kwargs(
- self, kwargs: DictExpr) -> 'Optional[OrderedDict[str, Expression]]':
+ def validate_typeddict_kwargs(self, kwargs: DictExpr) -> dict[str, Expression] | None:
item_args = [item[1] for item in kwargs.items]
item_names = [] # List[str]
@@ -534,69 +724,142 @@ def validate_typeddict_kwargs(
literal_value = values[0]
if literal_value is None:
key_context = item_name_expr or item_arg
- self.chk.fail(message_registry.TYPEDDICT_KEY_MUST_BE_STRING_LITERAL,
- key_context)
+ self.chk.fail(message_registry.TYPEDDICT_KEY_MUST_BE_STRING_LITERAL, key_context)
return None
else:
item_names.append(literal_value)
- return OrderedDict(zip(item_names, item_args))
+ return dict(zip(item_names, item_args))
- def match_typeddict_call_with_dict(self, callee: TypedDictType,
- kwargs: DictExpr,
- context: Context) -> bool:
+ def match_typeddict_call_with_dict(
+ self, callee: TypedDictType, kwargs: DictExpr, context: Context
+ ) -> bool:
validated_kwargs = self.validate_typeddict_kwargs(kwargs=kwargs)
if validated_kwargs is not None:
- return (callee.required_keys <= set(validated_kwargs.keys())
- <= set(callee.items.keys()))
+ return callee.required_keys <= set(validated_kwargs.keys()) <= set(callee.items.keys())
else:
return False
- def check_typeddict_call_with_dict(self, callee: TypedDictType,
- kwargs: DictExpr,
- context: Context) -> Type:
+ def check_typeddict_call_with_dict(
+ self, callee: TypedDictType, kwargs: DictExpr, context: Context, orig_callee: Type | None
+ ) -> Type:
validated_kwargs = self.validate_typeddict_kwargs(kwargs=kwargs)
if validated_kwargs is not None:
return self.check_typeddict_call_with_kwargs(
- callee,
- kwargs=validated_kwargs,
- context=context)
+ callee, kwargs=validated_kwargs, context=context, orig_callee=orig_callee
+ )
else:
return AnyType(TypeOfAny.from_error)
- def check_typeddict_call_with_kwargs(self, callee: TypedDictType,
- kwargs: 'OrderedDict[str, Expression]',
- context: Context) -> Type:
+ def typeddict_callable(self, info: TypeInfo) -> CallableType:
+ """Construct a reasonable type for a TypedDict type in runtime context.
+
+ If it appears as a callee, it will be special-cased anyway, e.g. it is
+ also allowed to accept a single positional argument if it is a dict literal.
+
+ Note it is not safe to move this to type_object_type() since it will crash
+    on plugin-generated TypedDicts, which may not have the special_alias.
+ """
+ assert info.special_alias is not None
+ target = info.special_alias.target
+ assert isinstance(target, ProperType) and isinstance(target, TypedDictType)
+ expected_types = list(target.items.values())
+ kinds = [ArgKind.ARG_NAMED] * len(expected_types)
+ names = list(target.items.keys())
+ return CallableType(
+ expected_types,
+ kinds,
+ names,
+ target,
+ self.named_type("builtins.type"),
+ variables=info.defn.type_vars,
+ )
+
+ def typeddict_callable_from_context(self, callee: TypedDictType) -> CallableType:
+ return CallableType(
+ list(callee.items.values()),
+ [ArgKind.ARG_NAMED] * len(callee.items),
+ list(callee.items.keys()),
+ callee,
+ self.named_type("builtins.type"),
+ )
+
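# The runtime-context usage this synthesized signature supports, as user
# code (a sketch): a TypedDict referenced as a value and then called.
from typing import TypedDict

class Point(TypedDict):
    x: int
    y: int

make = Point            # runtime context: typed via typeddict_callable()
p = make(x=42, y=1337)  # checked against the (x: int, y: int) signature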
+ def check_typeddict_call_with_kwargs(
+ self,
+ callee: TypedDictType,
+ kwargs: dict[str, Expression],
+ context: Context,
+ orig_callee: Type | None,
+ ) -> Type:
if not (callee.required_keys <= set(kwargs.keys()) <= set(callee.items.keys())):
- expected_keys = [key for key in callee.items.keys()
- if key in callee.required_keys or key in kwargs.keys()]
+ expected_keys = [
+ key
+ for key in callee.items.keys()
+ if key in callee.required_keys or key in kwargs.keys()
+ ]
actual_keys = kwargs.keys()
self.msg.unexpected_typeddict_keys(
- callee,
- expected_keys=expected_keys,
- actual_keys=list(actual_keys),
- context=context)
+ callee, expected_keys=expected_keys, actual_keys=list(actual_keys), context=context
+ )
return AnyType(TypeOfAny.from_error)
- for (item_name, item_expected_type) in callee.items.items():
+ orig_callee = get_proper_type(orig_callee)
+ if isinstance(orig_callee, CallableType):
+ infer_callee = orig_callee
+ else:
+ # Try reconstructing from type context.
+ if callee.fallback.type.special_alias is not None:
+ infer_callee = self.typeddict_callable(callee.fallback.type)
+ else:
+ # Likely a TypedDict type generated by a plugin.
+ infer_callee = self.typeddict_callable_from_context(callee)
+
+        # We don't show any errors here; we just infer types in a generic TypedDict type.
+        # A custom error message will be given below, if there are errors.
+ with self.msg.filter_errors(), self.chk.local_type_map():
+ orig_ret_type, _ = self.check_callable_call(
+ infer_callee,
+ list(kwargs.values()),
+ [ArgKind.ARG_NAMED] * len(kwargs),
+ context,
+ list(kwargs.keys()),
+ None,
+ None,
+ None,
+ )
+
+ ret_type = get_proper_type(orig_ret_type)
+ if not isinstance(ret_type, TypedDictType):
+ # If something went really wrong, type-check call with original type,
+ # this may give a better error message.
+ ret_type = callee
+
+ for (item_name, item_expected_type) in ret_type.items.items():
if item_name in kwargs:
item_value = kwargs[item_name]
self.chk.check_simple_assignment(
- lvalue_type=item_expected_type, rvalue=item_value, context=item_value,
- msg=message_registry.INCOMPATIBLE_TYPES,
- lvalue_name='TypedDict item "{}"'.format(item_name),
- rvalue_name='expression',
- code=codes.TYPEDDICT_ITEM)
+ lvalue_type=item_expected_type,
+ rvalue=item_value,
+ context=item_value,
+ msg=ErrorMessage(
+ message_registry.INCOMPATIBLE_TYPES.value, code=codes.TYPEDDICT_ITEM
+ ),
+ lvalue_name=f'TypedDict item "{item_name}"',
+ rvalue_name="expression",
+ )
- return callee
+ return orig_ret_type
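# Sketch of the inference path above from the user's side (assumes a typing
# implementation with generic TypedDict support, e.g. typing_extensions):
from typing import Generic, TypeVar
from typing_extensions import TypedDict

T = TypeVar("T")

class Box(TypedDict, Generic[T]):
    content: T

b = Box(content=42)  # T is inferred as int by the filtered check_callable_call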
- def get_partial_self_var(self, expr: MemberExpr) -> Optional[Var]:
+ def get_partial_self_var(self, expr: MemberExpr) -> Var | None:
"""Get variable node for a partial self attribute.
If the expression is not a self attribute, or attribute is not variable,
or variable is not partial, return None.
"""
- if not (isinstance(expr.expr, NameExpr) and
- isinstance(expr.expr.node, Var) and expr.expr.node.is_self):
+ if not (
+ isinstance(expr.expr, NameExpr)
+ and isinstance(expr.expr.node, Var)
+ and expr.expr.node.is_self
+ ):
# Not a self.attr expression.
return None
info = self.chk.scope.enclosing_class()
@@ -609,11 +872,11 @@ def get_partial_self_var(self, expr: MemberExpr) -> Optional[Var]:
return None
# Types and methods that can be used to infer partial types.
- item_args: ClassVar[Dict[str, List[str]]] = {
+ item_args: ClassVar[dict[str, list[str]]] = {
"builtins.list": ["append"],
"builtins.set": ["add", "discard"],
}
- container_args: ClassVar[Dict[str, Dict[str, List[str]]]] = {
+ container_args: ClassVar[dict[str, dict[str, list[str]]]] = {
"builtins.list": {"extend": ["builtins.list"]},
"builtins.dict": {"update": ["builtins.dict"]},
"collections.OrderedDict": {"update": ["builtins.dict"]},
@@ -632,7 +895,8 @@ def try_infer_partial_type(self, e: CallExpr) -> None:
return
var, partial_types = ret
typ = self.try_infer_partial_value_type_from_call(e, callee.name, var)
- if typ is not None:
+ # Var may be deleted from partial_types in try_infer_partial_value_type_from_call
+ if typ is not None and var in partial_types:
var.type = typ
del partial_types[var]
elif isinstance(callee.expr, IndexExpr) and isinstance(callee.expr.base, RefExpr):
@@ -656,11 +920,10 @@ def try_infer_partial_type(self, e: CallExpr) -> None:
# Store inferred partial type.
assert partial_type.type is not None
typename = partial_type.type.fullname
- var.type = self.chk.named_generic_type(typename,
- [key_type, value_type])
+ var.type = self.chk.named_generic_type(typename, [key_type, value_type])
del partial_types[var]
- def get_partial_var(self, ref: RefExpr) -> Optional[Tuple[Var, Dict[Var, Context]]]:
+ def get_partial_var(self, ref: RefExpr) -> tuple[Var, dict[Var, Context]] | None:
var = ref.node
if var is None and isinstance(ref, MemberExpr):
var = self.get_partial_self_var(ref)
@@ -672,10 +935,8 @@ def get_partial_var(self, ref: RefExpr) -> Optional[Tuple[Var, Dict[Var, Context
return var, partial_types
def try_infer_partial_value_type_from_call(
- self,
- e: CallExpr,
- methodname: str,
- var: Var) -> Optional[Instance]:
+ self, e: CallExpr, methodname: str, var: Var
+ ) -> Instance | None:
"""Try to make partial type precise from a call such as 'x.append(y)'."""
if self.chk.current_node_deferred:
return None
@@ -689,37 +950,45 @@ def try_infer_partial_value_type_from_call(
typename = partial_type.type.fullname
# Sometimes we can infer a full type for a partial List, Dict or Set type.
# TODO: Don't infer argument expression twice.
- if (typename in self.item_args and methodname in self.item_args[typename]
- and e.arg_kinds == [ARG_POS]):
+ if (
+ typename in self.item_args
+ and methodname in self.item_args[typename]
+ and e.arg_kinds == [ARG_POS]
+ ):
item_type = self.accept(e.args[0])
if mypy.checker.is_valid_inferred_type(item_type):
return self.chk.named_generic_type(typename, [item_type])
- elif (typename in self.container_args
- and methodname in self.container_args[typename]
- and e.arg_kinds == [ARG_POS]):
+ elif (
+ typename in self.container_args
+ and methodname in self.container_args[typename]
+ and e.arg_kinds == [ARG_POS]
+ ):
arg_type = get_proper_type(self.accept(e.args[0]))
if isinstance(arg_type, Instance):
arg_typename = arg_type.type.fullname
if arg_typename in self.container_args[typename][methodname]:
- if all(mypy.checker.is_valid_inferred_type(item_type)
- for item_type in arg_type.args):
- return self.chk.named_generic_type(typename,
- list(arg_type.args))
+ if all(
+ mypy.checker.is_valid_inferred_type(item_type)
+ for item_type in arg_type.args
+ ):
+ return self.chk.named_generic_type(typename, list(arg_type.args))
elif isinstance(arg_type, AnyType):
return self.chk.named_type(typename)
return None
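# The user-visible effect of this refinement (sketch): method calls on a
# partially typed container let mypy fix the item types with no annotation.
x = []              # partial type: list[?]
x.append(1)         # item argument: refined here to list[int]

d = {}              # partial type: dict[?, ?]
d.update({"a": 0})  # container argument: refined to dict[str, int]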
- def apply_function_plugin(self,
- callee: CallableType,
- arg_kinds: List[ArgKind],
- arg_types: List[Type],
- arg_names: Optional[Sequence[Optional[str]]],
- formal_to_actual: List[List[int]],
- args: List[Expression],
- fullname: str,
- object_type: Optional[Type],
- context: Context) -> Type:
+ def apply_function_plugin(
+ self,
+ callee: CallableType,
+ arg_kinds: list[ArgKind],
+ arg_types: list[Type],
+ arg_names: Sequence[str | None] | None,
+ formal_to_actual: list[list[int]],
+ args: list[Expression],
+ fullname: str,
+ object_type: Type | None,
+ context: Context,
+ ) -> Type:
"""Use special case logic to infer the return type of a specific named function/method.
Caller must ensure that a plugin hook exists. There are two different cases:
@@ -732,10 +1001,10 @@ def apply_function_plugin(self,
Return the inferred return type.
"""
num_formals = len(callee.arg_types)
- formal_arg_types: List[List[Type]] = [[] for _ in range(num_formals)]
- formal_arg_exprs: List[List[Expression]] = [[] for _ in range(num_formals)]
- formal_arg_names: List[List[Optional[str]]] = [[] for _ in range(num_formals)]
- formal_arg_kinds: List[List[ArgKind]] = [[] for _ in range(num_formals)]
+ formal_arg_types: list[list[Type]] = [[] for _ in range(num_formals)]
+ formal_arg_exprs: list[list[Expression]] = [[] for _ in range(num_formals)]
+ formal_arg_names: list[list[str | None]] = [[] for _ in range(num_formals)]
+ formal_arg_kinds: list[list[ArgKind]] = [[] for _ in range(num_formals)]
for formal, actuals in enumerate(formal_to_actual):
for actual in actuals:
formal_arg_types[formal].append(arg_types[actual])
@@ -749,35 +1018,55 @@ def apply_function_plugin(self,
callback = self.plugin.get_function_hook(fullname)
assert callback is not None # Assume that caller ensures this
return callback(
- FunctionContext(formal_arg_types, formal_arg_kinds,
- callee.arg_names, formal_arg_names,
- callee.ret_type, formal_arg_exprs, context, self.chk))
+ FunctionContext(
+ formal_arg_types,
+ formal_arg_kinds,
+ callee.arg_names,
+ formal_arg_names,
+ callee.ret_type,
+ formal_arg_exprs,
+ context,
+ self.chk,
+ )
+ )
else:
# Apply method plugin
method_callback = self.plugin.get_method_hook(fullname)
assert method_callback is not None # Assume that caller ensures this
object_type = get_proper_type(object_type)
return method_callback(
- MethodContext(object_type, formal_arg_types, formal_arg_kinds,
- callee.arg_names, formal_arg_names,
- callee.ret_type, formal_arg_exprs, context, self.chk))
+ MethodContext(
+ object_type,
+ formal_arg_types,
+ formal_arg_kinds,
+ callee.arg_names,
+ formal_arg_names,
+ callee.ret_type,
+ formal_arg_exprs,
+ context,
+ self.chk,
+ )
+ )
def apply_signature_hook(
- self, callee: FunctionLike, args: List[Expression],
- arg_kinds: List[ArgKind],
- arg_names: Optional[Sequence[Optional[str]]],
- hook: Callable[
- [List[List[Expression]], CallableType],
- FunctionLike,
- ]) -> FunctionLike:
+ self,
+ callee: FunctionLike,
+ args: list[Expression],
+ arg_kinds: list[ArgKind],
+ arg_names: Sequence[str | None] | None,
+ hook: Callable[[list[list[Expression]], CallableType], FunctionLike],
+ ) -> FunctionLike:
"""Helper to apply a signature hook for either a function or method"""
if isinstance(callee, CallableType):
num_formals = len(callee.arg_kinds)
formal_to_actual = map_actuals_to_formals(
- arg_kinds, arg_names,
- callee.arg_kinds, callee.arg_names,
- lambda i: self.accept(args[i]))
- formal_arg_exprs: List[List[Expression]] = [[] for _ in range(num_formals)]
+ arg_kinds,
+ arg_names,
+ callee.arg_kinds,
+ callee.arg_names,
+ lambda i: self.accept(args[i]),
+ )
+ formal_arg_exprs: list[list[Expression]] = [[] for _ in range(num_formals)]
for formal, actuals in enumerate(formal_to_actual):
for actual in actuals:
formal_arg_exprs[formal].append(args[actual])
@@ -786,40 +1075,63 @@ def apply_signature_hook(
assert isinstance(callee, Overloaded)
items = []
for item in callee.items:
- adjusted = self.apply_signature_hook(
- item, args, arg_kinds, arg_names, hook)
+ adjusted = self.apply_signature_hook(item, args, arg_kinds, arg_names, hook)
assert isinstance(adjusted, CallableType)
items.append(adjusted)
return Overloaded(items)
def apply_function_signature_hook(
- self, callee: FunctionLike, args: List[Expression],
- arg_kinds: List[ArgKind], context: Context,
- arg_names: Optional[Sequence[Optional[str]]],
- signature_hook: Callable[[FunctionSigContext], FunctionLike]) -> FunctionLike:
+ self,
+ callee: FunctionLike,
+ args: list[Expression],
+ arg_kinds: list[ArgKind],
+ context: Context,
+ arg_names: Sequence[str | None] | None,
+ signature_hook: Callable[[FunctionSigContext], FunctionLike],
+ ) -> FunctionLike:
"""Apply a plugin hook that may infer a more precise signature for a function."""
return self.apply_signature_hook(
- callee, args, arg_kinds, arg_names,
- (lambda args, sig:
- signature_hook(FunctionSigContext(args, sig, context, self.chk))))
+ callee,
+ args,
+ arg_kinds,
+ arg_names,
+ (lambda args, sig: signature_hook(FunctionSigContext(args, sig, context, self.chk))),
+ )
def apply_method_signature_hook(
- self, callee: FunctionLike, args: List[Expression],
- arg_kinds: List[ArgKind], context: Context,
- arg_names: Optional[Sequence[Optional[str]]], object_type: Type,
- signature_hook: Callable[[MethodSigContext], FunctionLike]) -> FunctionLike:
+ self,
+ callee: FunctionLike,
+ args: list[Expression],
+ arg_kinds: list[ArgKind],
+ context: Context,
+ arg_names: Sequence[str | None] | None,
+ object_type: Type,
+ signature_hook: Callable[[MethodSigContext], FunctionLike],
+ ) -> FunctionLike:
"""Apply a plugin hook that may infer a more precise signature for a method."""
pobject_type = get_proper_type(object_type)
return self.apply_signature_hook(
- callee, args, arg_kinds, arg_names,
- (lambda args, sig:
- signature_hook(MethodSigContext(pobject_type, args, sig, context, self.chk))))
+ callee,
+ args,
+ arg_kinds,
+ arg_names,
+ (
+ lambda args, sig: signature_hook(
+ MethodSigContext(pobject_type, args, sig, context, self.chk)
+ )
+ ),
+ )
def transform_callee_type(
- self, callable_name: Optional[str], callee: Type, args: List[Expression],
- arg_kinds: List[ArgKind], context: Context,
- arg_names: Optional[Sequence[Optional[str]]] = None,
- object_type: Optional[Type] = None) -> Type:
+ self,
+ callable_name: str | None,
+ callee: Type,
+ args: list[Expression],
+ arg_kinds: list[ArgKind],
+ context: Context,
+ arg_names: Sequence[str | None] | None = None,
+ object_type: Type | None = None,
+ ) -> Type:
"""Attempt to determine a more accurate signature for a method call.
This is done by looking up and applying a method signature hook (if one exists for the
@@ -840,21 +1152,25 @@ def transform_callee_type(
method_sig_hook = self.plugin.get_method_signature_hook(callable_name)
if method_sig_hook:
return self.apply_method_signature_hook(
- callee, args, arg_kinds, context, arg_names, object_type, method_sig_hook)
+ callee, args, arg_kinds, context, arg_names, object_type, method_sig_hook
+ )
else:
function_sig_hook = self.plugin.get_function_signature_hook(callable_name)
if function_sig_hook:
return self.apply_function_signature_hook(
- callee, args, arg_kinds, context, arg_names, function_sig_hook)
+ callee, args, arg_kinds, context, arg_names, function_sig_hook
+ )
return callee
- def check_call_expr_with_callee_type(self,
- callee_type: Type,
- e: CallExpr,
- callable_name: Optional[str],
- object_type: Optional[Type],
- member: Optional[str] = None) -> Type:
+ def check_call_expr_with_callee_type(
+ self,
+ callee_type: Type,
+ e: CallExpr,
+ callable_name: str | None,
+ object_type: Type | None,
+ member: str | None = None,
+ ) -> Type:
"""Type check call expression.
The callee_type should be used as the type of callee expression. In particular,
@@ -873,44 +1189,71 @@ def check_call_expr_with_callee_type(self,
if callable_name:
# Try to refine the call signature using plugin hooks before checking the call.
callee_type = self.transform_callee_type(
- callable_name, callee_type, e.args, e.arg_kinds, e, e.arg_names, object_type)
+ callable_name, callee_type, e.args, e.arg_kinds, e, e.arg_names, object_type
+ )
# Unions are special-cased to allow plugins to act on each item in the union.
elif member is not None and isinstance(object_type, UnionType):
return self.check_union_call_expr(e, object_type, member)
- return self.check_call(callee_type, e.args, e.arg_kinds, e,
- e.arg_names, callable_node=e.callee,
- callable_name=callable_name,
- object_type=object_type)[0]
+ ret_type, callee_type = self.check_call(
+ callee_type,
+ e.args,
+ e.arg_kinds,
+ e,
+ e.arg_names,
+ callable_node=e.callee,
+ callable_name=callable_name,
+ object_type=object_type,
+ )
+ proper_callee = get_proper_type(callee_type)
+ if (
+ isinstance(e.callee, RefExpr)
+ and isinstance(proper_callee, CallableType)
+ and proper_callee.type_guard is not None
+ ):
+ # Cache it for find_isinstance_check()
+ e.callee.type_guard = proper_callee.type_guard
+ return ret_type
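# A small illustration (user code, not checker internals) of the caching above:
# the TypeGuard carried by the callee's type is stored on the RefExpr so that
# find_isinstance_check() can narrow in the if-branch. Assumes Python 3.10+
# for typing.TypeGuard.
from typing import List, TypeGuard


def is_str_list(values: List[object]) -> TypeGuard[List[str]]:
    return all(isinstance(v, str) for v in values)


def greet_all(values: List[object]) -> None:
    if is_str_list(values):  # the cached guard narrows values to List[str]
        print(", ".join(values))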
def check_union_call_expr(self, e: CallExpr, object_type: UnionType, member: str) -> Type:
- """"Type check calling a member expression where the base type is a union."""
- res: List[Type] = []
+ """Type check calling a member expression where the base type is a union."""
+ res: list[Type] = []
for typ in object_type.relevant_items():
# Member access errors are already reported when visiting the member expression.
- with self.msg.disable_errors():
- item = analyze_member_access(member, typ, e, False, False, False,
- self.msg, original_type=object_type, chk=self.chk,
- in_literal_context=self.is_literal_context(),
- self_type=typ)
+ with self.msg.filter_errors():
+ item = analyze_member_access(
+ member,
+ typ,
+ e,
+ False,
+ False,
+ False,
+ self.msg,
+ original_type=object_type,
+ chk=self.chk,
+ in_literal_context=self.is_literal_context(),
+ self_type=typ,
+ )
narrowed = self.narrow_type_from_binder(e.callee, item, skip_non_overlapping=True)
if narrowed is None:
continue
callable_name = self.method_fullname(typ, member)
item_object_type = typ if callable_name else None
- res.append(self.check_call_expr_with_callee_type(narrowed, e, callable_name,
- item_object_type))
+ res.append(
+ self.check_call_expr_with_callee_type(narrowed, e, callable_name, item_object_type)
+ )
return make_simplified_union(res)
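# Sketch of what check_union_call_expr() handles: a member call on a union is
# checked against each relevant item, and the per-item results are unioned.
from typing import Union


class Row:
    def key(self) -> int:
        return 1


class Header:
    def key(self) -> str:
        return "h"


def sort_key(item: Union[Row, Header]) -> Union[int, str]:
    return item.key()  # checked per item; result is Union[int, str]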
- def check_call(self,
- callee: Type,
- args: List[Expression],
- arg_kinds: List[ArgKind],
- context: Context,
- arg_names: Optional[Sequence[Optional[str]]] = None,
- callable_node: Optional[Expression] = None,
- arg_messages: Optional[MessageBuilder] = None,
- callable_name: Optional[str] = None,
- object_type: Optional[Type] = None) -> Tuple[Type, Type]:
+ def check_call(
+ self,
+ callee: Type,
+ args: list[Expression],
+ arg_kinds: list[ArgKind],
+ context: Context,
+ arg_names: Sequence[str | None] | None = None,
+ callable_node: Expression | None = None,
+ callable_name: str | None = None,
+ object_type: Type | None = None,
+ ) -> tuple[Type, Type]:
"""Type check a call.
Also infer type arguments if the callee is a generic function.
@@ -926,71 +1269,103 @@ def check_call(self,
arg_names: names of arguments (optional)
callable_node: associate the inferred callable type to this node,
if specified
- arg_messages: utility for generating messages, can be swapped to suppress errors,
- by default uses 'self.msg' to show errors
callable_name: Fully-qualified name of the function/method to call,
or None if unavailable (examples: 'builtins.open', 'typing.Mapping.get')
object_type: If callable_name refers to a method, the type of the object
on which the method is being called
"""
- arg_messages = arg_messages or self.msg
callee = get_proper_type(callee)
if isinstance(callee, CallableType):
- return self.check_callable_call(callee, args, arg_kinds, context, arg_names,
- callable_node, arg_messages, callable_name,
- object_type)
+ return self.check_callable_call(
+ callee,
+ args,
+ arg_kinds,
+ context,
+ arg_names,
+ callable_node,
+ callable_name,
+ object_type,
+ )
elif isinstance(callee, Overloaded):
- return self.check_overload_call(callee, args, arg_kinds, arg_names, callable_name,
- object_type, context, arg_messages)
+ return self.check_overload_call(
+ callee, args, arg_kinds, arg_names, callable_name, object_type, context
+ )
elif isinstance(callee, AnyType) or not self.chk.in_checked_function():
return self.check_any_type_call(args, callee)
elif isinstance(callee, UnionType):
- return self.check_union_call(callee, args, arg_kinds, arg_names, context, arg_messages)
+ return self.check_union_call(callee, args, arg_kinds, arg_names, context)
elif isinstance(callee, Instance):
- call_function = analyze_member_access('__call__', callee, context, is_lvalue=False,
- is_super=False, is_operator=True, msg=self.msg,
- original_type=callee, chk=self.chk,
- in_literal_context=self.is_literal_context())
+ call_function = analyze_member_access(
+ "__call__",
+ callee,
+ context,
+ is_lvalue=False,
+ is_super=False,
+ is_operator=True,
+ msg=self.msg,
+ original_type=callee,
+ chk=self.chk,
+ in_literal_context=self.is_literal_context(),
+ )
callable_name = callee.type.fullname + ".__call__"
# Apply method signature hook, if one exists
call_function = self.transform_callee_type(
- callable_name, call_function, args, arg_kinds, context, arg_names, callee)
- result = self.check_call(call_function, args, arg_kinds, context, arg_names,
- callable_node, arg_messages, callable_name, callee)
+ callable_name, call_function, args, arg_kinds, context, arg_names, callee
+ )
+ result = self.check_call(
+ call_function,
+ args,
+ arg_kinds,
+ context,
+ arg_names,
+ callable_node,
+ callable_name,
+ callee,
+ )
if callable_node:
# check_call() stored "call_function" as the type, which is incorrect.
# Override the type.
self.chk.store_type(callable_node, callee)
return result
elif isinstance(callee, TypeVarType):
- return self.check_call(callee.upper_bound, args, arg_kinds, context, arg_names,
- callable_node, arg_messages)
+ return self.check_call(
+ callee.upper_bound, args, arg_kinds, context, arg_names, callable_node
+ )
elif isinstance(callee, TypeType):
item = self.analyze_type_type_callee(callee.item, context)
- return self.check_call(item, args, arg_kinds, context, arg_names,
- callable_node, arg_messages)
+ return self.check_call(item, args, arg_kinds, context, arg_names, callable_node)
elif isinstance(callee, TupleType):
- return self.check_call(tuple_fallback(callee), args, arg_kinds, context,
- arg_names, callable_node, arg_messages, callable_name,
- object_type)
+ return self.check_call(
+ tuple_fallback(callee),
+ args,
+ arg_kinds,
+ context,
+ arg_names,
+ callable_node,
+ callable_name,
+ object_type,
+ )
else:
return self.msg.not_callable(callee, context), AnyType(TypeOfAny.from_error)
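# Sketch of the Instance branch above: calling an object value type checks via
# its __call__ member, and plugin hooks see the name "<class fullname>.__call__".
class Adder:
    def __call__(self, x: int) -> int:
        return x + 1


add = Adder()
add(3)  # checked against Adder.__call__, but the callee node keeps type Adder
# add("no")  # mypy would reject: incompatible argument to "__call__" of "Adder"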
- def check_callable_call(self,
- callee: CallableType,
- args: List[Expression],
- arg_kinds: List[ArgKind],
- context: Context,
- arg_names: Optional[Sequence[Optional[str]]],
- callable_node: Optional[Expression],
- arg_messages: MessageBuilder,
- callable_name: Optional[str],
- object_type: Optional[Type]) -> Tuple[Type, Type]:
+ def check_callable_call(
+ self,
+ callee: CallableType,
+ args: list[Expression],
+ arg_kinds: list[ArgKind],
+ context: Context,
+ arg_names: Sequence[str | None] | None,
+ callable_node: Expression | None,
+ callable_name: str | None,
+ object_type: Type | None,
+ ) -> tuple[Type, Type]:
"""Type check a call that targets a callable value.
See the docstring of check_call for more information.
"""
+ # Always unpack **kwargs before checking a call.
+ callee = callee.with_unpacked_kwargs()
if callable_name is None and callee.name:
callable_name = callee.name
ret_type = get_proper_type(callee.ret_type)
@@ -1000,79 +1375,156 @@ def check_callable_call(self,
# An Enum() call that failed SemanticAnalyzerPass2.check_enum_call().
return callee.ret_type, callee
- if (callee.is_type_obj() and callee.type_object().is_abstract
- # Exception for Type[...]
- and not callee.from_type_type
- and not callee.type_object().fallback_to_any):
+ if (
+ callee.is_type_obj()
+ and callee.type_object().is_protocol
+ # Exception for Type[...]
+ and not callee.from_type_type
+ ):
+ self.chk.fail(
+ message_registry.CANNOT_INSTANTIATE_PROTOCOL.format(callee.type_object().name),
+ context,
+ )
+ elif (
+ callee.is_type_obj()
+ and callee.type_object().is_abstract
+ # Exception for Type[...]
+ and not callee.from_type_type
+ and not callee.type_object().fallback_to_any
+ ):
type = callee.type_object()
+ # Determine whether the implicitly abstract attributes are functions with
+ # None-compatible return types.
+ abstract_attributes: dict[str, bool] = {}
+ for attr_name, abstract_status in type.abstract_attributes:
+ if abstract_status == IMPLICITLY_ABSTRACT:
+ abstract_attributes[attr_name] = self.can_return_none(type, attr_name)
+ else:
+ abstract_attributes[attr_name] = False
self.msg.cannot_instantiate_abstract_class(
- callee.type_object().name, type.abstract_attributes,
- context)
- elif (callee.is_type_obj() and callee.type_object().is_protocol
- # Exception for Type[...]
- and not callee.from_type_type):
- self.chk.fail(message_registry.CANNOT_INSTANTIATE_PROTOCOL
- .format(callee.type_object().name), context)
+ callee.type_object().name, abstract_attributes, context
+ )
formal_to_actual = map_actuals_to_formals(
- arg_kinds, arg_names,
- callee.arg_kinds, callee.arg_names,
- lambda i: self.accept(args[i]))
+ arg_kinds,
+ arg_names,
+ callee.arg_kinds,
+ callee.arg_names,
+ lambda i: self.accept(args[i]),
+ )
if callee.is_generic():
- need_refresh = any(isinstance(v, ParamSpecType) for v in callee.variables)
+ need_refresh = any(
+ isinstance(v, (ParamSpecType, TypeVarTupleType)) for v in callee.variables
+ )
callee = freshen_function_type_vars(callee)
- callee = self.infer_function_type_arguments_using_context(
- callee, context)
- callee = self.infer_function_type_arguments(
- callee, args, arg_kinds, formal_to_actual, context)
+ callee = self.infer_function_type_arguments_using_context(callee, context)
if need_refresh:
# Argument kinds etc. may have changed due to
- # ParamSpec variables being replaced with an arbitrary
+ # ParamSpec or TypeVarTuple variables being replaced with an arbitrary
# number of arguments; recalculate actual-to-formal map
formal_to_actual = map_actuals_to_formals(
- arg_kinds, arg_names,
- callee.arg_kinds, callee.arg_names,
- lambda i: self.accept(args[i]))
+ arg_kinds,
+ arg_names,
+ callee.arg_kinds,
+ callee.arg_names,
+ lambda i: self.accept(args[i]),
+ )
+ callee = self.infer_function_type_arguments(
+ callee, args, arg_kinds, formal_to_actual, context
+ )
+ if need_refresh:
+ formal_to_actual = map_actuals_to_formals(
+ arg_kinds,
+ arg_names,
+ callee.arg_kinds,
+ callee.arg_names,
+ lambda i: self.accept(args[i]),
+ )
param_spec = callee.param_spec()
if param_spec is not None and arg_kinds == [ARG_STAR, ARG_STAR2]:
arg1 = self.accept(args[0])
arg2 = self.accept(args[1])
- if (isinstance(arg1, ParamSpecType)
- and isinstance(arg2, ParamSpecType)
- and arg1.flavor == ParamSpecFlavor.ARGS
- and arg2.flavor == ParamSpecFlavor.KWARGS
- and arg1.id == arg2.id == param_spec.id):
+ if (
+ isinstance(arg1, ParamSpecType)
+ and isinstance(arg2, ParamSpecType)
+ and arg1.flavor == ParamSpecFlavor.ARGS
+ and arg2.flavor == ParamSpecFlavor.KWARGS
+ and arg1.id == arg2.id == param_spec.id
+ ):
return callee.ret_type, callee
- arg_types = self.infer_arg_types_in_context(
- callee, args, arg_kinds, formal_to_actual)
-
- self.check_argument_count(callee, arg_types, arg_kinds,
- arg_names, formal_to_actual, context, self.msg)
+ arg_types = self.infer_arg_types_in_context(callee, args, arg_kinds, formal_to_actual)
+
+ self.check_argument_count(
+ callee,
+ arg_types,
+ arg_kinds,
+ arg_names,
+ formal_to_actual,
+ context,
+ object_type,
+ callable_name,
+ )
- self.check_argument_types(arg_types, arg_kinds, args, callee, formal_to_actual, context,
- messages=arg_messages, object_type=object_type)
+ self.check_argument_types(
+ arg_types, arg_kinds, args, callee, formal_to_actual, context, object_type=object_type
+ )
- if (callee.is_type_obj() and (len(arg_types) == 1)
- and is_equivalent(callee.ret_type, self.named_type('builtins.type'))):
+ if (
+ callee.is_type_obj()
+ and (len(arg_types) == 1)
+ and is_equivalent(callee.ret_type, self.named_type("builtins.type"))
+ ):
callee = callee.copy_modified(ret_type=TypeType.make_normalized(arg_types[0]))
if callable_node:
# Store the inferred callable type.
self.chk.store_type(callable_node, callee)
- if (callable_name
- and ((object_type is None and self.plugin.get_function_hook(callable_name))
- or (object_type is not None
- and self.plugin.get_method_hook(callable_name)))):
+ if callable_name and (
+ (object_type is None and self.plugin.get_function_hook(callable_name))
+ or (object_type is not None and self.plugin.get_method_hook(callable_name))
+ ):
new_ret_type = self.apply_function_plugin(
- callee, arg_kinds, arg_types, arg_names, formal_to_actual, args,
- callable_name, object_type, context)
+ callee,
+ arg_kinds,
+ arg_types,
+ arg_names,
+ formal_to_actual,
+ args,
+ callable_name,
+ object_type,
+ context,
+ )
callee = callee.copy_modified(ret_type=new_ret_type)
return callee.ret_type, callee
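# Hedged sketch of the ParamSpec fast path above (assumes Python 3.10+ for
# typing.ParamSpec): *args/**kwargs typed P.args/P.kwargs that match the
# callee's own ParamSpec let the call through without expanding arguments.
from typing import Callable, ParamSpec, TypeVar

P = ParamSpec("P")
T = TypeVar("T")


def logged(fn: Callable[P, T]) -> Callable[P, T]:
    def wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
        print("calling", fn)
        return fn(*args, **kwargs)  # arg kinds are exactly [ARG_STAR, ARG_STAR2]

    return wrapper


@logged
def add(x: int, y: int) -> int:
    return x + y


add(1, 2)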
+ def can_return_none(self, type: TypeInfo, attr_name: str) -> bool:
+ """Is the given attribute a method with a None-compatible return type?
+
+ Overloads are only checked if there is an implementation.
+ """
+ if not state.strict_optional:
+ # If strict-optional is not set, is_subtype(NoneType(), T) is always True.
+ # So, we cannot do anything useful here in that case.
+ return False
+ for base in type.mro:
+ symnode = base.names.get(attr_name)
+ if symnode is None:
+ continue
+ node = symnode.node
+ if isinstance(node, OverloadedFuncDef):
+ node = node.impl
+ if isinstance(node, Decorator):
+ node = node.func
+ if isinstance(node, FuncDef):
+ if node.type is not None:
+ assert isinstance(node.type, CallableType)
+ return is_subtype(NoneType(), node.type.ret_type)
+ return False
+
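# Illustration (user code, under the assumption that trivial protocol bodies
# are treated as implicitly abstract here) of what can_return_none() inspects:
# whether the implicitly abstract method's return type admits None decides how
# the instantiation error is reported.
from typing import Optional, Protocol


class Resource(Protocol):
    def close(self) -> Optional[int]:
        ...  # trivial body -> implicitly abstract; None fits the return type


class FileResource(Resource):
    pass


# FileResource()  # mypy flags instantiation since close() stays abstract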
def analyze_type_type_callee(self, item: ProperType, context: Context) -> Type:
"""Analyze the callee X in X(...) where X is Type[item].
@@ -1084,15 +1536,20 @@ def analyze_type_type_callee(self, item: ProperType, context: Context) -> Type:
res = type_object_type(item.type, self.named_type)
if isinstance(res, CallableType):
res = res.copy_modified(from_type_type=True)
- expanded = get_proper_type(expand_type_by_instance(res, item))
+ expanded = expand_type_by_instance(res, item)
if isinstance(expanded, CallableType):
# Callee of the form Type[...] should never be generic, only
# proper class objects can be.
expanded = expanded.copy_modified(variables=[])
return expanded
if isinstance(item, UnionType):
- return UnionType([self.analyze_type_type_callee(get_proper_type(tp), context)
- for tp in item.relevant_items()], item.line)
+ return UnionType(
+ [
+ self.analyze_type_type_callee(get_proper_type(tp), context)
+ for tp in item.relevant_items()
+ ],
+ item.line,
+ )
if isinstance(item, TypeVarType):
# Pretend we're calling the typevar's upper bound,
# i.e. its constructor (a poor approximation for reality,
@@ -1103,24 +1560,22 @@ def analyze_type_type_callee(self, item: ProperType, context: Context) -> Type:
if isinstance(callee, CallableType):
callee = callee.copy_modified(ret_type=item)
elif isinstance(callee, Overloaded):
- callee = Overloaded([c.copy_modified(ret_type=item)
- for c in callee.items])
+ callee = Overloaded([c.copy_modified(ret_type=item) for c in callee.items])
return callee
# We support Type of namedtuples but not of tuples in general
- if (isinstance(item, TupleType)
- and tuple_fallback(item).type.fullname != 'builtins.tuple'):
+ if isinstance(item, TupleType) and tuple_fallback(item).type.fullname != "builtins.tuple":
return self.analyze_type_type_callee(tuple_fallback(item), context)
self.msg.unsupported_type_type(item, context)
return AnyType(TypeOfAny.from_error)
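# Sketch of the Type[...] callee analysis above: calling a value typed Type[U]
# type checks like calling the item's constructor, with the return type pinned
# to the item (for a TypeVar, its upper bound stands in for the constructor).
from typing import Type, TypeVar


class User:
    def __init__(self, name: str) -> None:
        self.name = name


U = TypeVar("U", bound=User)


def make(cls: Type[U], name: str) -> U:
    return cls(name)  # checked via analyze_type_type_callee


make(User, "ada")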
- def infer_arg_types_in_empty_context(self, args: List[Expression]) -> List[Type]:
+ def infer_arg_types_in_empty_context(self, args: list[Expression]) -> list[Type]:
"""Infer argument expression types in an empty context.
In short, we recurse on each argument without considering
in what context the argument was called.
"""
- res: List[Type] = []
+ res: list[Type] = []
for arg in args:
arg_type = self.accept(arg)
@@ -1130,9 +1585,29 @@ def infer_arg_types_in_empty_context(self, args: List[Expression]) -> List[Type]
res.append(arg_type)
return res
+ def infer_more_unions_for_recursive_type(self, type_context: Type) -> bool:
+ """Adjust type inference of unions if type context has a recursive type.
+
+ Return the old state. The caller must assign it to type_state.infer_unions
+ afterwards.
+
+ This is a hack to better support inference for recursive types.
+
+ Note: This is performance-sensitive and must not be a context manager
+ until mypyc supports them better.
+ """
+ old = type_state.infer_unions
+ if has_recursive_types(type_context):
+ type_state.infer_unions = True
+ return old
+
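# Hedged example of the recursive-type context this helper targets (recursive
# aliases were still opt-in around this change): constraints from arguments
# are solved with unions rather than joins, so a heterogeneous argument can
# still match the alias.
from typing import Dict, List, Union

JSON = Union[str, int, List["JSON"], Dict[str, "JSON"]]


def dump(value: JSON) -> None:
    print(value)


dump(["a", 1, {"b": [2]}])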
def infer_arg_types_in_context(
- self, callee: CallableType, args: List[Expression], arg_kinds: List[ArgKind],
- formal_to_actual: List[List[int]]) -> List[Type]:
+ self,
+ callee: CallableType,
+ args: list[Expression],
+ arg_kinds: list[ArgKind],
+ formal_to_actual: list[list[int]],
+ ) -> list[Type]:
"""Infer argument expression types using a callable type as context.
For example, if callee argument 2 has type List[int], infer the
@@ -1140,12 +1615,21 @@ def infer_arg_types_in_context(
Returns the inferred types of *actual arguments*.
"""
- res: List[Optional[Type]] = [None] * len(args)
+ res: list[Type | None] = [None] * len(args)
for i, actuals in enumerate(formal_to_actual):
for ai in actuals:
if not arg_kinds[ai].is_star():
- res[ai] = self.accept(args[ai], callee.arg_types[i])
+ arg_type = callee.arg_types[i]
+ # When the outer context for a function call is known to be recursive,
+ # we solve type constraints inferred from arguments using unions instead
+ # of joins. This is a bit arbitrary, but in practice it works for most
+ # cases. A cleaner alternative would be to switch to single bin type
+ # inference, but this is a lot of work.
+ old = self.infer_more_unions_for_recursive_type(arg_type)
+ res[ai] = self.accept(args[ai], arg_type)
+ # We need to manually restore union inference state, ugh.
+ type_state.infer_unions = old
# Fill in the rest of the argument types.
for i, t in enumerate(res):
@@ -1155,7 +1639,8 @@ def infer_arg_types_in_context(
return cast(List[Type], res)
def infer_function_type_arguments_using_context(
- self, callable: CallableType, error_context: Context) -> CallableType:
+ self, callable: CallableType, error_context: Context
+ ) -> CallableType:
"""Unify callable return type to type context to infer type vars.
For example, if the return type is set[t] where 't' is a type variable
@@ -1217,7 +1702,7 @@ def infer_function_type_arguments_using_context(
return callable.copy_modified()
args = infer_type_arguments(callable.type_var_ids(), ret_type, erased_ctx)
# Only substitute non-Uninhabited and non-erased types.
- new_args: List[Optional[Type]] = []
+ new_args: list[Type | None] = []
for arg in args:
if has_uninhabited_component(arg) or has_erased_component(arg):
new_args.append(None)
@@ -1225,14 +1710,18 @@ def infer_function_type_arguments_using_context(
new_args.append(arg)
# Don't show errors after we have only used the outer context for inference.
# We will use argument context to infer more variables.
- return self.apply_generic_arguments(callable, new_args, error_context,
- skip_unsatisfied=True)
-
- def infer_function_type_arguments(self, callee_type: CallableType,
- args: List[Expression],
- arg_kinds: List[ArgKind],
- formal_to_actual: List[List[int]],
- context: Context) -> CallableType:
+ return self.apply_generic_arguments(
+ callable, new_args, error_context, skip_unsatisfied=True
+ )
+
+ def infer_function_type_arguments(
+ self,
+ callee_type: CallableType,
+ args: list[Expression],
+ arg_kinds: list[ArgKind],
+ formal_to_actual: list[list[int]],
+ context: Context,
+ ) -> CallableType:
"""Infer the type arguments for a generic callee type.
Infer based on the types of arguments.
@@ -1244,14 +1733,16 @@ def infer_function_type_arguments(self, callee_type: CallableType,
# due to partially available context information at this time, but
# these errors can be safely ignored as the arguments will be
# inferred again later.
- with self.msg.disable_errors():
+ with self.msg.filter_errors():
arg_types = self.infer_arg_types_in_context(
- callee_type, args, arg_kinds, formal_to_actual)
+ callee_type, args, arg_kinds, formal_to_actual
+ )
arg_pass_nums = self.get_arg_infer_passes(
- callee_type.arg_types, formal_to_actual, len(args))
+ callee_type.arg_types, formal_to_actual, len(args)
+ )
- pass1_args: List[Optional[Type]] = []
+ pass1_args: list[Type | None] = []
for i, arg in enumerate(arg_types):
if arg_pass_nums[i] > 1:
pass1_args.append(None)
@@ -1259,19 +1750,25 @@ def infer_function_type_arguments(self, callee_type: CallableType,
pass1_args.append(arg)
inferred_args = infer_function_type_arguments(
- callee_type, pass1_args, arg_kinds, formal_to_actual,
+ callee_type,
+ pass1_args,
+ arg_kinds,
+ formal_to_actual,
context=self.argument_infer_context(),
- strict=self.chk.in_checked_function())
+ strict=self.chk.in_checked_function(),
+ )
if 2 in arg_pass_nums:
# Second pass of type inference.
- (callee_type,
- inferred_args) = self.infer_function_type_arguments_pass2(
- callee_type, args, arg_kinds, formal_to_actual,
- inferred_args, context)
+ (callee_type, inferred_args) = self.infer_function_type_arguments_pass2(
+ callee_type, args, arg_kinds, formal_to_actual, inferred_args, context
+ )
- if callee_type.special_sig == 'dict' and len(inferred_args) == 2 and (
- ARG_NAMED in arg_kinds or ARG_STAR2 in arg_kinds):
+ if (
+ callee_type.special_sig == "dict"
+ and len(inferred_args) == 2
+ and (ARG_NAMED in arg_kinds or ARG_STAR2 in arg_kinds)
+ ):
# HACK: Infer str key type for dict(...) with keyword args. The type system
# can't represent this so we special case it, as this is a pretty common
# thing. This doesn't quite work with all possible subclasses of dict
@@ -1280,24 +1777,24 @@ def infer_function_type_arguments(self, callee_type: CallableType,
# a little tricky to fix so it's left unfixed for now.
first_arg = get_proper_type(inferred_args[0])
if isinstance(first_arg, (NoneType, UninhabitedType)):
- inferred_args[0] = self.named_type('builtins.str')
- elif not first_arg or not is_subtype(self.named_type('builtins.str'), first_arg):
- self.chk.fail(message_registry.KEYWORD_ARGUMENT_REQUIRES_STR_KEY_TYPE,
- context)
+ inferred_args[0] = self.named_type("builtins.str")
+ elif not first_arg or not is_subtype(self.named_type("builtins.str"), first_arg):
+ self.chk.fail(message_registry.KEYWORD_ARGUMENT_REQUIRES_STR_KEY_TYPE, context)
else:
# In dynamically typed functions use implicit 'Any' types for
# type variables.
inferred_args = [AnyType(TypeOfAny.unannotated)] * len(callee_type.variables)
- return self.apply_inferred_arguments(callee_type, inferred_args,
- context)
+ return self.apply_inferred_arguments(callee_type, inferred_args, context)
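# Sketch of the dict(...) special case above: keyword arguments force the
# inferred key type to be str, something the type system can't express directly.
d1 = dict(a=1, b=2)  # inferred as Dict[str, int]
d2 = dict({"a": 1}, b=2)  # OK: str keys are compatible
# dict({1: "x"}, extra="y")  # mypy: keyword argument requires a "str" key type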
def infer_function_type_arguments_pass2(
- self, callee_type: CallableType,
- args: List[Expression],
- arg_kinds: List[ArgKind],
- formal_to_actual: List[List[int]],
- old_inferred_args: Sequence[Optional[Type]],
- context: Context) -> Tuple[CallableType, List[Optional[Type]]]:
+ self,
+ callee_type: CallableType,
+ args: list[Expression],
+ arg_kinds: list[ArgKind],
+ formal_to_actual: list[list[int]],
+ old_inferred_args: Sequence[Type | None],
+ context: Context,
+ ) -> tuple[CallableType, list[Type | None]]:
"""Perform second pass of generic function type argument inference.
The second pass is needed for arguments with types such as Callable[[T], S],
@@ -1318,11 +1815,13 @@ def infer_function_type_arguments_pass2(
inferred_args[i] = None
callee_type = self.apply_generic_arguments(callee_type, inferred_args, context)
- arg_types = self.infer_arg_types_in_context(
- callee_type, args, arg_kinds, formal_to_actual)
+ arg_types = self.infer_arg_types_in_context(callee_type, args, arg_kinds, formal_to_actual)
inferred_args = infer_function_type_arguments(
- callee_type, arg_types, arg_kinds, formal_to_actual,
+ callee_type,
+ arg_types,
+ arg_kinds,
+ formal_to_actual,
context=self.argument_infer_context(),
)
@@ -1330,13 +1829,12 @@ def infer_function_type_arguments_pass2(
def argument_infer_context(self) -> ArgumentInferContext:
return ArgumentInferContext(
- self.chk.named_type('typing.Mapping'),
- self.chk.named_type('typing.Iterable'),
+ self.chk.named_type("typing.Mapping"), self.chk.named_type("typing.Iterable")
)
- def get_arg_infer_passes(self, arg_types: List[Type],
- formal_to_actual: List[List[int]],
- num_actuals: int) -> List[int]:
+ def get_arg_infer_passes(
+ self, arg_types: list[Type], formal_to_actual: list[list[int]], num_actuals: int
+ ) -> list[int]:
"""Return pass numbers for args for two-pass argument type inference.
For each actual, the pass number is either 1 (first pass) or 2 (second
@@ -1352,9 +1850,9 @@ def get_arg_infer_passes(self, arg_types: List[Type],
res[j] = 2
return res
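# Hedged example of why a second inference pass exists: the lambda's argument
# type depends on T, which is only known after pass 1 infers T=int from the list.
from typing import Callable, List, TypeVar

T = TypeVar("T")
S = TypeVar("S")


def apply_to_all(f: Callable[[T], S], items: List[T]) -> List[S]:
    return [f(x) for x in items]


apply_to_all(lambda x: x + 1, [1, 2, 3])  # lambda checked in pass 2 with x: int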
- def apply_inferred_arguments(self, callee_type: CallableType,
- inferred_args: Sequence[Optional[Type]],
- context: Context) -> CallableType:
+ def apply_inferred_arguments(
+ self, callee_type: CallableType, inferred_args: Sequence[Type | None], context: Context
+ ) -> CallableType:
"""Apply inferred values of type arguments to a generic function.
Inferred_args contains the values of function type arguments.
@@ -1365,22 +1863,24 @@ def apply_inferred_arguments(self, callee_type: CallableType,
for i, inferred_type in enumerate(inferred_args):
if not inferred_type or has_erased_component(inferred_type):
# Could not infer a non-trivial type for a type variable.
- self.msg.could_not_infer_type_arguments(
- callee_type, i + 1, context)
+ self.msg.could_not_infer_type_arguments(callee_type, i + 1, context)
inferred_args = [AnyType(TypeOfAny.from_error)] * len(inferred_args)
# Apply the inferred types to the function type. In this case the
# return type must be CallableType, since we give the right number of type
# arguments.
return self.apply_generic_arguments(callee_type, inferred_args, context)
- def check_argument_count(self,
- callee: CallableType,
- actual_types: List[Type],
- actual_kinds: List[ArgKind],
- actual_names: Optional[Sequence[Optional[str]]],
- formal_to_actual: List[List[int]],
- context: Optional[Context],
- messages: Optional[MessageBuilder]) -> bool:
+ def check_argument_count(
+ self,
+ callee: CallableType,
+ actual_types: list[Type],
+ actual_kinds: list[ArgKind],
+ actual_names: Sequence[str | None] | None,
+ formal_to_actual: list[list[int]],
+ context: Context | None,
+ object_type: Type | None = None,
+ callable_name: str | None = None,
+ ) -> bool:
"""Check that there is a value for all required arguments to a function.
Also check that there are no duplicate values for arguments. Report found errors
@@ -1388,57 +1888,61 @@ def check_argument_count(self,
Return False if there were any errors. Otherwise return True.
"""
- if messages:
- assert context, "Internal error: messages given without context"
- elif context is None:
+ if context is None:
# Avoid "is None" checks
context = TempNode(AnyType(TypeOfAny.special_form))
# TODO(jukka): We could return as soon as we find an error.
- # Collect list of all actual arguments matched to formal arguments.
- all_actuals: List[int] = []
+ # Collect dict of all actual arguments matched to formal arguments, with occurrence count
+ all_actuals: dict[int, int] = {}
for actuals in formal_to_actual:
- all_actuals.extend(actuals)
+ for a in actuals:
+ all_actuals[a] = all_actuals.get(a, 0) + 1
ok, is_unexpected_arg_error = self.check_for_extra_actual_arguments(
- callee, actual_types, actual_kinds, actual_names, all_actuals, context, messages)
+ callee, actual_types, actual_kinds, actual_names, all_actuals, context
+ )
# Check for too many or few values for formals.
for i, kind in enumerate(callee.arg_kinds):
if kind.is_required() and not formal_to_actual[i] and not is_unexpected_arg_error:
# No actual for a mandatory formal
- if messages:
- if kind.is_positional():
- messages.too_few_arguments(callee, context, actual_names)
- else:
- argname = callee.arg_names[i] or "?"
- messages.missing_named_argument(callee, context, argname)
+ if kind.is_positional():
+ self.msg.too_few_arguments(callee, context, actual_names)
+ if object_type and callable_name and "." in callable_name:
+ self.missing_classvar_callable_note(object_type, callable_name, context)
+ else:
+ argname = callee.arg_names[i] or "?"
+ self.msg.missing_named_argument(callee, context, argname)
ok = False
elif not kind.is_star() and is_duplicate_mapping(
- formal_to_actual[i], actual_types, actual_kinds):
- if (self.chk.in_checked_function() or
- isinstance(get_proper_type(actual_types[formal_to_actual[i][0]]),
- TupleType)):
- if messages:
- messages.duplicate_argument_value(callee, i, context)
+ formal_to_actual[i], actual_types, actual_kinds
+ ):
+ if self.chk.in_checked_function() or isinstance(
+ get_proper_type(actual_types[formal_to_actual[i][0]]), TupleType
+ ):
+ self.msg.duplicate_argument_value(callee, i, context)
ok = False
- elif (kind.is_named() and formal_to_actual[i] and
- actual_kinds[formal_to_actual[i][0]] not in [nodes.ARG_NAMED, nodes.ARG_STAR2]):
+ elif (
+ kind.is_named()
+ and formal_to_actual[i]
+ and actual_kinds[formal_to_actual[i][0]] not in [nodes.ARG_NAMED, nodes.ARG_STAR2]
+ ):
# Positional argument when expecting a keyword argument.
- if messages:
- messages.too_many_positional_arguments(callee, context)
+ self.msg.too_many_positional_arguments(callee, context)
ok = False
return ok
- def check_for_extra_actual_arguments(self,
- callee: CallableType,
- actual_types: List[Type],
- actual_kinds: List[ArgKind],
- actual_names: Optional[Sequence[Optional[str]]],
- all_actuals: List[int],
- context: Context,
- messages: Optional[MessageBuilder]) -> Tuple[bool, bool]:
+ def check_for_extra_actual_arguments(
+ self,
+ callee: CallableType,
+ actual_types: list[Type],
+ actual_kinds: list[ArgKind],
+ actual_names: Sequence[str | None] | None,
+ all_actuals: dict[int, int],
+ context: Context,
+ ) -> tuple[bool, bool]:
"""Check for extra actual arguments.
Return tuple (was everything ok,
@@ -1449,158 +1953,273 @@ def check_for_extra_actual_arguments(self,
ok = True # False if we've found any error
for i, kind in enumerate(actual_kinds):
- if (i not in all_actuals and
- # We accept the other iterables than tuple (including Any)
- # as star arguments because they could be empty, resulting no arguments.
- (kind != nodes.ARG_STAR or is_non_empty_tuple(actual_types[i])) and
- # Accept all types for double-starred arguments, because they could be empty
- # dictionaries and we can't tell it from their types
- kind != nodes.ARG_STAR2):
+ if (
+ i not in all_actuals
+ and
+ # We accept iterables other than tuple (including Any)
+ # as star arguments because they could be empty, resulting in no arguments.
+ (kind != nodes.ARG_STAR or is_non_empty_tuple(actual_types[i]))
+ and
+ # Accept all types for double-starred arguments, because they could be empty
+ # dictionaries and we can't tell that from their types
+ kind != nodes.ARG_STAR2
+ ):
# Extra actual: not matched by a formal argument.
ok = False
if kind != nodes.ARG_NAMED:
- if messages:
- messages.too_many_arguments(callee, context)
+ self.msg.too_many_arguments(callee, context)
else:
- if messages:
- assert actual_names, "Internal error: named kinds without names given"
- act_name = actual_names[i]
- assert act_name is not None
- act_type = actual_types[i]
- messages.unexpected_keyword_argument(callee, act_name, act_type, context)
+ assert actual_names, "Internal error: named kinds without names given"
+ act_name = actual_names[i]
+ assert act_name is not None
+ act_type = actual_types[i]
+ self.msg.unexpected_keyword_argument(callee, act_name, act_type, context)
is_unexpected_arg_error = True
- elif ((kind == nodes.ARG_STAR and nodes.ARG_STAR not in callee.arg_kinds)
- or kind == nodes.ARG_STAR2):
+ elif (
+ kind == nodes.ARG_STAR and nodes.ARG_STAR not in callee.arg_kinds
+ ) or kind == nodes.ARG_STAR2:
actual_type = get_proper_type(actual_types[i])
if isinstance(actual_type, (TupleType, TypedDictType)):
- if all_actuals.count(i) < len(actual_type.items):
+ if all_actuals.get(i, 0) < len(actual_type.items):
# Too many tuple/dict items as some did not match.
- if messages:
- if (kind != nodes.ARG_STAR2
- or not isinstance(actual_type, TypedDictType)):
- messages.too_many_arguments(callee, context)
- else:
- messages.too_many_arguments_from_typed_dict(callee, actual_type,
- context)
- is_unexpected_arg_error = True
+ if kind != nodes.ARG_STAR2 or not isinstance(actual_type, TypedDictType):
+ self.msg.too_many_arguments(callee, context)
+ else:
+ self.msg.too_many_arguments_from_typed_dict(
+ callee, actual_type, context
+ )
+ is_unexpected_arg_error = True
ok = False
# *args/**kwargs can be applied even if the function takes a fixed
# number of positional arguments. This may succeed at runtime.
return ok, is_unexpected_arg_error
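# Sketch of the *args acceptance rule above: a non-tuple iterable could be
# empty, so unpacking it where no formal matches is not flagged, while a
# non-empty tuple is known to supply too many arguments.
from typing import List


def nullary() -> None:
    pass


extras: List[int] = []
nullary(*extras)  # allowed: the list could be empty at runtime
# nullary(*(1, 2))  # mypy would flag: Too many arguments for "nullary"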
- def check_argument_types(self,
- arg_types: List[Type],
- arg_kinds: List[ArgKind],
- args: List[Expression],
- callee: CallableType,
- formal_to_actual: List[List[int]],
- context: Context,
- messages: Optional[MessageBuilder] = None,
- check_arg: Optional[ArgChecker] = None,
- object_type: Optional[Type] = None) -> None:
+ def missing_classvar_callable_note(
+ self, object_type: Type, callable_name: str, context: Context
+ ) -> None:
+ if isinstance(object_type, ProperType) and isinstance(object_type, Instance):
+ _, var_name = callable_name.rsplit(".", maxsplit=1)
+ node = object_type.type.get(var_name)
+ if node is not None and isinstance(node.node, Var):
+ if not node.node.is_inferred and not node.node.is_classvar:
+ self.msg.note(
+ f'"{var_name}" is considered instance variable,'
+ " to make it class variable use ClassVar[...]",
+ context,
+ )
+
+ def check_argument_types(
+ self,
+ arg_types: list[Type],
+ arg_kinds: list[ArgKind],
+ args: list[Expression],
+ callee: CallableType,
+ formal_to_actual: list[list[int]],
+ context: Context,
+ check_arg: ArgChecker | None = None,
+ object_type: Type | None = None,
+ ) -> None:
"""Check argument types against a callable type.
Report errors if the argument types are not compatible.
The check_call docstring describes some of the arguments.
"""
- messages = messages or self.msg
check_arg = check_arg or self.check_arg
# Keep track of consumed tuple *arg items.
mapper = ArgTypeExpander(self.argument_infer_context())
for i, actuals in enumerate(formal_to_actual):
- for actual in actuals:
- actual_type = arg_types[actual]
+ orig_callee_arg_type = get_proper_type(callee.arg_types[i])
+
+ # Checking the case that we have more than one item but the first argument
+ # is an unpack, so this would be something like:
+ # [Tuple[Unpack[Ts]], int]
+ #
+ # In this case we have to check everything together; we do this by re-unifying
+ # the suffixes to the tuple, e.g. a single actual like
+ # Tuple[Unpack[Ts], int]
+ expanded_tuple = False
+ if len(actuals) > 1:
+ first_actual_arg_type = get_proper_type(arg_types[actuals[0]])
+ if (
+ isinstance(first_actual_arg_type, TupleType)
+ and len(first_actual_arg_type.items) == 1
+ and isinstance(get_proper_type(first_actual_arg_type.items[0]), UnpackType)
+ ):
+ # TODO: use walrus operator
+ actual_types = [first_actual_arg_type.items[0]] + [
+ arg_types[a] for a in actuals[1:]
+ ]
+ actual_kinds = [nodes.ARG_STAR] + [nodes.ARG_POS] * (len(actuals) - 1)
+
+ assert isinstance(orig_callee_arg_type, TupleType)
+ assert orig_callee_arg_type.items
+ callee_arg_types = orig_callee_arg_type.items
+ callee_arg_kinds = [nodes.ARG_STAR] + [nodes.ARG_POS] * (
+ len(orig_callee_arg_type.items) - 1
+ )
+ expanded_tuple = True
+
+ if not expanded_tuple:
+ actual_types = [arg_types[a] for a in actuals]
+ actual_kinds = [arg_kinds[a] for a in actuals]
+ if isinstance(orig_callee_arg_type, UnpackType):
+ unpacked_type = get_proper_type(orig_callee_arg_type.type)
+ if isinstance(unpacked_type, TupleType):
+ inner_unpack_index = find_unpack_in_list(unpacked_type.items)
+ if inner_unpack_index is None:
+ callee_arg_types = unpacked_type.items
+ callee_arg_kinds = [ARG_POS] * len(actuals)
+ else:
+ inner_unpack = get_proper_type(unpacked_type.items[inner_unpack_index])
+ assert isinstance(inner_unpack, UnpackType)
+ inner_unpacked_type = get_proper_type(inner_unpack.type)
+ # We assume heterogeneous tuples are desugared earlier
+ assert isinstance(inner_unpacked_type, Instance)
+ assert inner_unpacked_type.type.fullname == "builtins.tuple"
+ callee_arg_types = (
+ unpacked_type.items[:inner_unpack_index]
+ + [inner_unpacked_type.args[0]]
+ * (len(actuals) - len(unpacked_type.items) + 1)
+ + unpacked_type.items[inner_unpack_index + 1 :]
+ )
+ callee_arg_kinds = [ARG_POS] * len(actuals)
+ else:
+ assert isinstance(unpacked_type, Instance)
+ assert unpacked_type.type.fullname == "builtins.tuple"
+ callee_arg_types = [unpacked_type.args[0]] * len(actuals)
+ callee_arg_kinds = [ARG_POS] * len(actuals)
+ else:
+ callee_arg_types = [orig_callee_arg_type] * len(actuals)
+ callee_arg_kinds = [callee.arg_kinds[i]] * len(actuals)
+
+ assert len(actual_types) == len(actuals) == len(actual_kinds)
+
+ if len(callee_arg_types) != len(actual_types):
+ # TODO: Improve error message
+ self.chk.fail("Invalid number of arguments", context)
+ continue
+
+ assert len(callee_arg_types) == len(actual_types)
+ assert len(callee_arg_types) == len(callee_arg_kinds)
+ for actual, actual_type, actual_kind, callee_arg_type, callee_arg_kind in zip(
+ actuals, actual_types, actual_kinds, callee_arg_types, callee_arg_kinds
+ ):
if actual_type is None:
continue # Some kind of error was already reported.
- actual_kind = arg_kinds[actual]
# Check that a *arg is valid as varargs.
- if (actual_kind == nodes.ARG_STAR and
- not self.is_valid_var_arg(actual_type)):
- messages.invalid_var_arg(actual_type, context)
- if (actual_kind == nodes.ARG_STAR2 and
- not self.is_valid_keyword_var_arg(actual_type)):
- is_mapping = is_subtype(actual_type, self.chk.named_type('typing.Mapping'))
- messages.invalid_keyword_var_arg(actual_type, is_mapping, context)
+ if actual_kind == nodes.ARG_STAR and not self.is_valid_var_arg(actual_type):
+ self.msg.invalid_var_arg(actual_type, context)
+ if actual_kind == nodes.ARG_STAR2 and not self.is_valid_keyword_var_arg(
+ actual_type
+ ):
+ is_mapping = is_subtype(actual_type, self.chk.named_type("typing.Mapping"))
+ self.msg.invalid_keyword_var_arg(actual_type, is_mapping, context)
expanded_actual = mapper.expand_actual_type(
- actual_type, actual_kind,
- callee.arg_names[i], callee.arg_kinds[i])
- check_arg(expanded_actual, actual_type, arg_kinds[actual],
- callee.arg_types[i],
- actual + 1, i + 1, callee, object_type, args[actual], context, messages)
-
- def check_arg(self,
- caller_type: Type,
- original_caller_type: Type,
- caller_kind: ArgKind,
- callee_type: Type,
- n: int,
- m: int,
- callee: CallableType,
- object_type: Optional[Type],
- context: Context,
- outer_context: Context,
- messages: MessageBuilder) -> None:
+ actual_type, actual_kind, callee.arg_names[i], callee_arg_kind
+ )
+ check_arg(
+ expanded_actual,
+ actual_type,
+ actual_kind,
+ callee_arg_type,
+ actual + 1,
+ i + 1,
+ callee,
+ object_type,
+ args[actual],
+ context,
+ )
+
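# Hedged sketch of the Unpack handling above (TypeVarTuple was experimental
# here; this assumes typing_extensions is installed): actuals matched to a
# *args formal are re-unified against the unpacked tuple's items.
from typing import Tuple
from typing_extensions import TypeVarTuple, Unpack

Ts = TypeVarTuple("Ts")


def to_tuple(*args: Unpack[Ts]) -> Tuple[Unpack[Ts]]:
    return args


pair = to_tuple(1, "a")  # inferred as Tuple[int, str]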
+ def check_arg(
+ self,
+ caller_type: Type,
+ original_caller_type: Type,
+ caller_kind: ArgKind,
+ callee_type: Type,
+ n: int,
+ m: int,
+ callee: CallableType,
+ object_type: Type | None,
+ context: Context,
+ outer_context: Context,
+ ) -> None:
"""Check the type of a single argument in a call."""
caller_type = get_proper_type(caller_type)
original_caller_type = get_proper_type(original_caller_type)
callee_type = get_proper_type(callee_type)
if isinstance(caller_type, DeletedType):
- messages.deleted_as_rvalue(caller_type, context)
+ self.msg.deleted_as_rvalue(caller_type, context)
# Only non-abstract non-protocol class can be given where Type[...] is expected...
- elif (isinstance(caller_type, CallableType) and isinstance(callee_type, TypeType) and
- caller_type.is_type_obj() and
- (caller_type.type_object().is_abstract or caller_type.type_object().is_protocol) and
- isinstance(callee_type.item, Instance) and
- (callee_type.item.type.is_abstract or callee_type.item.type.is_protocol)):
+ elif (
+ isinstance(caller_type, CallableType)
+ and isinstance(callee_type, TypeType)
+ and caller_type.is_type_obj()
+ and (caller_type.type_object().is_abstract or caller_type.type_object().is_protocol)
+ and isinstance(callee_type.item, Instance)
+ and (callee_type.item.type.is_abstract or callee_type.item.type.is_protocol)
+ and not self.chk.allow_abstract_call
+ ):
self.msg.concrete_only_call(callee_type, context)
elif not is_subtype(caller_type, callee_type, options=self.chk.options):
- if self.chk.should_suppress_optional_error([caller_type, callee_type]):
- return
- code = messages.incompatible_argument(n,
- m,
- callee,
- original_caller_type,
- caller_kind,
- object_type=object_type,
- context=context,
- outer_context=outer_context)
- messages.incompatible_argument_note(original_caller_type, callee_type, context,
- code=code)
-
- def check_overload_call(self,
- callee: Overloaded,
- args: List[Expression],
- arg_kinds: List[ArgKind],
- arg_names: Optional[Sequence[Optional[str]]],
- callable_name: Optional[str],
- object_type: Optional[Type],
- context: Context,
- arg_messages: MessageBuilder) -> Tuple[Type, Type]:
+ code = self.msg.incompatible_argument(
+ n,
+ m,
+ callee,
+ original_caller_type,
+ caller_kind,
+ object_type=object_type,
+ context=context,
+ outer_context=outer_context,
+ )
+ self.msg.incompatible_argument_note(
+ original_caller_type, callee_type, context, code=code
+ )
+ if not self.msg.prefer_simple_messages():
+ self.chk.check_possible_missing_await(caller_type, callee_type, context)
+
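# Hedged sketch of check_possible_missing_await() above: when an argument
# mismatch would be fixed by awaiting the value, mypy attaches a hint.
import asyncio


async def get_id() -> int:
    return 7


def double(x: int) -> int:
    return x * 2


async def main() -> int:
    # double(get_id())  # mypy: incompatible type; hints a missing "await"
    return double(await get_id())


asyncio.run(main())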
+ def check_overload_call(
+ self,
+ callee: Overloaded,
+ args: list[Expression],
+ arg_kinds: list[ArgKind],
+ arg_names: Sequence[str | None] | None,
+ callable_name: str | None,
+ object_type: Type | None,
+ context: Context,
+ ) -> tuple[Type, Type]:
"""Checks a call to an overloaded function."""
+ # Normalize unpacked kwargs before checking the call.
+ callee = callee.with_unpacked_kwargs()
arg_types = self.infer_arg_types_in_empty_context(args)
# Step 1: Filter call targets to remove ones where the argument counts don't match
- plausible_targets = self.plausible_overload_call_targets(arg_types, arg_kinds,
- arg_names, callee)
+ plausible_targets = self.plausible_overload_call_targets(
+ arg_types, arg_kinds, arg_names, callee
+ )
# Step 2: If the arguments contain a union, we try performing union math first,
# instead of picking the first matching overload.
# This is because picking the first overload often ends up being too greedy:
# for example, when we have a fallback alternative that accepts an unrestricted
# typevar. See https://github.com/python/mypy/issues/4063 for related discussion.
- erased_targets: Optional[List[CallableType]] = None
- unioned_result: Optional[Tuple[Type, Type]] = None
+ erased_targets: list[CallableType] | None = None
+ unioned_result: tuple[Type, Type] | None = None
union_interrupted = False # did we try all union combinations?
if any(self.real_union(arg) for arg in arg_types):
- unioned_errors = arg_messages.clean_copy()
try:
- unioned_return = self.union_overload_result(plausible_targets, args,
- arg_types, arg_kinds, arg_names,
- callable_name, object_type,
- context,
- arg_messages=unioned_errors)
+ with self.msg.filter_errors():
+ unioned_return = self.union_overload_result(
+ plausible_targets,
+ args,
+ arg_types,
+ arg_kinds,
+ arg_names,
+ callable_name,
+ object_type,
+ context,
+ )
except TooManyUnions:
union_interrupted = True
else:
@@ -1612,20 +2231,28 @@ def check_overload_call(self,
# a union of inferred callables because for example a call
# Union[int -> int, str -> str](Union[int, str]) is invalid and
# we don't want to introduce internal inconsistencies.
- unioned_result = (make_simplified_union(list(returns),
- context.line,
- context.column),
- self.combine_function_signatures(inferred_types))
+ unioned_result = (
+ make_simplified_union(list(returns), context.line, context.column),
+ self.combine_function_signatures(get_proper_types(inferred_types)),
+ )
# Step 3: We try checking each branch one-by-one.
- inferred_result = self.infer_overload_return_type(plausible_targets, args, arg_types,
- arg_kinds, arg_names, callable_name,
- object_type, context, arg_messages)
+ inferred_result = self.infer_overload_return_type(
+ plausible_targets,
+ args,
+ arg_types,
+ arg_kinds,
+ arg_names,
+ callable_name,
+ object_type,
+ context,
+ )
# If any of checks succeed, stop early.
if inferred_result is not None and unioned_result is not None:
# Both unioned and direct checks succeeded, choose the more precise type.
- if (is_subtype(inferred_result[0], unioned_result[0]) and
- not isinstance(get_proper_type(inferred_result[0]), AnyType)):
+ if is_subtype(inferred_result[0], unioned_result[0]) and not isinstance(
+ get_proper_type(inferred_result[0]), AnyType
+ ):
return inferred_result
return unioned_result
elif unioned_result is not None:
@@ -1642,8 +2269,9 @@ def check_overload_call(self,
#
# Neither alternative matches, but we can guess the user probably wants the
# second one.
- erased_targets = self.overload_erased_call_targets(plausible_targets, arg_types,
- arg_kinds, arg_names, args, context)
+ erased_targets = self.overload_erased_call_targets(
+ plausible_targets, arg_types, arg_kinds, arg_names, args, context
+ )
# Step 5: We try and infer a second-best alternative if possible. If not, fall back
# to using 'Any'.
@@ -1658,28 +2286,32 @@ def check_overload_call(self,
else:
# There was no plausible match: give up
target = AnyType(TypeOfAny.from_error)
-
- if not self.chk.should_suppress_optional_error(arg_types):
- if not is_operator_method(callable_name):
- code = None
- else:
- code = codes.OPERATOR
- arg_messages.no_variant_matches_arguments(
- callee, arg_types, context, code=code)
-
- result = self.check_call(target, args, arg_kinds, context, arg_names,
- arg_messages=arg_messages,
- callable_name=callable_name,
- object_type=object_type)
+ if not is_operator_method(callable_name):
+ code = None
+ else:
+ code = codes.OPERATOR
+ self.msg.no_variant_matches_arguments(callee, arg_types, context, code=code)
+
+ result = self.check_call(
+ target,
+ args,
+ arg_kinds,
+ context,
+ arg_names,
+ callable_name=callable_name,
+ object_type=object_type,
+ )
if union_interrupted:
self.chk.fail(message_registry.TOO_MANY_UNION_COMBINATIONS, context)
return result
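# Sketch of the union-math step above: with a union argument, checking each
# item separately lets the call succeed where no single overload matches.
from typing import Union, overload


@overload
def parse(x: int) -> int: ...
@overload
def parse(x: str) -> str: ...
def parse(x: Union[int, str]) -> Union[int, str]:
    return x


def use(v: Union[int, str]) -> None:
    r = parse(v)  # union math yields Union[int, str], not a greedy first pick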
- def plausible_overload_call_targets(self,
- arg_types: List[Type],
- arg_kinds: List[ArgKind],
- arg_names: Optional[Sequence[Optional[str]]],
- overload: Overloaded) -> List[CallableType]:
+ def plausible_overload_call_targets(
+ self,
+ arg_types: list[Type],
+ arg_kinds: list[ArgKind],
+ arg_names: Sequence[str | None] | None,
+ overload: Overloaded,
+ ) -> list[CallableType]:
"""Returns all overload call targets that having matching argument counts.
If the given args contains a star-arg (*arg or **kwarg argument), this method
@@ -1693,11 +2325,14 @@ def plausible_overload_call_targets(self,
def has_shape(typ: Type) -> bool:
typ = get_proper_type(typ)
- return (isinstance(typ, TupleType) or isinstance(typ, TypedDictType)
- or (isinstance(typ, Instance) and typ.type.is_named_tuple))
+ return (
+ isinstance(typ, TupleType)
+ or isinstance(typ, TypedDictType)
+ or (isinstance(typ, Instance) and typ.type.is_named_tuple)
+ )
- matches: List[CallableType] = []
- star_matches: List[CallableType] = []
+ matches: list[CallableType] = []
+ star_matches: list[CallableType] = []
args_have_var_arg = False
args_have_kw_arg = False
@@ -1708,32 +2343,34 @@ def has_shape(typ: Type) -> bool:
args_have_kw_arg = True
for typ in overload.items:
- formal_to_actual = map_actuals_to_formals(arg_kinds, arg_names,
- typ.arg_kinds, typ.arg_names,
- lambda i: arg_types[i])
-
- if self.check_argument_count(typ, arg_types, arg_kinds, arg_names,
- formal_to_actual, None, None):
- if args_have_var_arg and typ.is_var_arg:
- star_matches.append(typ)
- elif args_have_kw_arg and typ.is_kw_arg:
- star_matches.append(typ)
- else:
- matches.append(typ)
+ formal_to_actual = map_actuals_to_formals(
+ arg_kinds, arg_names, typ.arg_kinds, typ.arg_names, lambda i: arg_types[i]
+ )
+
+ with self.msg.filter_errors():
+ if self.check_argument_count(
+ typ, arg_types, arg_kinds, arg_names, formal_to_actual, None
+ ):
+ if args_have_var_arg and typ.is_var_arg:
+ star_matches.append(typ)
+ elif args_have_kw_arg and typ.is_kw_arg:
+ star_matches.append(typ)
+ else:
+ matches.append(typ)
return star_matches + matches
- def infer_overload_return_type(self,
- plausible_targets: List[CallableType],
- args: List[Expression],
- arg_types: List[Type],
- arg_kinds: List[ArgKind],
- arg_names: Optional[Sequence[Optional[str]]],
- callable_name: Optional[str],
- object_type: Optional[Type],
- context: Context,
- arg_messages: Optional[MessageBuilder] = None,
- ) -> Optional[Tuple[Type, Type]]:
+ def infer_overload_return_type(
+ self,
+ plausible_targets: list[CallableType],
+ args: list[Expression],
+ arg_types: list[Type],
+ arg_kinds: list[ArgKind],
+ arg_names: Sequence[str | None] | None,
+ callable_name: str | None,
+ object_type: Type | None,
+ context: Context,
+ ) -> tuple[Type, Type] | None:
"""Attempts to find the first matching callable from the given list.
If a match is found, returns a tuple containing the result type and the inferred
@@ -1744,36 +2381,26 @@ def infer_overload_return_type(self,
Assumes all of the given targets have argument counts compatible with the caller.
"""
- arg_messages = self.msg if arg_messages is None else arg_messages
- matches: List[CallableType] = []
- return_types: List[Type] = []
- inferred_types: List[Type] = []
+ matches: list[CallableType] = []
+ return_types: list[Type] = []
+ inferred_types: list[Type] = []
args_contain_any = any(map(has_any_type, arg_types))
+ type_maps: list[dict[Expression, Type]] = []
for typ in plausible_targets:
- overload_messages = self.msg.clean_copy()
- prev_messages = self.msg
assert self.msg is self.chk.msg
- self.msg = overload_messages
- self.chk.msg = overload_messages
- try:
- # Passing `overload_messages` as the `arg_messages` parameter doesn't
- # seem to reliably catch all possible errors.
- # TODO: Figure out why
- ret_type, infer_type = self.check_call(
- callee=typ,
- args=args,
- arg_kinds=arg_kinds,
- arg_names=arg_names,
- context=context,
- arg_messages=overload_messages,
- callable_name=callable_name,
- object_type=object_type)
- finally:
- self.chk.msg = prev_messages
- self.msg = prev_messages
-
- is_match = not overload_messages.is_errors()
+ with self.msg.filter_errors() as w:
+ with self.chk.local_type_map() as m:
+ ret_type, infer_type = self.check_call(
+ callee=typ,
+ args=args,
+ arg_kinds=arg_kinds,
+ arg_names=arg_names,
+ context=context,
+ callable_name=callable_name,
+ object_type=object_type,
+ )
+ is_match = not w.has_new_errors()
if is_match:
# Return early if possible; otherwise record info so we can
# check for ambiguity due to 'Any' below.
@@ -1782,6 +2409,7 @@ def infer_overload_return_type(self,
matches.append(typ)
return_types.append(ret_type)
inferred_types.append(infer_type)
+ type_maps.append(m)
if len(matches) == 0:
# No match was found
@@ -1790,52 +2418,59 @@ def infer_overload_return_type(self,
# An argument of type or containing the type 'Any' caused ambiguity.
# We try returning a precise type if we can. If not, we give up and just return 'Any'.
if all_same_types(return_types):
+ self.chk.store_types(type_maps[0])
return return_types[0], inferred_types[0]
elif all_same_types([erase_type(typ) for typ in return_types]):
+ self.chk.store_types(type_maps[0])
return erase_type(return_types[0]), erase_type(inferred_types[0])
else:
- return self.check_call(callee=AnyType(TypeOfAny.special_form),
- args=args,
- arg_kinds=arg_kinds,
- arg_names=arg_names,
- context=context,
- arg_messages=arg_messages,
- callable_name=callable_name,
- object_type=object_type)
+ return self.check_call(
+ callee=AnyType(TypeOfAny.special_form),
+ args=args,
+ arg_kinds=arg_kinds,
+ arg_names=arg_names,
+ context=context,
+ callable_name=callable_name,
+ object_type=object_type,
+ )
else:
# Success! No ambiguity; return the first match.
+ self.chk.store_types(type_maps[0])
return return_types[0], inferred_types[0]
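# Sketch of the Any-ambiguity handling above: an Any argument that matches
# several overloads with different (even after erasure) return types yields
# Any rather than the first match.
from typing import Any, overload


@overload
def f(x: int) -> int: ...
@overload
def f(x: str) -> str: ...
def f(x: Any) -> Any:
    return x


def g(a: Any) -> None:
    r = f(a)  # result is Any: return types differ across plausible matches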
- def overload_erased_call_targets(self,
- plausible_targets: List[CallableType],
- arg_types: List[Type],
- arg_kinds: List[ArgKind],
- arg_names: Optional[Sequence[Optional[str]]],
- args: List[Expression],
- context: Context) -> List[CallableType]:
+ def overload_erased_call_targets(
+ self,
+ plausible_targets: list[CallableType],
+ arg_types: list[Type],
+ arg_kinds: list[ArgKind],
+ arg_names: Sequence[str | None] | None,
+ args: list[Expression],
+ context: Context,
+ ) -> list[CallableType]:
"""Returns a list of all targets that match the caller after erasing types.
Assumes all of the given targets have argument counts compatible with the caller.
"""
- matches: List[CallableType] = []
+ matches: list[CallableType] = []
for typ in plausible_targets:
- if self.erased_signature_similarity(arg_types, arg_kinds, arg_names, args, typ,
- context):
+ if self.erased_signature_similarity(
+ arg_types, arg_kinds, arg_names, args, typ, context
+ ):
matches.append(typ)
return matches
- def union_overload_result(self,
- plausible_targets: List[CallableType],
- args: List[Expression],
- arg_types: List[Type],
- arg_kinds: List[ArgKind],
- arg_names: Optional[Sequence[Optional[str]]],
- callable_name: Optional[str],
- object_type: Optional[Type],
- context: Context,
- arg_messages: Optional[MessageBuilder] = None,
- level: int = 0
- ) -> Optional[List[Tuple[Type, Type]]]:
+ def union_overload_result(
+ self,
+ plausible_targets: list[CallableType],
+ args: list[Expression],
+ arg_types: list[Type],
+ arg_kinds: list[ArgKind],
+ arg_names: Sequence[str | None] | None,
+ callable_name: str | None,
+ object_type: Type | None,
+ context: Context,
+ level: int = 0,
+ ) -> list[tuple[Type, Type]] | None:
"""Accepts a list of overload signatures and attempts to match calls by destructuring
the first union.
@@ -1856,9 +2491,16 @@ def union_overload_result(self,
else:
# No unions in args, just fall back to normal inference
with self.type_overrides_set(args, arg_types):
- res = self.infer_overload_return_type(plausible_targets, args, arg_types,
- arg_kinds, arg_names, callable_name,
- object_type, context, arg_messages)
+ res = self.infer_overload_return_type(
+ plausible_targets,
+ args,
+ arg_types,
+ arg_kinds,
+ arg_names,
+ callable_name,
+ object_type,
+ context,
+ )
if res is not None:
return [res]
return None
@@ -1866,11 +2508,17 @@ def union_overload_result(self,
# Step 3: Try a direct match before splitting to avoid unnecessary union splits
# and save performance.
with self.type_overrides_set(args, arg_types):
- direct = self.infer_overload_return_type(plausible_targets, args, arg_types,
- arg_kinds, arg_names, callable_name,
- object_type, context, arg_messages)
- if direct is not None and not isinstance(get_proper_type(direct[0]),
- (UnionType, AnyType)):
+ direct = self.infer_overload_return_type(
+ plausible_targets,
+ args,
+ arg_types,
+ arg_kinds,
+ arg_names,
+ callable_name,
+ object_type,
+ context,
+ )
+ if direct is not None and not isinstance(get_proper_type(direct[0]), (UnionType, AnyType)):
# We only return non-union matches early, to avoid a greedy match.
return [direct]
@@ -1882,10 +2530,17 @@ def union_overload_result(self,
for item in first_union.relevant_items():
new_arg_types = arg_types.copy()
new_arg_types[idx] = item
- sub_result = self.union_overload_result(plausible_targets, args, new_arg_types,
- arg_kinds, arg_names, callable_name,
- object_type, context, arg_messages,
- level + 1)
+ sub_result = self.union_overload_result(
+ plausible_targets,
+ args,
+ new_arg_types,
+ arg_kinds,
+ arg_names,
+ callable_name,
+ object_type,
+ context,
+ level + 1,
+ )
if sub_result is not None:
res_items.extend(sub_result)
else:
@@ -1893,7 +2548,7 @@ def union_overload_result(self,
return None
# Step 5: If splitting succeeded, then filter out duplicate items before returning.
- seen: Set[Tuple[Type, Type]] = set()
+ seen: set[tuple[Type, Type]] = set()
result = []
for pair in res_items:
if pair not in seen:
@@ -1906,8 +2561,9 @@ def real_union(self, typ: Type) -> bool:
return isinstance(typ, UnionType) and len(typ.relevant_items()) > 1
@contextmanager
- def type_overrides_set(self, exprs: Sequence[Expression],
- overrides: Sequence[Type]) -> Iterator[None]:
+ def type_overrides_set(
+ self, exprs: Sequence[Expression], overrides: Sequence[Type]
+ ) -> Iterator[None]:
"""Set _temporary_ type overrides for given expressions."""
assert len(exprs) == len(overrides)
for expr, typ in zip(exprs, overrides):
@@ -1918,7 +2574,7 @@ def type_overrides_set(self, exprs: Sequence[Expression],
for expr in exprs:
del self.type_overrides[expr]
- def combine_function_signatures(self, types: Sequence[Type]) -> Union[AnyType, CallableType]:
+ def combine_function_signatures(self, types: list[ProperType]) -> AnyType | CallableType:
"""Accepts a list of function signatures and attempts to combine them together into a
new CallableType consisting of the union of all of the given arguments and return types.
@@ -1926,10 +2582,9 @@ def combine_function_signatures(self, types: Sequence[Type]) -> Union[AnyType, C
an ambiguity because of Any in arguments).
"""
assert types, "Trying to merge no callables"
- types = get_proper_types(types)
if not all(isinstance(c, CallableType) for c in types):
return AnyType(TypeOfAny.special_form)
- callables = cast(Sequence[CallableType], types)
+ callables = cast("list[CallableType]", types)
if len(callables) == 1:
return callables[0]
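Illustrative of what the combination produces, assuming two simple signatures (a sketch, not from the patch): arguments and return types are unioned position-wise, which is imprecise but adequate for inferred operator methods.

```python
from typing import Callable, Union

# Combining these two signatures...
f: Callable[[int], int]
g: Callable[[str], str]
# ...yields, approximately, one callable with unioned argument and
# return types:
combined: Callable[[Union[int, str]], Union[int, str]]
```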
@@ -1946,9 +2601,9 @@ def combine_function_signatures(self, types: Sequence[Type]) -> Union[AnyType, C
# confusing and ought to be re-written anyways.)
callables, variables = merge_typevars_in_callables_by_name(callables)
- new_args: List[List[Type]] = [[] for _ in range(len(callables[0].arg_types))]
+ new_args: list[list[Type]] = [[] for _ in range(len(callables[0].arg_types))]
new_kinds = list(callables[0].arg_kinds)
- new_returns: List[Type] = []
+ new_returns: list[Type] = []
too_complex = False
for target in callables:
@@ -1985,7 +2640,8 @@ def combine_function_signatures(self, types: Sequence[Type]) -> Union[AnyType, C
arg_names=[None, None],
ret_type=union_return,
variables=variables,
- implicit=True)
+ implicit=True,
+ )
final_args = []
for args_list in new_args:
@@ -1997,89 +2653,104 @@ def combine_function_signatures(self, types: Sequence[Type]) -> Union[AnyType, C
arg_kinds=new_kinds,
ret_type=union_return,
variables=variables,
- implicit=True)
-
- def erased_signature_similarity(self,
- arg_types: List[Type],
- arg_kinds: List[ArgKind],
- arg_names: Optional[Sequence[Optional[str]]],
- args: List[Expression],
- callee: CallableType,
- context: Context) -> bool:
+ implicit=True,
+ )
+
+ def erased_signature_similarity(
+ self,
+ arg_types: list[Type],
+ arg_kinds: list[ArgKind],
+ arg_names: Sequence[str | None] | None,
+ args: list[Expression],
+ callee: CallableType,
+ context: Context,
+ ) -> bool:
"""Determine whether arguments could match the signature at runtime, after
erasing types."""
- formal_to_actual = map_actuals_to_formals(arg_kinds,
- arg_names,
- callee.arg_kinds,
- callee.arg_names,
- lambda i: arg_types[i])
-
- if not self.check_argument_count(callee, arg_types, arg_kinds, arg_names,
- formal_to_actual, None, None):
- # Too few or many arguments -> no match.
- return False
+ formal_to_actual = map_actuals_to_formals(
+ arg_kinds, arg_names, callee.arg_kinds, callee.arg_names, lambda i: arg_types[i]
+ )
+
+ with self.msg.filter_errors():
+ if not self.check_argument_count(
+ callee, arg_types, arg_kinds, arg_names, formal_to_actual, None
+ ):
+ # Too few or many arguments -> no match.
+ return False
- def check_arg(caller_type: Type,
- original_caller_type: Type,
- caller_kind: ArgKind,
- callee_type: Type,
- n: int,
- m: int,
- callee: CallableType,
- object_type: Optional[Type],
- context: Context,
- outer_context: Context,
- messages: MessageBuilder) -> None:
+ def check_arg(
+ caller_type: Type,
+ original_caller_type: Type,
+ caller_kind: ArgKind,
+ callee_type: Type,
+ n: int,
+ m: int,
+ callee: CallableType,
+ object_type: Type | None,
+ context: Context,
+ outer_context: Context,
+ ) -> None:
if not arg_approximate_similarity(caller_type, callee_type):
# No match -- exit early since none of the remaining work can change
# the result.
raise Finished
try:
- self.check_argument_types(arg_types, arg_kinds, args, callee,
- formal_to_actual, context=context, check_arg=check_arg)
+ self.check_argument_types(
+ arg_types,
+ arg_kinds,
+ args,
+ callee,
+ formal_to_actual,
+ context=context,
+ check_arg=check_arg,
+ )
return True
except Finished:
return False
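The control flow here hinges on a sentinel exception: `check_arg` raises `Finished` at the first incompatibility and the caller converts that into a boolean. A self-contained sketch of the same pattern (all names hypothetical):

```python
from typing import Iterable, Tuple

class Finished(Exception):
    """Raised to abort checking as soon as a mismatch is found."""

def all_pairs_match(pairs: Iterable[Tuple[int, int]]) -> bool:
    def check(a: int, b: int) -> None:
        if a != b:
            raise Finished

    try:
        for a, b in pairs:
            check(a, b)
        return True
    except Finished:
        return False

assert all_pairs_match([(1, 1), (2, 2)])
assert not all_pairs_match([(1, 2)])
```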
- def apply_generic_arguments(self, callable: CallableType, types: Sequence[Optional[Type]],
- context: Context, skip_unsatisfied: bool = False) -> CallableType:
+ def apply_generic_arguments(
+ self,
+ callable: CallableType,
+ types: Sequence[Type | None],
+ context: Context,
+ skip_unsatisfied: bool = False,
+ ) -> CallableType:
"""Simple wrapper around mypy.applytype.apply_generic_arguments."""
- return applytype.apply_generic_arguments(callable, types,
- self.msg.incompatible_typevar_value, context,
- skip_unsatisfied=skip_unsatisfied)
+ return applytype.apply_generic_arguments(
+ callable,
+ types,
+ self.msg.incompatible_typevar_value,
+ context,
+ skip_unsatisfied=skip_unsatisfied,
+ )
- def check_any_type_call(self, args: List[Expression], callee: Type) -> Tuple[Type, Type]:
+ def check_any_type_call(self, args: list[Expression], callee: Type) -> tuple[Type, Type]:
self.infer_arg_types_in_empty_context(args)
callee = get_proper_type(callee)
if isinstance(callee, AnyType):
- return (AnyType(TypeOfAny.from_another_any, source_any=callee),
- AnyType(TypeOfAny.from_another_any, source_any=callee))
+ return (
+ AnyType(TypeOfAny.from_another_any, source_any=callee),
+ AnyType(TypeOfAny.from_another_any, source_any=callee),
+ )
else:
return AnyType(TypeOfAny.special_form), AnyType(TypeOfAny.special_form)
- def check_union_call(self,
- callee: UnionType,
- args: List[Expression],
- arg_kinds: List[ArgKind],
- arg_names: Optional[Sequence[Optional[str]]],
- context: Context,
- arg_messages: MessageBuilder) -> Tuple[Type, Type]:
+ def check_union_call(
+ self,
+ callee: UnionType,
+ args: list[Expression],
+ arg_kinds: list[ArgKind],
+ arg_names: Sequence[str | None] | None,
+ context: Context,
+ ) -> tuple[Type, Type]:
with self.msg.disable_type_names():
results = [
- self.check_call(
- subtype,
- args,
- arg_kinds,
- context,
- arg_names,
- arg_messages=arg_messages,
- )
+ self.check_call(subtype, args, arg_kinds, context, arg_names)
for subtype in callee.relevant_items()
]
- return (make_simplified_union([res[0] for res in results]),
- callee)
+ return (make_simplified_union([res[0] for res in results]), callee)
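From the caller's side, the effect is (sketch; `reveal_type` output is what mypy reports):

```python
from typing import Callable, Union

from typing_extensions import reveal_type

def g(f: Union[Callable[[int], int], Callable[[int], str]]) -> None:
    # The call is checked against each union item separately and the
    # result types are unioned.
    reveal_type(f(1))  # Revealed type is "Union[builtins.int, builtins.str]"
```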
def visit_member_expr(self, e: MemberExpr, is_lvalue: bool = False) -> Type:
"""Visit member expression (of form e.id)."""
@@ -2087,38 +2758,60 @@ def visit_member_expr(self, e: MemberExpr, is_lvalue: bool = False) -> Type:
result = self.analyze_ordinary_member_access(e, is_lvalue)
return self.narrow_type_from_binder(e, result)
- def analyze_ordinary_member_access(self, e: MemberExpr,
- is_lvalue: bool) -> Type:
+ def analyze_ordinary_member_access(self, e: MemberExpr, is_lvalue: bool) -> Type:
"""Analyse member expression or member lvalue."""
if e.kind is not None:
# This is a reference to a module attribute.
return self.analyze_ref_expr(e)
else:
# This is a reference to a non-module attribute.
- original_type = self.accept(e.expr)
+ original_type = self.accept(e.expr, is_callee=self.is_callee)
base = e.expr
module_symbol_table = None
if isinstance(base, RefExpr) and isinstance(base.node, MypyFile):
module_symbol_table = base.node.names
+ if isinstance(base, RefExpr) and isinstance(base.node, Var):
+ is_self = base.node.is_self
+ else:
+ is_self = False
member_type = analyze_member_access(
- e.name, original_type, e, is_lvalue, False, False,
- self.msg, original_type=original_type, chk=self.chk,
+ e.name,
+ original_type,
+ e,
+ is_lvalue,
+ False,
+ False,
+ self.msg,
+ original_type=original_type,
+ chk=self.chk,
in_literal_context=self.is_literal_context(),
- module_symbol_table=module_symbol_table)
+ module_symbol_table=module_symbol_table,
+ is_self=is_self,
+ )
return member_type
- def analyze_external_member_access(self, member: str, base_type: Type,
- context: Context) -> Type:
+ def analyze_external_member_access(
+ self, member: str, base_type: Type, context: Context
+ ) -> Type:
"""Analyse member access that is external, i.e. it cannot
refer to private definitions. Return the result type.
"""
# TODO remove; no private definitions in mypy
- return analyze_member_access(member, base_type, context, False, False, False,
- self.msg, original_type=base_type, chk=self.chk,
- in_literal_context=self.is_literal_context())
+ return analyze_member_access(
+ member,
+ base_type,
+ context,
+ False,
+ False,
+ False,
+ self.msg,
+ original_type=base_type,
+ chk=self.chk,
+ in_literal_context=self.is_literal_context(),
+ )
def is_literal_context(self) -> bool:
return is_literal_type_like(self.type_context[-1])
@@ -2144,91 +2837,76 @@ def infer_literal_expr_type(self, value: LiteralValue, fallback_name: str) -> Ty
if self.is_literal_context():
return LiteralType(value=value, fallback=typ)
else:
- return typ.copy_modified(last_known_value=LiteralType(
- value=value,
- fallback=typ,
- line=typ.line,
- column=typ.column,
- ))
+ return typ.copy_modified(
+ last_known_value=LiteralType(
+ value=value, fallback=typ, line=typ.line, column=typ.column
+ )
+ )
def concat_tuples(self, left: TupleType, right: TupleType) -> TupleType:
"""Concatenate two fixed length tuples."""
- return TupleType(items=left.items + right.items,
- fallback=self.named_type('builtins.tuple'))
+ return TupleType(
+ items=left.items + right.items, fallback=self.named_type("builtins.tuple")
+ )
def visit_int_expr(self, e: IntExpr) -> Type:
"""Type check an integer literal (trivial)."""
- return self.infer_literal_expr_type(e.value, 'builtins.int')
+ return self.infer_literal_expr_type(e.value, "builtins.int")
def visit_str_expr(self, e: StrExpr) -> Type:
"""Type check a string literal (trivial)."""
- return self.infer_literal_expr_type(e.value, 'builtins.str')
+ return self.infer_literal_expr_type(e.value, "builtins.str")
def visit_bytes_expr(self, e: BytesExpr) -> Type:
"""Type check a bytes literal (trivial)."""
- return self.infer_literal_expr_type(e.value, 'builtins.bytes')
-
- def visit_unicode_expr(self, e: UnicodeExpr) -> Type:
- """Type check a unicode literal (trivial)."""
- return self.infer_literal_expr_type(e.value, 'builtins.unicode')
+ return self.infer_literal_expr_type(e.value, "builtins.bytes")
def visit_float_expr(self, e: FloatExpr) -> Type:
"""Type check a float literal (trivial)."""
- return self.named_type('builtins.float')
+ return self.named_type("builtins.float")
def visit_complex_expr(self, e: ComplexExpr) -> Type:
"""Type check a complex literal."""
- return self.named_type('builtins.complex')
+ return self.named_type("builtins.complex")
def visit_ellipsis(self, e: EllipsisExpr) -> Type:
"""Type check '...'."""
- if self.chk.options.python_version[0] >= 3:
- return self.named_type('builtins.ellipsis')
- else:
- # '...' is not valid in normal Python 2 code, but it can
- # be used in stubs. The parser makes sure that we only
- # get this far if we are in a stub, and we can safely
- # return 'object' as ellipsis is special cased elsewhere.
- # The builtins.ellipsis type does not exist in Python 2.
- return self.named_type('builtins.object')
+ return self.named_type("builtins.ellipsis")
def visit_op_expr(self, e: OpExpr) -> Type:
"""Type check a binary operator expression."""
- if e.op == 'and' or e.op == 'or':
+ if e.analyzed:
+ # It's actually a type expression X | Y.
+ return self.accept(e.analyzed)
+ if e.op == "and" or e.op == "or":
return self.check_boolean_op(e, e)
- if e.op == '*' and isinstance(e.left, ListExpr):
+ if e.op == "*" and isinstance(e.left, ListExpr):
# Expressions of form [...] * e get special type inference.
return self.check_list_multiply(e)
- if e.op == '%':
- pyversion = self.chk.options.python_version
- if pyversion[0] == 3:
- if isinstance(e.left, BytesExpr) and pyversion[1] >= 5:
- return self.strfrm_checker.check_str_interpolation(e.left, e.right)
- if isinstance(e.left, StrExpr):
- return self.strfrm_checker.check_str_interpolation(e.left, e.right)
- elif pyversion[0] == 2:
- if isinstance(e.left, (StrExpr, BytesExpr, UnicodeExpr)):
- return self.strfrm_checker.check_str_interpolation(e.left, e.right)
+ if e.op == "%":
+ if isinstance(e.left, BytesExpr) and self.chk.options.python_version >= (3, 5):
+ return self.strfrm_checker.check_str_interpolation(e.left, e.right)
+ if isinstance(e.left, StrExpr):
+ return self.strfrm_checker.check_str_interpolation(e.left, e.right)
left_type = self.accept(e.left)
proper_left_type = get_proper_type(left_type)
- if isinstance(proper_left_type, TupleType) and e.op == '+':
- left_add_method = proper_left_type.partial_fallback.type.get('__add__')
- if left_add_method and left_add_method.fullname == 'builtins.tuple.__add__':
+ if isinstance(proper_left_type, TupleType) and e.op == "+":
+ left_add_method = proper_left_type.partial_fallback.type.get("__add__")
+ if left_add_method and left_add_method.fullname == "builtins.tuple.__add__":
proper_right_type = get_proper_type(self.accept(e.right))
if isinstance(proper_right_type, TupleType):
- right_radd_method = proper_right_type.partial_fallback.type.get('__radd__')
+ right_radd_method = proper_right_type.partial_fallback.type.get("__radd__")
if right_radd_method is None:
return self.concat_tuples(proper_left_type, proper_right_type)
if e.op in operators.op_methods:
- method = self.get_operator_method(e.op)
- result, method_type = self.check_op(method, left_type, e.right, e,
- allow_reverse=True)
+ method = operators.op_methods[e.op]
+ result, method_type = self.check_op(method, left_type, e.right, e, allow_reverse=True)
e.method_type = method_type
return result
else:
- raise RuntimeError('Unknown operator {}'.format(e.op))
+ raise RuntimeError(f"Unknown operator {e.op}")
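The new `e.analyzed` branch covers PEP 604 unions written in runtime contexts, e.g. (a sketch; requires Python 3.10+ at runtime):

```python
# X | Y here is a type expression, not an arithmetic OpExpr, so
# visit_op_expr dispatches it via e.analyzed.
IntOrStr = int | str

def f(x: int | str) -> None: ...
```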
def visit_comparison_expr(self, e: ComparisonExpr) -> Type:
"""Type check a comparison expression.
@@ -2236,16 +2914,16 @@ def visit_comparison_expr(self, e: ComparisonExpr) -> Type:
Comparison expressions are type checked consecutive-pair-wise.
That is, 'a < b > c == d' is checked as 'a < b and b > c and c == d'
"""
- result: Optional[Type] = None
- sub_result: Optional[Type] = None
+ result: Type | None = None
+ sub_result: Type | None = None
# Check each consecutive operand pair and their operator
for left, right, operator in zip(e.operands, e.operands[1:], e.operators):
left_type = self.accept(left)
- method_type: Optional[mypy.types.Type] = None
+ method_type: mypy.types.Type | None = None
- if operator == 'in' or operator == 'not in':
+ if operator == "in" or operator == "not in":
# If the right operand has partial type, look it up without triggering
# a "Need type annotation ..." message, as it would be noise.
right_type = self.find_partial_type_ref_fast_path(right)
@@ -2254,15 +2932,15 @@ def visit_comparison_expr(self, e: ComparisonExpr) -> Type:
# Keep track of whether we get type check errors (these won't be reported, they
# are just to verify whether something is valid typing-wise).
- local_errors = self.msg.clean_copy()
- _, method_type = self.check_method_call_by_name(
- method='__contains__',
- base_type=right_type,
- args=[left],
- arg_kinds=[ARG_POS],
- context=e,
- local_errors=local_errors,
- )
+ with self.msg.filter_errors(save_filtered_errors=True) as local_errors:
+ _, method_type = self.check_method_call_by_name(
+ method="__contains__",
+ base_type=right_type,
+ args=[left],
+ arg_kinds=[ARG_POS],
+ context=e,
+ )
+
sub_result = self.bool_type()
# Container item type for strict type overlap checks. Note: we need to only
# check for nominal type, because a usual "Unsupported operands for in"
@@ -2272,58 +2950,62 @@ def visit_comparison_expr(self, e: ComparisonExpr) -> Type:
if isinstance(right_type, PartialType):
# We don't really know if this is an error or not, so just shut up.
pass
- elif (local_errors.is_errors() and
+ elif (
+ local_errors.has_new_errors()
+ and
# is_valid_var_arg is True for any Iterable
- self.is_valid_var_arg(right_type)):
+ self.is_valid_var_arg(right_type)
+ ):
_, itertype = self.chk.analyze_iterable_item_type(right)
method_type = CallableType(
[left_type],
[nodes.ARG_POS],
[None],
self.bool_type(),
- self.named_type('builtins.function'))
+ self.named_type("builtins.function"),
+ )
if not is_subtype(left_type, itertype):
- self.msg.unsupported_operand_types('in', left_type, right_type, e)
+ self.msg.unsupported_operand_types("in", left_type, right_type, e)
# Only show dangerous overlap if there are no other errors.
- elif (not local_errors.is_errors() and cont_type and
- self.dangerous_comparison(left_type, cont_type,
- original_container=right_type)):
- self.msg.dangerous_comparison(left_type, cont_type, 'container', e)
+ elif (
+ not local_errors.has_new_errors()
+ and cont_type
+ and self.dangerous_comparison(
+ left_type, cont_type, original_container=right_type, prefer_literal=False
+ )
+ ):
+ self.msg.dangerous_comparison(left_type, cont_type, "container", e)
else:
- self.msg.add_errors(local_errors)
+ self.msg.add_errors(local_errors.filtered_errors())
elif operator in operators.op_methods:
- method = self.get_operator_method(operator)
- err_count = self.msg.errors.total_errors()
- sub_result, method_type = self.check_op(method, left_type, right, e,
- allow_reverse=True)
+ method = operators.op_methods[operator]
+
+ with ErrorWatcher(self.msg.errors) as w:
+ sub_result, method_type = self.check_op(
+ method, left_type, right, e, allow_reverse=True
+ )
+
# Only show dangerous overlap if there are no other errors. See
# testCustomEqCheckStrictEquality for an example.
- if self.msg.errors.total_errors() == err_count and operator in ('==', '!='):
+ if not w.has_new_errors() and operator in ("==", "!="):
right_type = self.accept(right)
- # We suppress the error if there is a custom __eq__() method on either
- # side. User defined (or even standard library) classes can define this
- # to return True for comparisons between non-overlapping types.
- if (not custom_special_method(left_type, '__eq__') and
- not custom_special_method(right_type, '__eq__')):
- # Also flag non-overlapping literals in situations like:
- # x: Literal['a', 'b']
- # if x == 'c':
- # ...
+ if self.dangerous_comparison(left_type, right_type):
+ # Show the most specific literal types possible
left_type = try_getting_literal(left_type)
right_type = try_getting_literal(right_type)
- if self.dangerous_comparison(left_type, right_type):
- self.msg.dangerous_comparison(left_type, right_type, 'equality', e)
+ self.msg.dangerous_comparison(left_type, right_type, "equality", e)
- elif operator == 'is' or operator == 'is not':
+ elif operator == "is" or operator == "is not":
right_type = self.accept(right) # validate the right operand
sub_result = self.bool_type()
- left_type = try_getting_literal(left_type)
- right_type = try_getting_literal(right_type)
if self.dangerous_comparison(left_type, right_type):
- self.msg.dangerous_comparison(left_type, right_type, 'identity', e)
+ # Show the most specific literal types possible
+ left_type = try_getting_literal(left_type)
+ right_type = try_getting_literal(right_type)
+ self.msg.dangerous_comparison(left_type, right_type, "identity", e)
method_type = None
else:
- raise RuntimeError('Unknown comparison operator {}'.format(operator))
+ raise RuntimeError(f"Unknown comparison operator {operator}")
e.method_types.append(method_type)
@@ -2336,7 +3018,7 @@ def visit_comparison_expr(self, e: ComparisonExpr) -> Type:
assert result is not None
return result
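Sketch of what this reordering preserves under --strict-equality: literal types are now consulted only once a comparison already looks suspicious, so the reported types are as specific as possible (error wording is approximate):

```python
from typing import Literal

x: Literal["a", "b"] = "a"
if x == "c":  # error: non-overlapping equality check
    ...       # (left: Literal["a", "b"], right: Literal["c"])
```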
- def find_partial_type_ref_fast_path(self, expr: Expression) -> Optional[Type]:
+ def find_partial_type_ref_fast_path(self, expr: Expression) -> Type | None:
"""If expression has a partial generic type, return it without additional checks.
In particular, this does not generate an error about a missing annotation.
@@ -2348,12 +3030,18 @@ def find_partial_type_ref_fast_path(self, expr: Expression) -> Optional[Type]:
if isinstance(expr.node, Var):
result = self.analyze_var_ref(expr.node, expr)
if isinstance(result, PartialType) and result.type is not None:
- self.chk.store_type(expr, self.chk.fixup_partial_type(result))
+ self.chk.store_type(expr, fixup_partial_type(result))
return result
return None
- def dangerous_comparison(self, left: Type, right: Type,
- original_container: Optional[Type] = None) -> bool:
+ def dangerous_comparison(
+ self,
+ left: Type,
+ right: Type,
+ original_container: Type | None = None,
+ *,
+ prefer_literal: bool = True,
+ ) -> bool:
"""Check for dangerous non-overlapping comparisons like 42 == 'no'.
The original_container is the original container type for 'in' checks
@@ -2374,6 +3062,20 @@ def dangerous_comparison(self, left: Type, right: Type,
left, right = get_proper_types((left, right))
+ # We suppress the error if there is a custom __eq__() method on either
+ # side. User defined (or even standard library) classes can define this
+ # to return True for comparisons between non-overlapping types.
+ if custom_special_method(left, "__eq__") or custom_special_method(right, "__eq__"):
+ return False
+
+ if prefer_literal:
+ # Also flag non-overlapping literals in situations like:
+ # x: Literal['a', 'b']
+ # if x == 'c':
+ # ...
+ left = try_getting_literal(left)
+ right = try_getting_literal(right)
+
if self.chk.binder.is_unreachable_warning_suppressed():
# We are inside a function that contains type variables with value restrictions in
# its signature. In this case we just suppress all strict-equality checks to avoid
@@ -2393,104 +3095,108 @@ def dangerous_comparison(self, left: Type, right: Type,
left = remove_optional(left)
right = remove_optional(right)
left, right = get_proper_types((left, right))
- py2 = self.chk.options.python_version < (3, 0)
- if (original_container and has_bytes_component(original_container, py2) and
- has_bytes_component(left, py2)):
+ if (
+ original_container
+ and has_bytes_component(original_container)
+ and has_bytes_component(left)
+ ):
# We need to special case bytes and bytearray, because 97 in b'abc', b'a' in b'abc',
# b'a' in bytearray(b'abc') etc. all return True (and we want to show the error only
# if the check can _never_ be True).
return False
if isinstance(left, Instance) and isinstance(right, Instance):
# Special case some builtin implementations of AbstractSet.
- if (left.type.fullname in OVERLAPPING_TYPES_ALLOWLIST and
- right.type.fullname in OVERLAPPING_TYPES_ALLOWLIST):
- abstract_set = self.chk.lookup_typeinfo('typing.AbstractSet')
+ left_name = left.type.fullname
+ right_name = right.type.fullname
+ if (
+ left_name in OVERLAPPING_TYPES_ALLOWLIST
+ and right_name in OVERLAPPING_TYPES_ALLOWLIST
+ ):
+ abstract_set = self.chk.lookup_typeinfo("typing.AbstractSet")
left = map_instance_to_supertype(left, abstract_set)
right = map_instance_to_supertype(right, abstract_set)
- return not is_overlapping_types(left.args[0], right.args[0])
+ return self.dangerous_comparison(left.args[0], right.args[0])
+ elif left_name in ("builtins.list", "builtins.tuple") and right_name == left_name:
+ return self.dangerous_comparison(left.args[0], right.args[0])
if isinstance(left, LiteralType) and isinstance(right, LiteralType):
if isinstance(left.value, bool) and isinstance(right.value, bool):
# Comparing different booleans is not dangerous.
return False
return not is_overlapping_types(left, right, ignore_promotions=False)
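The new `builtins.list`/`builtins.tuple` branch recurses into element types, so under --strict-equality (a sketch; Python 3.9+ annotation syntax):

```python
a: list[int] = [1, 2]
b: list[str] = ["x"]
if a == b:  # now flagged: element types int and str can never overlap
    ...
```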
- def get_operator_method(self, op: str) -> str:
- if op == '/' and self.chk.options.python_version[0] == 2:
- return (
- '__truediv__'
- if self.chk.tree.is_future_flag_set('division')
- else '__div__'
- )
- else:
- return operators.op_methods[op]
-
- def check_method_call_by_name(self,
- method: str,
- base_type: Type,
- args: List[Expression],
- arg_kinds: List[ArgKind],
- context: Context,
- local_errors: Optional[MessageBuilder] = None,
- original_type: Optional[Type] = None
- ) -> Tuple[Type, Type]:
+ def check_method_call_by_name(
+ self,
+ method: str,
+ base_type: Type,
+ args: list[Expression],
+ arg_kinds: list[ArgKind],
+ context: Context,
+ original_type: Type | None = None,
+ ) -> tuple[Type, Type]:
"""Type check a call to a named method on an object.
Return tuple (result type, inferred method type). The 'original_type'
is used for error messages.
"""
- local_errors = local_errors or self.msg
original_type = original_type or base_type
# Unions are special-cased to allow plugins to act on each element of the union.
base_type = get_proper_type(base_type)
if isinstance(base_type, UnionType):
- return self.check_union_method_call_by_name(method, base_type,
- args, arg_kinds,
- context, local_errors, original_type)
-
- method_type = analyze_member_access(method, base_type, context, False, False, True,
- local_errors, original_type=original_type,
- chk=self.chk,
- in_literal_context=self.is_literal_context())
- return self.check_method_call(
- method, base_type, method_type, args, arg_kinds, context, local_errors)
-
- def check_union_method_call_by_name(self,
- method: str,
- base_type: UnionType,
- args: List[Expression],
- arg_kinds: List[ArgKind],
- context: Context,
- local_errors: MessageBuilder,
- original_type: Optional[Type] = None
- ) -> Tuple[Type, Type]:
+ return self.check_union_method_call_by_name(
+ method, base_type, args, arg_kinds, context, original_type
+ )
+
+ method_type = analyze_member_access(
+ method,
+ base_type,
+ context,
+ False,
+ False,
+ True,
+ self.msg,
+ original_type=original_type,
+ chk=self.chk,
+ in_literal_context=self.is_literal_context(),
+ )
+ return self.check_method_call(method, base_type, method_type, args, arg_kinds, context)
+
+ def check_union_method_call_by_name(
+ self,
+ method: str,
+ base_type: UnionType,
+ args: list[Expression],
+ arg_kinds: list[ArgKind],
+ context: Context,
+ original_type: Type | None = None,
+ ) -> tuple[Type, Type]:
"""Type check a call to a named method on an object with union type.
This essentially checks the call using check_method_call_by_name() for each
union item and unions the result. We do this to allow plugins to act on
individual union items.
"""
- res: List[Type] = []
- meth_res: List[Type] = []
+ res: list[Type] = []
+ meth_res: list[Type] = []
for typ in base_type.relevant_items():
# Format error messages consistently with
# mypy.checkmember.analyze_union_member_access().
- with local_errors.disable_type_names():
+ with self.msg.disable_type_names():
item, meth_item = self.check_method_call_by_name(
- method, typ, args, arg_kinds,
- context, local_errors, original_type,
+ method, typ, args, arg_kinds, context, original_type
)
res.append(item)
meth_res.append(meth_item)
return make_simplified_union(res), make_simplified_union(meth_res)
- def check_method_call(self,
- method_name: str,
- base_type: Type,
- method_type: Type,
- args: List[Expression],
- arg_kinds: List[ArgKind],
- context: Context,
- local_errors: Optional[MessageBuilder] = None) -> Tuple[Type, Type]:
+ def check_method_call(
+ self,
+ method_name: str,
+ base_type: Type,
+ method_type: Type,
+ args: list[Expression],
+ arg_kinds: list[ArgKind],
+ context: Context,
+ ) -> tuple[Type, Type]:
"""Type check a call to a method with the given name and type on an object.
Return tuple (result type, inferred method type).
@@ -2500,21 +3206,28 @@ def check_method_call(self,
# Try to refine the method signature using plugin hooks before checking the call.
method_type = self.transform_callee_type(
- callable_name, method_type, args, arg_kinds, context, object_type=object_type)
-
- return self.check_call(method_type, args, arg_kinds,
- context, arg_messages=local_errors,
- callable_name=callable_name, object_type=base_type)
-
- def check_op_reversible(self,
- op_name: str,
- left_type: Type,
- left_expr: Expression,
- right_type: Type,
- right_expr: Expression,
- context: Context,
- msg: MessageBuilder) -> Tuple[Type, Type]:
- def lookup_operator(op_name: str, base_type: Type) -> Optional[Type]:
+ callable_name, method_type, args, arg_kinds, context, object_type=object_type
+ )
+
+ return self.check_call(
+ method_type,
+ args,
+ arg_kinds,
+ context,
+ callable_name=callable_name,
+ object_type=base_type,
+ )
+
+ def check_op_reversible(
+ self,
+ op_name: str,
+ left_type: Type,
+ left_expr: Expression,
+ right_type: Type,
+ right_expr: Expression,
+ context: Context,
+ ) -> tuple[Type, Type]:
+ def lookup_operator(op_name: str, base_type: Type) -> Type | None:
"""Looks up the given operator and returns the corresponding type,
if it exists."""
@@ -2525,26 +3238,22 @@ def lookup_operator(op_name: str, base_type: Type) -> Optional[Type]:
if not self.has_member(base_type, op_name):
return None
- local_errors = msg.clean_copy()
-
- member = analyze_member_access(
- name=op_name,
- typ=base_type,
- is_lvalue=False,
- is_super=False,
- is_operator=True,
- original_type=base_type,
- context=context,
- msg=local_errors,
- chk=self.chk,
- in_literal_context=self.is_literal_context()
- )
- if local_errors.is_errors():
- return None
- else:
- return member
+ with self.msg.filter_errors() as w:
+ member = analyze_member_access(
+ name=op_name,
+ typ=base_type,
+ is_lvalue=False,
+ is_super=False,
+ is_operator=True,
+ original_type=base_type,
+ context=context,
+ msg=self.msg,
+ chk=self.chk,
+ in_literal_context=self.is_literal_context(),
+ )
+ return None if w.has_new_errors() else member
- def lookup_definer(typ: Instance, attr_name: str) -> Optional[str]:
+ def lookup_definer(typ: Instance, attr_name: str) -> str | None:
"""Returns the name of the class that contains the actual definition of attr_name.
So if class A defines foo and class B subclasses A, running
@@ -2577,7 +3286,7 @@ def lookup_definer(typ: Instance, attr_name: str) -> Optional[str]:
# STEP 1:
# We start by getting the __op__ and __rop__ methods, if they exist.
- rev_op_name = self.get_reverse_op_method(op_name)
+ rev_op_name = operators.reverse_op_methods[op_name]
left_op = lookup_operator(op_name, left_type)
right_op = lookup_operator(rev_op_name, right_type)
@@ -2590,7 +3299,6 @@ def lookup_definer(typ: Instance, attr_name: str) -> Optional[str]:
# We store the determined order inside the 'variants_raw' variable,
# which records tuples containing the method, base type, and the argument.
- bias_right = is_proper_subtype(right_type, left_type)
if op_name in operators.op_methods_that_shortcut and is_same_type(left_type, right_type):
# When we do "A() + A()", for example, Python will only call the __add__ method,
# never the __radd__ method.
@@ -2598,48 +3306,30 @@ def lookup_definer(typ: Instance, attr_name: str) -> Optional[str]:
# This is the case even if the __add__ method is completely missing and the __radd__
# method is defined.
- variants_raw = [
- (left_op, left_type, right_expr)
- ]
- elif (is_subtype(right_type, left_type)
- and isinstance(left_type, Instance)
- and isinstance(right_type, Instance)
- and lookup_definer(left_type, op_name) != lookup_definer(right_type, rev_op_name)):
- # When we do "A() + B()" where B is a subclass of B, we'll actually try calling
+ variants_raw = [(left_op, left_type, right_expr)]
+ elif (
+ is_subtype(right_type, left_type)
+ and isinstance(left_type, Instance)
+ and isinstance(right_type, Instance)
+ and left_type.type.alt_promote is not right_type.type
+ and lookup_definer(left_type, op_name) != lookup_definer(right_type, rev_op_name)
+ ):
+ # When we do "A() + B()" where B is a subclass of A, we'll actually try calling
# B's __radd__ method first, but ONLY if B explicitly defines or overrides the
# __radd__ method.
#
# This mechanism lets subclasses "refine" the expected outcome of the operation, even
# if they're located on the RHS.
+ #
+ # As a special case, the alt_promote check makes sure that we don't use the
+ # __radd__ method of int if the LHS is a native int type.
- variants_raw = [
- (right_op, right_type, left_expr),
- (left_op, left_type, right_expr),
- ]
+ variants_raw = [(right_op, right_type, left_expr), (left_op, left_type, right_expr)]
else:
# In all other cases, we do the usual thing and call __add__ first and
# __radd__ second when doing "A() + B()".
- variants_raw = [
- (left_op, left_type, right_expr),
- (right_op, right_type, left_expr),
- ]
-
- # STEP 2b:
- # When running Python 2, we might also try calling the __cmp__ method.
-
- is_python_2 = self.chk.options.python_version[0] == 2
- if is_python_2 and op_name in operators.ops_falling_back_to_cmp:
- cmp_method = operators.comparison_fallback_method
- left_cmp_op = lookup_operator(cmp_method, left_type)
- right_cmp_op = lookup_operator(cmp_method, right_type)
-
- if bias_right:
- variants_raw.append((right_cmp_op, right_type, left_expr))
- variants_raw.append((left_cmp_op, left_type, right_expr))
- else:
- variants_raw.append((left_cmp_op, left_type, right_expr))
- variants_raw.append((right_cmp_op, right_type, left_expr))
+ variants_raw = [(left_op, left_type, right_expr), (right_op, right_type, left_expr)]
# STEP 3:
# We now filter out all non-existent operators. The 'variants' list contains
@@ -2656,11 +3346,10 @@ def lookup_definer(typ: Instance, attr_name: str) -> Optional[str]:
errors = []
results = []
for method, obj, arg in variants:
- local_errors = msg.clean_copy()
- result = self.check_method_call(
- op_name, obj, method, [arg], [ARG_POS], context, local_errors)
- if local_errors.is_errors():
- errors.append(local_errors)
+ with self.msg.filter_errors(save_filtered_errors=True) as local_errors:
+ result = self.check_method_call(op_name, obj, method, [arg], [ARG_POS], context)
+ if local_errors.has_new_errors():
+ errors.append(local_errors.filtered_errors())
results.append(result)
else:
return result
@@ -2668,8 +3357,9 @@ def lookup_definer(typ: Instance, attr_name: str) -> Optional[str]:
# We finish invoking above operators and no early return happens. Therefore,
# we check if either the LHS or the RHS is Instance and fallbacks to Any,
# if so, we also return Any
- if ((isinstance(left_type, Instance) and left_type.type.fallback_to_any) or
- (isinstance(right_type, Instance) and right_type.type.fallback_to_any)):
+ if (isinstance(left_type, Instance) and left_type.type.fallback_to_any) or (
+ isinstance(right_type, Instance) and right_type.type.fallback_to_any
+ ):
any_type = AnyType(TypeOfAny.special_form)
return any_type, any_type
@@ -2678,12 +3368,13 @@ def lookup_definer(typ: Instance, attr_name: str) -> Optional[str]:
# call the __op__ method (even though it's missing).
if not variants:
- local_errors = msg.clean_copy()
- result = self.check_method_call_by_name(
- op_name, left_type, [right_expr], [ARG_POS], context, local_errors)
+ with self.msg.filter_errors(save_filtered_errors=True) as local_errors:
+ result = self.check_method_call_by_name(
+ op_name, left_type, [right_expr], [ARG_POS], context
+ )
- if local_errors.is_errors():
- errors.append(local_errors)
+ if local_errors.has_new_errors():
+ errors.append(local_errors.filtered_errors())
results.append(result)
else:
# In theory, we should never enter this case, but it seems
@@ -2696,7 +3387,7 @@ def lookup_definer(typ: Instance, attr_name: str) -> Optional[str]:
# TODO: Remove this extra case
return result
- msg.add_errors(errors[0])
+ self.msg.add_errors(errors[0])
if len(results) == 1:
return results[0]
else:
@@ -2704,9 +3395,14 @@ def lookup_definer(typ: Instance, attr_name: str) -> Optional[str]:
result = error_any, error_any
return result
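Illustrative of the reverse-operator ordering decided above (a sketch; `reveal_type` output is what mypy reports):

```python
from typing_extensions import reveal_type

class A:
    def __add__(self, other: "A") -> "A":
        return self

class B(A):
    def __radd__(self, other: A) -> "B":
        return self

# B subclasses A and overrides __radd__, so B.__radd__ is tried first,
# letting the RHS subclass refine the result type.
reveal_type(A() + B())  # Revealed type is "B"
```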
- def check_op(self, method: str, base_type: Type,
- arg: Expression, context: Context,
- allow_reverse: bool = False) -> Tuple[Type, Type]:
+ def check_op(
+ self,
+ method: str,
+ base_type: Type,
+ arg: Expression,
+ context: Context,
+ allow_reverse: bool = False,
+ ) -> tuple[Type, Type]:
"""Type check a binary operation which maps to a method call.
Return tuple (result type, inferred operator method type).
@@ -2716,31 +3412,31 @@ def check_op(self, method: str, base_type: Type,
left_variants = [base_type]
base_type = get_proper_type(base_type)
if isinstance(base_type, UnionType):
- left_variants = [item for item in
- flatten_nested_unions(base_type.relevant_items(),
- handle_type_alias_type=True)]
+ left_variants = [
+ item for item in flatten_nested_unions(base_type.relevant_items())
+ ]
right_type = self.accept(arg)
# Step 1: We first try leaving the right arguments alone and destructure
# just the left ones. (Mypy can sometimes perform some more precise inference
# if we leave the right operands a union -- see testOperatorWithEmptyListAndSum.)
- msg = self.msg.clean_copy()
all_results = []
all_inferred = []
- for left_possible_type in left_variants:
- result, inferred = self.check_op_reversible(
- op_name=method,
- left_type=left_possible_type,
- left_expr=TempNode(left_possible_type, context=context),
- right_type=right_type,
- right_expr=arg,
- context=context,
- msg=msg)
- all_results.append(result)
- all_inferred.append(inferred)
+ with self.msg.filter_errors() as local_errors:
+ for left_possible_type in left_variants:
+ result, inferred = self.check_op_reversible(
+ op_name=method,
+ left_type=left_possible_type,
+ left_expr=TempNode(left_possible_type, context=context),
+ right_type=right_type,
+ right_expr=arg,
+ context=context,
+ )
+ all_results.append(result)
+ all_inferred.append(inferred)
- if not msg.is_errors():
+ if not local_errors.has_new_errors():
results_final = make_simplified_union(all_results)
inferred_final = make_simplified_union(all_inferred)
return results_final, inferred_final
@@ -2761,45 +3457,47 @@ def check_op(self, method: str, base_type: Type,
if isinstance(right_type, UnionType):
right_variants = [
(item, TempNode(item, context=context))
- for item in flatten_nested_unions(right_type.relevant_items(),
- handle_type_alias_type=True)
+ for item in flatten_nested_unions(right_type.relevant_items())
]
- msg = self.msg.clean_copy()
all_results = []
all_inferred = []
- for left_possible_type in left_variants:
- for right_possible_type, right_expr in right_variants:
- result, inferred = self.check_op_reversible(
- op_name=method,
- left_type=left_possible_type,
- left_expr=TempNode(left_possible_type, context=context),
- right_type=right_possible_type,
- right_expr=right_expr,
- context=context,
- msg=msg)
- all_results.append(result)
- all_inferred.append(inferred)
-
- if msg.is_errors():
- self.msg.add_errors(msg)
+ with self.msg.filter_errors(save_filtered_errors=True) as local_errors:
+ for left_possible_type in left_variants:
+ for right_possible_type, right_expr in right_variants:
+ result, inferred = self.check_op_reversible(
+ op_name=method,
+ left_type=left_possible_type,
+ left_expr=TempNode(left_possible_type, context=context),
+ right_type=right_possible_type,
+ right_expr=right_expr,
+ context=context,
+ )
+ all_results.append(result)
+ all_inferred.append(inferred)
+
+ if local_errors.has_new_errors():
+ self.msg.add_errors(local_errors.filtered_errors())
# Point any notes to the same location as an existing message.
- recent_context = msg.most_recent_context()
+ err = local_errors.filtered_errors()[-1]
+ recent_context = TempNode(NoneType())
+ recent_context.line = err.line
+ recent_context.column = err.column
if len(left_variants) >= 2 and len(right_variants) >= 2:
self.msg.warn_both_operands_are_from_unions(recent_context)
elif len(left_variants) >= 2:
- self.msg.warn_operand_was_from_union(
- "Left", base_type, context=recent_context)
+ self.msg.warn_operand_was_from_union("Left", base_type, context=recent_context)
elif len(right_variants) >= 2:
self.msg.warn_operand_was_from_union(
- "Right", right_type, context=recent_context)
+ "Right", right_type, context=recent_context
+ )
# See the comment in 'check_overload_call' for more details on why
# we call 'combine_function_signature' instead of just unioning the inferred
# callable types.
results_final = make_simplified_union(all_results)
- inferred_final = self.combine_function_signatures(all_inferred)
+ inferred_final = self.combine_function_signatures(get_proper_types(all_inferred))
return results_final, inferred_final
else:
return self.check_method_call_by_name(
@@ -2808,15 +3506,8 @@ def check_op(self, method: str, base_type: Type,
args=[arg],
arg_kinds=[ARG_POS],
context=context,
- local_errors=self.msg,
)
- def get_reverse_op_method(self, method: str) -> str:
- if method == '__div__' and self.chk.options.python_version[0] == 2:
- return '__rdiv__'
- else:
- return operators.reverse_op_methods[method]
-
def check_boolean_op(self, e: OpExpr, context: Context) -> Type:
"""Type check a boolean operation ('and' or 'or')."""
@@ -2832,15 +3523,16 @@ def check_boolean_op(self, e: OpExpr, context: Context) -> Type:
self.accept(e.left, ctx), "builtins.bool"
)
- assert e.op in ('and', 'or') # Checked by visit_op_expr
+ assert e.op in ("and", "or") # Checked by visit_op_expr
if e.right_always:
- left_map, right_map = None, {} # type: mypy.checker.TypeMap, mypy.checker.TypeMap
+ left_map: mypy.checker.TypeMap = None
+ right_map: mypy.checker.TypeMap = {}
elif e.right_unreachable:
left_map, right_map = {}, None
- elif e.op == 'and':
+ elif e.op == "and":
right_map, left_map = self.chk.find_isinstance_check(e.left)
- elif e.op == 'or':
+ elif e.op == "or":
left_map, right_map = self.chk.find_isinstance_check(e.left)
# If left_map is None then we know mypy considers the left expression
@@ -2864,7 +3556,7 @@ def check_boolean_op(self, e: OpExpr, context: Context) -> Type:
# If right_map is None then we know mypy considers the right branch
# to be unreachable and therefore any errors found in the right branch
# should be suppressed.
- with (self.msg.disable_errors() if right_map is None else nullcontext()):
+ with self.msg.filter_errors(filter_errors=right_map is None):
right_type = self.analyze_cond_branch(right_map, e.right, expanded_left_type)
if left_map is None and right_map is None:
@@ -2879,10 +3571,10 @@ def check_boolean_op(self, e: OpExpr, context: Context) -> Type:
assert right_map is not None
return right_type
- if e.op == 'and':
+ if e.op == "and":
restricted_left_type = false_only(expanded_left_type)
result_is_left = not expanded_left_type.can_be_true
- elif e.op == 'or':
+ elif e.op == "or":
restricted_left_type = true_only(expanded_left_type)
result_is_left = not expanded_left_type.can_be_false
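The user-visible effect of this narrowing (a sketch): with 'or', the right operand is only reached when the left is falsy, so the falsy-restricted left type drops out of the result.

```python
from typing import Optional

def f(x: Optional[str]) -> str:
    # The right side only runs when x is None or "", so the whole
    # expression is always str.
    return x or "default"
```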
@@ -2901,13 +3593,13 @@ def check_list_multiply(self, e: OpExpr) -> Type:
Type inference is special-cased for this common construct.
"""
right_type = self.accept(e.right)
- if is_subtype(right_type, self.named_type('builtins.int')):
+ if is_subtype(right_type, self.named_type("builtins.int")):
# Special case: [...] * &lt;int value&gt;. Use the type context of the
# OpExpr, since the multiplication does not affect the type.
left_type = self.accept(e.left, type_context=self.type_context[-1])
else:
left_type = self.accept(e.left)
- result, method_type = self.check_op('__mul__', left_type, e.right, e)
+ result, method_type = self.check_op("__mul__", left_type, e.right, e)
e.method_type = method_type
return result
@@ -2923,7 +3615,7 @@ def visit_unary_expr(self, e: UnaryExpr) -> Type:
"""Type check an unary operation ('not', '-', '+' or '~')."""
operand_type = self.accept(e.expr)
op = e.op
- if op == 'not':
+ if op == "not":
result: Type = self.bool_type()
else:
method = operators.unary_op_methods[op]
@@ -2937,10 +3629,14 @@ def visit_index_expr(self, e: IndexExpr) -> Type:
It may also represent type application.
"""
result = self.visit_index_expr_helper(e)
- result = get_proper_type(self.narrow_type_from_binder(e, result))
- if (self.is_literal_context() and isinstance(result, Instance)
- and result.last_known_value is not None):
- result = result.last_known_value
+ result = self.narrow_type_from_binder(e, result)
+ p_result = get_proper_type(result)
+ if (
+ self.is_literal_context()
+ and isinstance(p_result, Instance)
+ and p_result.last_known_value is not None
+ ):
+ result = p_result.last_known_value
return result
def visit_index_expr_helper(self, e: IndexExpr) -> Type:
@@ -2950,8 +3646,9 @@ def visit_index_expr_helper(self, e: IndexExpr) -> Type:
left_type = self.accept(e.base)
return self.visit_index_with_type(left_type, e)
- def visit_index_with_type(self, left_type: Type, e: IndexExpr,
- original_type: Optional[ProperType] = None) -> Type:
+ def visit_index_with_type(
+ self, left_type: Type, e: IndexExpr, original_type: ProperType | None = None
+ ) -> Type:
"""Analyze type of an index expression for a given type of base expression.
The 'original_type' is used for error messages (currently used for union types).
@@ -2965,10 +3662,13 @@ def visit_index_with_type(self, left_type: Type, e: IndexExpr,
if isinstance(left_type, UnionType):
original_type = original_type or left_type
# Don't combine literal types, since we may need them for type narrowing.
- return make_simplified_union([self.visit_index_with_type(typ, e,
- original_type)
- for typ in left_type.relevant_items()],
- contract_literals=False)
+ return make_simplified_union(
+ [
+ self.visit_index_with_type(typ, e, original_type)
+ for typ in left_type.relevant_items()
+ ],
+ contract_literals=False,
+ )
elif isinstance(left_type, TupleType) and self.chk.in_checked_function():
# Special case for tuples. They return a more specific type when
# indexed by an integer literal.
@@ -2991,23 +3691,27 @@ def visit_index_with_type(self, left_type: Type, e: IndexExpr,
return self.nonliteral_tuple_index_helper(left_type, index)
elif isinstance(left_type, TypedDictType):
return self.visit_typeddict_index_expr(left_type, e.index)
- elif (isinstance(left_type, CallableType)
- and left_type.is_type_obj() and left_type.type_object().is_enum):
+ elif (
+ isinstance(left_type, CallableType)
+ and left_type.is_type_obj()
+ and left_type.type_object().is_enum
+ ):
return self.visit_enum_index_expr(left_type.type_object(), e.index, e)
- elif (isinstance(left_type, TypeVarType)
- and not self.has_member(left_type.upper_bound, "__getitem__")):
+ elif isinstance(left_type, TypeVarType) and not self.has_member(
+ left_type.upper_bound, "__getitem__"
+ ):
return self.visit_index_with_type(left_type.upper_bound, e, original_type)
else:
result, method_type = self.check_method_call_by_name(
- '__getitem__', left_type, [e.index], [ARG_POS], e,
- original_type=original_type)
+ "__getitem__", left_type, [e.index], [ARG_POS], e, original_type=original_type
+ )
e.method_type = method_type
return result
def visit_tuple_slice_helper(self, left_type: TupleType, slic: SliceExpr) -> Type:
- begin: Sequence[Optional[int]] = [None]
- end: Sequence[Optional[int]] = [None]
- stride: Sequence[Optional[int]] = [None]
+ begin: Sequence[int | None] = [None]
+ end: Sequence[int | None] = [None]
+ stride: Sequence[int | None] = [None]
if slic.begin_index:
begin_raw = self.try_getting_int_literals(slic.begin_index)
@@ -3027,12 +3731,12 @@ def visit_tuple_slice_helper(self, left_type: TupleType, slic: SliceExpr) -> Typ
return self.nonliteral_tuple_index_helper(left_type, slic)
stride = stride_raw
- items: List[Type] = []
+ items: list[Type] = []
for b, e, s in itertools.product(begin, end, stride):
items.append(left_type.slice(b, e, s))
return make_simplified_union(items)
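What the literal-index handling buys (a sketch; Python 3.9+ syntax, `reveal_type` output is what mypy reports):

```python
from typing_extensions import reveal_type

t: tuple[int, str, float] = (1, "a", 2.0)
reveal_type(t[0])   # "int": a literal index selects the exact item type
reveal_type(t[:2])  # "tuple[int, str]": literal slice bounds are handled too

i: int = 0
reveal_type(t[i])   # "Union[int, str, float]": a nonliteral index falls
                    # back to the union of all item types
```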
- def try_getting_int_literals(self, index: Expression) -> Optional[List[int]]:
+ def try_getting_int_literals(self, index: Expression) -> list[int] | None:
"""If the given expression or type corresponds to an int literal
or a union of int literals, returns a list of the underlying ints.
Otherwise, returns None.
@@ -3047,7 +3751,7 @@ def try_getting_int_literals(self, index: Expression) -> Optional[List[int]]:
if isinstance(index, IntExpr):
return [index.value]
elif isinstance(index, UnaryExpr):
- if index.op == '-':
+ if index.op == "-":
operand = index.expr
if isinstance(operand, IntExpr):
return [-1 * operand.value]
@@ -3067,31 +3771,20 @@ def try_getting_int_literals(self, index: Expression) -> Optional[List[int]]:
return None
def nonliteral_tuple_index_helper(self, left_type: TupleType, index: Expression) -> Type:
- index_type = self.accept(index)
- expected_type = UnionType.make_union([self.named_type('builtins.int'),
- self.named_type('builtins.slice')])
- if not self.chk.check_subtype(index_type, expected_type, index,
- message_registry.INVALID_TUPLE_INDEX_TYPE,
- 'actual type', 'expected type'):
- return AnyType(TypeOfAny.from_error)
- else:
- union = make_simplified_union(left_type.items)
- if isinstance(index, SliceExpr):
- return self.chk.named_generic_type('builtins.tuple', [union])
- else:
- return union
-
- def visit_typeddict_index_expr(self, td_type: TypedDictType,
- index: Expression,
- local_errors: Optional[MessageBuilder] = None
- ) -> Type:
- local_errors = local_errors or self.msg
- if isinstance(index, (StrExpr, UnicodeExpr)):
+ self.check_method_call_by_name("__getitem__", left_type, [index], [ARG_POS], context=index)
+ # We could return the return type from above, but unions are often better than the join
+ union = make_simplified_union(left_type.items)
+ if isinstance(index, SliceExpr):
+ return self.chk.named_generic_type("builtins.tuple", [union])
+ return union
+
+ def visit_typeddict_index_expr(self, td_type: TypedDictType, index: Expression) -> Type:
+ if isinstance(index, StrExpr):
key_names = [index.value]
else:
typ = get_proper_type(self.accept(index))
if isinstance(typ, UnionType):
- key_types: List[Type] = list(typ.items)
+ key_types: list[Type] = list(typ.items)
else:
key_types = [typ]
@@ -3100,19 +3793,21 @@ def visit_typeddict_index_expr(self, td_type: TypedDictType,
if isinstance(key_type, Instance) and key_type.last_known_value is not None:
key_type = key_type.last_known_value
- if (isinstance(key_type, LiteralType)
- and isinstance(key_type.value, str)
- and key_type.fallback.type.fullname != 'builtins.bytes'):
+ if (
+ isinstance(key_type, LiteralType)
+ and isinstance(key_type.value, str)
+ and key_type.fallback.type.fullname != "builtins.bytes"
+ ):
key_names.append(key_type.value)
else:
- local_errors.typeddict_key_must_be_string_literal(td_type, index)
+ self.msg.typeddict_key_must_be_string_literal(td_type, index)
return AnyType(TypeOfAny.from_error)
value_types = []
for key_name in key_names:
value_type = td_type.items.get(key_name)
if value_type is None:
- local_errors.typeddict_key_not_found(td_type, key_name, index)
+ self.msg.typeddict_key_not_found(td_type, key_name, index)
return AnyType(TypeOfAny.from_error)
else:
value_types.append(value_type)
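Sketch of the key handling this implements (union keys included; the failing access is commented because it would also raise KeyError at runtime):

```python
from typing import Literal, TypedDict

from typing_extensions import reveal_type

class Movie(TypedDict):
    name: str
    year: int

m: Movie = {"name": "Blade Runner", "year": 1982}
key: Literal["name", "year"] = "name"
reveal_type(m[key])  # "Union[str, int]": value types are unioned over
                     # the possible literal keys
# m["director"]      # error: TypedDict "Movie" has no key "director"
```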
@@ -3122,39 +3817,68 @@ def visit_enum_index_expr(
self, enum_type: TypeInfo, index: Expression, context: Context
) -> Type:
string_type: Type = self.named_type("builtins.str")
- if self.chk.options.python_version[0] < 3:
- string_type = UnionType.make_union([string_type,
- self.named_type('builtins.unicode')])
- self.chk.check_subtype(self.accept(index), string_type, context,
- "Enum index should be a string", "actual index type")
+ self.chk.check_subtype(
+ self.accept(index),
+ string_type,
+ context,
+ "Enum index should be a string",
+ "actual index type",
+ )
return Instance(enum_type, [])
def visit_cast_expr(self, expr: CastExpr) -> Type:
"""Type check a cast expression."""
- source_type = self.accept(expr.expr, type_context=AnyType(TypeOfAny.special_form),
- allow_none_return=True, always_allow_any=True)
+ source_type = self.accept(
+ expr.expr,
+ type_context=AnyType(TypeOfAny.special_form),
+ allow_none_return=True,
+ always_allow_any=True,
+ )
target_type = expr.type
options = self.chk.options
- if (options.warn_redundant_casts and not isinstance(get_proper_type(target_type), AnyType)
- and is_same_type(source_type, target_type)):
+ if (
+ options.warn_redundant_casts
+ and not isinstance(get_proper_type(target_type), AnyType)
+ and source_type == target_type
+ ):
self.msg.redundant_cast(target_type, expr)
if options.disallow_any_unimported and has_any_from_unimported_type(target_type):
self.msg.unimported_type_becomes_any("Target type of cast", target_type, expr)
- check_for_explicit_any(target_type, self.chk.options, self.chk.is_typeshed_stub, self.msg,
- context=expr)
+ check_for_explicit_any(
+ target_type, self.chk.options, self.chk.is_typeshed_stub, self.msg, context=expr
+ )
return target_type
+ def visit_assert_type_expr(self, expr: AssertTypeExpr) -> Type:
+ source_type = self.accept(
+ expr.expr,
+ type_context=self.type_context[-1],
+ allow_none_return=True,
+ always_allow_any=True,
+ )
+ target_type = expr.type
+ if not is_same_type(source_type, target_type):
+ if not self.chk.in_checked_function():
+ self.msg.note(
+ '"assert_type" expects everything to be "Any" in unchecked functions',
+ expr.expr,
+ )
+ self.msg.assert_type_fail(source_type, target_type, expr)
+ return source_type
+
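Usage sketch for the new `assert_type` support (at runtime `assert_type` simply returns its argument; mypy reports the mismatch):

```python
from typing_extensions import assert_type  # typing.assert_type on 3.11+

def f(x: int) -> None:
    assert_type(x, int)  # OK: the inferred type matches exactly
    assert_type(x, str)  # error: expression has type "int", not "str"
```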
def visit_reveal_expr(self, expr: RevealExpr) -> Type:
"""Type check a reveal_type expression."""
if expr.kind == REVEAL_TYPE:
assert expr.expr is not None
- revealed_type = self.accept(expr.expr, type_context=self.type_context[-1],
- allow_none_return=True)
+ revealed_type = self.accept(
+ expr.expr, type_context=self.type_context[-1], allow_none_return=True
+ )
if not self.chk.current_node_deferred:
self.msg.reveal_type(revealed_type, expr.expr)
if not self.chk.in_checked_function():
- self.msg.note("'reveal_type' always outputs 'Any' in unchecked functions",
- expr.expr)
+ self.msg.note(
+ "'reveal_type' always outputs 'Any' in unchecked functions", expr.expr
+ )
return revealed_type
else:
# REVEAL_LOCALS
@@ -3162,9 +3886,11 @@ def visit_reveal_expr(self, expr: RevealExpr) -> Type:
# the RevealExpr contains a local_nodes attribute,
# calculated at semantic analysis time. Use it to pull out the
# corresponding subset of variables in self.chk.type_map
- names_to_types = {
- var_node.name: var_node.type for var_node in expr.local_nodes
- } if expr.local_nodes is not None else {}
+ names_to_types = (
+ {var_node.name: var_node.type for var_node in expr.local_nodes}
+ if expr.local_nodes is not None
+ else {}
+ )
self.msg.reveal_locals(names_to_types, expr)
return NoneType()
@@ -3174,19 +3900,23 @@ def visit_type_application(self, tapp: TypeApplication) -> Type:
There are two different options here, depending on whether expr refers
to a type alias or directly to a generic class. In the first case we need
- to use a dedicated function typeanal.expand_type_aliases. This
- is due to the fact that currently type aliases machinery uses
- unbound type variables, while normal generics use bound ones;
- see TypeAlias docstring for more details.
+ to use a dedicated function typeanal.expand_type_alias(). This
+ is due to some differences in how type arguments are applied and checked.
"""
if isinstance(tapp.expr, RefExpr) and isinstance(tapp.expr.node, TypeAlias):
# Subscription of a (generic) alias in runtime context, expand the alias.
- item = expand_type_alias(tapp.expr.node, tapp.types, self.chk.fail,
- tapp.expr.node.no_args, tapp)
+ item = expand_type_alias(
+ tapp.expr.node, tapp.types, self.chk.fail, tapp.expr.node.no_args, tapp
+ )
item = get_proper_type(item)
if isinstance(item, Instance):
tp = type_object_type(item.type, self.named_type)
return self.apply_type_arguments_to_callable(tp, item.args, tapp)
+ elif isinstance(item, TupleType) and item.partial_fallback.type.is_named_tuple:
+ tp = type_object_type(item.partial_fallback.type, self.named_type)
+ return self.apply_type_arguments_to_callable(tp, item.partial_fallback.args, tapp)
+ elif isinstance(item, TypedDictType):
+ return self.typeddict_callable_from_context(item)
else:
self.chk.fail(message_registry.ONLY_CLASS_APPLICATION, tapp)
return AnyType(TypeOfAny.from_error)
@@ -3213,13 +3943,11 @@ def visit_type_alias_expr(self, alias: TypeAliasExpr) -> Type:
both `reveal_type` instances will reveal the same type `def (...) -> builtins.list[Any]`.
Note that type variables are implicitly substituted with `Any`.
"""
- return self.alias_type_in_runtime_context(alias.node, alias.no_args,
- alias, alias_definition=True)
+ return self.alias_type_in_runtime_context(alias.node, ctx=alias, alias_definition=True)
- def alias_type_in_runtime_context(self, alias: TypeAlias,
- no_args: bool, ctx: Context,
- *,
- alias_definition: bool = False) -> Type:
+ def alias_type_in_runtime_context(
+ self, alias: TypeAlias, *, ctx: Context, alias_definition: bool = False
+ ) -> Type:
"""Get type of a type alias (could be generic) in a runtime expression.
Note that this function can be called only if the alias appears _not_
@@ -3233,32 +3961,42 @@ class LongName(Generic[T]): ...
x = A()
y = cast(A, ...)
"""
- if isinstance(alias.target, Instance) and alias.target.invalid: # type: ignore
+ if isinstance(alias.target, Instance) and alias.target.invalid: # type: ignore[misc]
# An invalid alias, error already has been reported
return AnyType(TypeOfAny.from_error)
# If this is a generic alias, we set all variables to `Any`.
# For example:
# A = List[Tuple[T, T]]
# x = A() <- same as List[Tuple[Any, Any]], see PEP 484.
- item = get_proper_type(set_any_tvars(alias, ctx.line, ctx.column))
+ disallow_any = self.chk.options.disallow_any_generics and self.is_callee
+ item = get_proper_type(
+ set_any_tvars(
+ alias, ctx.line, ctx.column, disallow_any=disallow_any, fail=self.msg.fail
+ )
+ )
if isinstance(item, Instance):
# Normally we get a callable type (or overloaded) with .is_type_obj() true
# representing the class's constructor
tp = type_object_type(item.type, self.named_type)
- if no_args:
+ if alias.no_args:
return tp
return self.apply_type_arguments_to_callable(tp, item.args, ctx)
- elif (isinstance(item, TupleType) and
- # Tuple[str, int]() fails at runtime, only named tuples and subclasses work.
- tuple_fallback(item).type.fullname != 'builtins.tuple'):
+ elif (
+ isinstance(item, TupleType)
+ and
+ # Tuple[str, int]() fails at runtime, only named tuples and subclasses work.
+ tuple_fallback(item).type.fullname != "builtins.tuple"
+ ):
return type_object_type(tuple_fallback(item).type, self.named_type)
+ elif isinstance(item, TypedDictType):
+ return self.typeddict_callable_from_context(item)
elif isinstance(item, AnyType):
return AnyType(TypeOfAny.from_another_any, source_any=item)
else:
if alias_definition:
return AnyType(TypeOfAny.special_form)
- # This type is invalid in most runtime contexts, give it an 'object' type.
- return self.named_type('builtins.object')
+ # The _SpecialForm type can be used in some runtime contexts (e.g. it may have __or__).
+ return self.named_type("typing._SpecialForm")
def apply_type_arguments_to_callable(
self, tp: Type, args: Sequence[Type], ctx: Context
@@ -3274,30 +4012,30 @@ def apply_type_arguments_to_callable(
if isinstance(tp, CallableType):
if len(tp.variables) != len(args):
- self.msg.incompatible_type_application(len(tp.variables),
- len(args), ctx)
+ if tp.is_type_obj() and tp.type_object().fullname == "builtins.tuple":
+ # TODO: Specialize the callable for the type arguments
+ return tp
+ self.msg.incompatible_type_application(len(tp.variables), len(args), ctx)
return AnyType(TypeOfAny.from_error)
return self.apply_generic_arguments(tp, args, ctx)
if isinstance(tp, Overloaded):
for it in tp.items:
if len(it.variables) != len(args):
- self.msg.incompatible_type_application(len(it.variables),
- len(args), ctx)
+ self.msg.incompatible_type_application(len(it.variables), len(args), ctx)
return AnyType(TypeOfAny.from_error)
- return Overloaded([self.apply_generic_arguments(it, args, ctx)
- for it in tp.items])
+ return Overloaded([self.apply_generic_arguments(it, args, ctx) for it in tp.items])
return AnyType(TypeOfAny.special_form)
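Usage sketch for explicit type application in a runtime expression (the bad application is commented because it also fails at runtime):

```python
from typing import Generic, TypeVar

T = TypeVar("T")

class Box(Generic[T]):
    def __init__(self, item: T) -> None:
        self.item = item

b = Box[int](1)     # explicit type application
# Box[int, str](1)  # error: type application has too many types
```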
def visit_list_expr(self, e: ListExpr) -> Type:
"""Type check a list expression [...]."""
- return self.check_lst_expr(e.items, 'builtins.list', '', e)
+ return self.check_lst_expr(e, "builtins.list", "")
def visit_set_expr(self, e: SetExpr) -> Type:
- return self.check_lst_expr(e.items, 'builtins.set', '