diff --git a/.bumpversion.cfg b/.bumpversion.cfg
index e2aa0e98..e8560a6a 100644
--- a/.bumpversion.cfg
+++ b/.bumpversion.cfg
@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 3.3.0a6
+current_version = 3.3.0a7
 commit = False
 tag = False
diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml
new file mode 100644
index 00000000..fce1037f
--- /dev/null
+++ b/.github/workflows/benchmark.yml
@@ -0,0 +1,35 @@
+name: Performance
+
+on:
+  push:
+    branches:
+      - "main"
+  pull_request:
+  workflow_dispatch:
+
+jobs:
+  benchmarks:
+    name: 📈 Benchmarks
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Set up Python 3.12
+        uses: actions/setup-python@v5
+        id: setup-python
+        with:
+          python-version: "3.12"
+          architecture: x64
+
+      - name: Install with poetry
+        run: |
+          pipx install poetry
+          poetry env use 3.12
+          poetry install --with test
+
+      - name: Run benchmarks with CodSpeed
+        uses: CodSpeedHQ/action@v3
+        with:
+          token: ${{ secrets.CODSPEED_TOKEN }}
+          run: poetry run pytest tests --benchmark-enable --codspeed
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 74f14604..703a56aa 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -4,6 +4,7 @@ on: [push, pull_request]
 
 jobs:
   lint:
+    name: 🧹 Lint
     runs-on: ubuntu-latest
 
     steps:
diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml
index 561b3028..8bd8c296 100644
--- a/.github/workflows/publish.yml
+++ b/.github/workflows/publish.yml
@@ -7,6 +7,7 @@ on:
 
 jobs:
   build:
+    name: 🏗️ Build
     runs-on: ubuntu-latest
 
     steps:
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index e99059b8..581528cc 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -4,17 +4,42 @@ on: [push, pull_request]
 
 jobs:
   tests:
+    name: 🧪 Tests
     runs-on: ubuntu-latest
 
     strategy:
       matrix:
-        python-version: ['3.7', '3.8', '3.9', '3.10', '3.11', '3.12', 'pypy3.9', 'pypy3.10']
+        python-version: ['3.9', '3.10', '3.11', '3.12', '3.13', 'pypy3.9', 'pypy3.10']
 
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
 
       - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install "tox>=4.24,<5" "tox-gh-actions>=3.2,<4"
+
+      - name: Run unit tests with tox
+        run: tox
+
+  tests-old:
+    name: 🧪 Tests (older Python versions)
+    runs-on: ubuntu-22.04
+
+    strategy:
+      matrix:
+        python-version: ['3.7', '3.8']
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python-version }}
 
diff --git a/README.md b/README.md
index 7a0a1e7a..aa36c84d 100644
--- a/README.md
+++ b/README.md
@@ -6,19 +6,20 @@ a query language for APIs created by Facebook.
 
 [![PyPI version](https://badge.fury.io/py/graphql-core.svg)](https://badge.fury.io/py/graphql-core)
 [![Documentation Status](https://readthedocs.org/projects/graphql-core-3/badge/)](https://graphql-core-3.readthedocs.io)
-![Test Status](https://github.com/graphql-python/graphql-core/actions/workflows/test.yml/badge.svg)
-![Lint Status](https://github.com/graphql-python/graphql-core/actions/workflows/lint.yml/badge.svg)
-[![Code Style](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/ambv/black)
+[![Test Status](https://github.com/graphql-python/graphql-core/actions/workflows/test.yml/badge.svg)](https://github.com/graphql-python/graphql-core/actions/workflows/test.yml)
+[![Lint Status](https://github.com/graphql-python/graphql-core/actions/workflows/lint.yml/badge.svg)](https://github.com/graphql-python/graphql-core/actions/workflows/lint.yml)
+[![CodSpeed](https://img.shields.io/endpoint?url=https://codspeed.io/badge.json)](https://codspeed.io/graphql-python/graphql-core)
+[![Code style](https://img.shields.io/badge/code%20style-ruff-000000.svg)](https://github.com/astral-sh/ruff)
 
-An extensive test suite with over 2300 unit tests and 100% coverage comprises a
-replication of the complete test suite of GraphQL.js, making sure this port is
-reliable and compatible with GraphQL.js.
+An extensive test suite with over 2500 unit tests and 100% coverage replicates the
+complete test suite of GraphQL.js, ensuring that this port is reliable and compatible
+with GraphQL.js.
 
-The current stable version 3.2.3 of GraphQL-core is up-to-date with GraphQL.js
-version 16.6.0 and supports Python version 3.7 and newer.
+The current stable version 3.2.6 of GraphQL-core is up-to-date with GraphQL.js
+version 16.8.2 and supports Python versions 3.6 to 3.13.
 
-You can also try out the latest alpha version 3.3.0a6 of GraphQL-core
-which is up-to-date with GraphQL.js version 17.0.0a2.
+You can also try out the latest alpha version 3.3.0a7 of GraphQL-core,
+which is up-to-date with GraphQL.js version 17.0.0a3.
 Please note that this new minor version of GraphQL-core does not support
 Python 3.6 anymore.
 
@@ -26,13 +27,12 @@ Note that for various reasons, GraphQL-core does not use SemVer like GraphQL.js.
 Changes in the major version of GraphQL.js are reflected in the minor version of
 GraphQL-core instead. This means there can be breaking changes in the API
 when the minor version changes, and only patch releases are fully backward compatible.
-Therefore, we recommend something like `=~ 3.2.0` as version specifier
+Therefore, we recommend using something like `~= 3.2.0` as the version specifier
 when including GraphQL-core as a dependency.
 
-
 ## Documentation
 
-A more detailed documentation for GraphQL-core 3 can be found at
+More detailed documentation for GraphQL-core 3 can be found at
 [graphql-core-3.readthedocs.io](https://graphql-core-3.readthedocs.io/).
 
 The documentation for GraphQL.js can be found at
 [graphql.org/graphql-js/](https://graphql.org/graphql-js/).
@@ -47,10 +47,10 @@ examples.
 
 A general overview of GraphQL is available in the
 [README](https://github.com/graphql/graphql-spec/blob/main/README.md) for the
-[Specification for GraphQL](https://github.com/graphql/graphql-spec). That overview
-describes a simple set of GraphQL examples that exist as [tests](tests) in this
-repository. A good way to get started with this repository is to walk through that
-README and the corresponding tests in parallel.
+[Specification for GraphQL](https://github.com/graphql/graphql-spec). This overview
+includes a simple set of GraphQL examples that are also available as [tests](tests)
+in this repository. A good way to get started with this repository is to walk through
+that README and the corresponding tests in parallel.
 
 ## Installation
 
@@ -174,17 +174,17 @@ asyncio.run(main())
 
 ## Goals and restrictions
 
-GraphQL-core tries to reproduce the code of the reference implementation GraphQL.js
-in Python as closely as possible and to stay up-to-date with the latest development of
-GraphQL.js.
+GraphQL-core aims to reproduce the code of the reference implementation GraphQL.js
+in Python as closely as possible while staying up-to-date with the latest development
+of GraphQL.js.
 
-GraphQL-core 3 (formerly known as GraphQL-core-next) has been created as a modern
+GraphQL-core 3 (formerly known as GraphQL-core-next) was created as a modern
 alternative to [GraphQL-core 2](https://github.com/graphql-python/graphql-core-legacy),
-a prior work by Syrus Akbary, based on an older version of GraphQL.js and also
-targeting older Python versions. Some parts of GraphQL-core 3 have been inspired by
-GraphQL-core 2 or directly taken over with only slight modifications, but most of the
-code has been re-implemented from scratch, replicating the latest code in GraphQL.js
-very closely and adding type hints for Python.
+a prior work by Syrus Akbary based on an older version of GraphQL.js that still
+supported legacy Python versions. While some parts of GraphQL-core 3 were inspired by
+GraphQL-core 2 or directly taken over with slight modifications, most of the code has
+been re-implemented from scratch. This re-implementation closely replicates the latest
+code in GraphQL.js and adds type hints for Python.
 
 Design goals for the GraphQL-core 3 library were:
 
@@ -208,6 +208,10 @@ Some restrictions (mostly in line with the design goals):
 * supports asynchronous operations only via async.io
   (does not support the additional executors in GraphQL-core)
 
+Note that we now use the [ruff](https://docs.astral.sh/ruff/) tool
+to both format and check the code of GraphQL-core 3,
+in addition to using [mypy](https://mypy-lang.org/) as a type checker.
+
 
 ## Integration with other libraries and roadmap
 
@@ -217,19 +221,19 @@
   also been created by Syrus Akbary, who meanwhile has handed over the maintenance
   and future development to members of the GraphQL-Python community.
 
-  The current version 2 of Graphene is using Graphql-core 2 as core library for much of
-  the heavy lifting. Note that Graphene 2 is not compatible with GraphQL-core 3.
-  The new version 3 of Graphene will use GraphQL-core 3 instead of GraphQL-core 2.
+  Graphene 3 now uses GraphQL-core 3 as the core library for much of the heavy lifting.
 
 * [Ariadne](https://github.com/mirumee/ariadne) is a Python library for implementing
   GraphQL servers using schema-first approach created by Mirumee Software.
 
-  Ariadne is already using GraphQL-core 3 as its GraphQL implementation.
+  Ariadne is also using GraphQL-core 3 as its GraphQL implementation.
 
 * [Strawberry](https://github.com/strawberry-graphql/strawberry), created by Patrick
   Arminio, is a new GraphQL library for Python 3, inspired by dataclasses, that is
   also using GraphQL-core 3 as underpinning.
 
+* [Typed GraphQL](https://github.com/willemt/typed-graphql), a thin layer over GraphQL-core that uses native Python types for creating GraphQL schemas.
+
 
 ## Changelog
 
@@ -240,6 +244,7 @@ Changes are tracked as
 ## Credits and history
 
 The GraphQL-core 3 library
+
 * has been created and is maintained by Christoph Zwerschke
 * uses ideas and code from GraphQL-core 2, a prior work by Syrus Akbary
 * is a Python port of GraphQL.js which has been developed by Lee Byron and others
diff --git a/docs/conf.py b/docs/conf.py
index bd53efa0..f70b6d03 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -50,7 +50,7 @@
 
 # General information about the project.
 project = "GraphQL-core 3"
-copyright = "2024, Christoph Zwerschke"
+copyright = "2025, Christoph Zwerschke"
 author = "Christoph Zwerschke"
 
 # The version info for the project you're documenting, acts as replacement for
@@ -60,7 +60,7 @@
 # The short X.Y version.
 # version = '3.3'
 # The full version, including alpha/beta/rc tags.
-version = release = "3.3.0a6"
+version = release = "3.3.0a7"
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
@@ -142,17 +142,22 @@
 
 """
 GNT GT KT T VT TContext
-enum.Enum
 traceback
 types.TracebackType
 TypeMap
 AwaitableOrValue
+DeferredFragmentRecord
+DeferUsage
 EnterLeaveVisitor
 ExperimentalIncrementalExecutionResults
 FieldGroup
+FormattedIncrementalResult
+FormattedPendingResult
 FormattedSourceLocation
 GraphQLAbstractType
 GraphQLCompositeType
+GraphQLEnumValueMap
 GraphQLErrorExtensions
 GraphQLFieldResolver
 GraphQLInputType
@@ -161,18 +166,38 @@
 GraphQLTypeResolver
 GroupedFieldSet
 IncrementalDataRecord
+IncrementalResult
+InitialResultRecord
 Middleware
+PendingResult
+StreamItemsRecord
+StreamRecord
+SubsequentDataRecord
 asyncio.events.AbstractEventLoop
-graphql.execution.collect_fields.FieldsAndPatches
-graphql.execution.map_async_iterable.map_async_iterable
-graphql.execution.Middleware
-graphql.execution.execute.ExperimentalIncrementalExecutionResults
+collections.abc.MutableMapping
+collections.abc.MutableSet
+enum.Enum
+graphql.execution.collect_fields.DeferUsage
+graphql.execution.collect_fields.CollectFieldsResult
+graphql.execution.collect_fields.FieldGroup
 graphql.execution.execute.StreamArguments
+graphql.execution.execute.StreamUsage
+graphql.execution.map_async_iterable.map_async_iterable
+graphql.execution.incremental_publisher.CompletedResult
+graphql.execution.incremental_publisher.DeferredFragmentRecord
+graphql.execution.incremental_publisher.DeferredGroupedFieldSetRecord
+graphql.execution.incremental_publisher.FormattedCompletedResult
+graphql.execution.incremental_publisher.FormattedPendingResult
 graphql.execution.incremental_publisher.IncrementalPublisher
+graphql.execution.incremental_publisher.InitialResultRecord
+graphql.execution.incremental_publisher.PendingResult
 graphql.execution.incremental_publisher.StreamItemsRecord
-graphql.execution.incremental_publisher.DeferredFragmentRecord
+graphql.execution.incremental_publisher.StreamRecord
+graphql.execution.Middleware
 graphql.language.lexer.EscapeSequence
 graphql.language.visitor.EnterLeaveVisitor
+graphql.pyutils.ref_map.K
+graphql.pyutils.ref_map.V
 graphql.type.definition.GT_co
 graphql.type.definition.GNT_co
 graphql.type.definition.TContext
diff --git a/docs/modules/pyutils.rst b/docs/modules/pyutils.rst
index cd178d65..e33b5d1f 100644
--- a/docs/modules/pyutils.rst
+++ b/docs/modules/pyutils.rst
@@ -30,3 +30,7 @@ PyUtils
 .. autoclass:: SimplePubSub
 .. autoclass:: SimplePubSubIterator
 .. autodata:: Undefined
+.. autoclass:: RefMap
+   :no-inherited-members:
+.. autoclass:: RefSet
+   :no-inherited-members:
diff --git a/docs/requirements.txt b/docs/requirements.txt
index f52741c8..9652132e 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,2 +1,2 @@
-sphinx>=7.3.7,<8
-sphinx_rtd_theme>=2.0.0,<3
+sphinx>=7,<8
+sphinx_rtd_theme>=2,<3
diff --git a/poetry.lock b/poetry.lock
index 1d4f8e60..6af5b224 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand.
 
 [[package]]
 name = "alabaster"
@@ -30,20 +30,20 @@ dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"]
 
 [[package]]
 name = "babel"
-version = "2.16.0"
+version = "2.17.0"
 description = "Internationalization utilities"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "babel-2.16.0-py3-none-any.whl", hash = "sha256:368b5b98b37c06b7daf6696391c3240c938b37767d4584413e8438c5c435fa8b"},
-    {file = "babel-2.16.0.tar.gz", hash = "sha256:d1f3554ca26605fe173f3de0c65f750f5a42f924499bf134de6423582298e316"},
+    {file = "babel-2.17.0-py3-none-any.whl", hash = "sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2"},
+    {file = "babel-2.17.0.tar.gz", hash = "sha256:0c54cffb19f690cdcc52a3b50bcbf71e07a808d1c80d549f2459b9d2cf0afb9d"},
 ]
 
 [package.dependencies]
 pytz = {version = ">=2015.7", markers = "python_version < \"3.9\""}
 
 [package.extras]
-dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"]
+dev = ["backports.zoneinfo", "freezegun (>=1.0,<2.0)", "jinja2 (>=3.0)", "pytest (>=6.0)", "pytest-cov", "pytz", "setuptools", "tzdata"]
 
 [[package]]
 name = "bump2version"
@@ -58,26 +58,181 @@ files = [
 
 [[package]]
 name = "cachetools"
-version = "5.4.0"
+version = "5.5.2"
 description = "Extensible memoizing collections and decorators"
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "cachetools-5.4.0-py3-none-any.whl", hash = "sha256:3ae3b49a3d5e28a77a0be2b37dbcb89005058959cb2323858c2657c4a8cab474"},
-    {file = "cachetools-5.4.0.tar.gz", hash = "sha256:b8adc2e7c07f105ced7bc56dbb6dfbe7c4a00acce20e2227b3f355be89bc6827"},
+    {file = "cachetools-5.5.2-py3-none-any.whl", hash = "sha256:d26a22bcc62eb95c3beabd9f1ee5e820d3d2704fe2967cbe350e20c8ffcd3f0a"},
+    {file = "cachetools-5.5.2.tar.gz", hash = "sha256:1a661caa9175d26759571b2e19580f9d6393969e5dfca11fdb1f947a23e640d4"},
 ]
 
 [[package]]
 name = "certifi"
-version = "2024.7.4"
+version = "2025.4.26"
 description = "Python package for providing Mozilla's CA Bundle."
 optional = false
 python-versions = ">=3.6"
 files = [
-    {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"},
-    {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"},
+    {file = "certifi-2025.4.26-py3-none-any.whl", hash = "sha256:30350364dfe371162649852c63336a15c70c6510c2ad5015b21c2345311805f3"},
+    {file = "certifi-2025.4.26.tar.gz", hash = "sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6"},
+]
+
+[[package]]
+name = "cffi"
+version = "1.15.1"
+description = "Foreign Function Interface for Python calling C code."
+optional = false +python-versions = "*" +files = [ + {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"}, + {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"}, + {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"}, + {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"}, + {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"}, + {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"}, + {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"}, + {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"}, + {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"}, + 
{file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"}, + {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"}, + {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"}, + {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"}, + {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"}, + {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"}, + {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"}, + {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"}, + {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"}, + {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"}, + 
{file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"}, + {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"}, + {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"}, + {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"}, + {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"}, + {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"}, +] + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "cffi" +version = "1.17.1" +description = "Foreign Function Interface for Python calling C code." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, + {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"}, + {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"}, + {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"}, + {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash 
= "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"}, + {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"}, + {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"}, + {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"}, + {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"}, + {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", 
hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"}, + {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"}, + {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"}, + {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"}, + {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"}, + {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, + {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, ] +[package.dependencies] +pycparser = "*" + [[package]] name = "chardet" version = "5.2.0" @@ -91,101 +246,103 @@ files = [ [[package]] name = "charset-normalizer" -version = "3.3.2" +version = "3.4.2" description = "The Real First Universal Charset Detector. 
Open, modern and actively maintained alternative to Chardet." optional = false -python-versions = ">=3.7.0" -files = [ - {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = 
"sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, - {file = 
"charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, - {file = 
"charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, - {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, +python-versions = ">=3.7" +files = [ + {file = "charset_normalizer-3.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9cbfacf36cb0ec2897ce0ebc5d08ca44213af24265bd56eca54bee7923c48fd6"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18dd2e350387c87dabe711b86f83c9c78af772c748904d372ade190b5c7c9d4d"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8075c35cd58273fee266c58c0c9b670947c19df5fb98e7b66710e04ad4e9ff86"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5bf4545e3b962767e5c06fe1738f951f77d27967cb2caa64c28be7c4563e162c"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7a6ab32f7210554a96cd9e33abe3ddd86732beeafc7a28e9955cdf22ffadbab0"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b33de11b92e9f75a2b545d6e9b6f37e398d86c3e9e9653c4864eb7e89c5773ef"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8755483f3c00d6c9a77f490c17e6ab0c8729e39e6390328e42521ef175380ae6"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:68a328e5f55ec37c57f19ebb1fdc56a248db2e3e9ad769919a58672958e8f366"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:21b2899062867b0e1fde9b724f8aecb1af14f2778d69aacd1a5a1853a597a5db"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-win32.whl", hash = "sha256:e8082b26888e2f8b36a042a58307d5b917ef2b1cacab921ad3323ef91901c71a"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:f69a27e45c43520f5487f27627059b64aaf160415589230992cec34c5e18a509"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:be1e352acbe3c78727a16a455126d9ff83ea2dfdcbc83148d2982305a04714c2"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa88ca0b1932e93f2d961bf3addbb2db902198dca337d88c89e1559e066e7645"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:d524ba3f1581b35c03cb42beebab4a13e6cdad7b36246bd22541fa585a56cccd"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28a1005facc94196e1fb3e82a3d442a9d9110b8434fc1ded7a24a2983c9888d8"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdb20a30fe1175ecabed17cbf7812f7b804b8a315a25f24678bcdf120a90077f"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f5d9ed7f254402c9e7d35d2f5972c9bbea9040e99cd2861bd77dc68263277c7"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:efd387a49825780ff861998cd959767800d54f8308936b21025326de4b5a42b9"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f0aa37f3c979cf2546b73e8222bbfa3dc07a641585340179d768068e3455e544"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e70e990b2137b29dc5564715de1e12701815dacc1d056308e2b17e9095372a82"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:0c8c57f84ccfc871a48a47321cfa49ae1df56cd1d965a09abe84066f6853b9c0"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6b66f92b17849b85cad91259efc341dce9c1af48e2173bf38a85c6329f1033e5"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-win32.whl", hash = "sha256:daac4765328a919a805fa5e2720f3e94767abd632ae410a9062dff5412bae65a"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:e53efc7c7cee4c1e70661e2e112ca46a575f90ed9ae3fef200f2a25e954f4b28"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0c29de6a1a95f24b9a1aa7aefd27d2487263f00dfd55a77719b530788f75cff7"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cddf7bd982eaa998934a91f69d182aec997c6c468898efe6679af88283b498d3"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcbe676a55d7445b22c10967bceaaf0ee69407fbe0ece4d032b6eb8d4565982a"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d41c4d287cfc69060fa91cae9683eacffad989f1a10811995fa309df656ec214"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e594135de17ab3866138f496755f302b72157d115086d100c3f19370839dd3a"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf713fe9a71ef6fd5adf7a79670135081cd4431c2943864757f0fa3a65b1fafd"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a370b3e078e418187da8c3674eddb9d983ec09445c99a3a263c2011993522981"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a955b438e62efdf7e0b7b52a64dc5c3396e2634baa62471768a64bc2adb73d5c"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7222ffd5e4de8e57e03ce2cef95a4c43c98fcb72ad86909abdfc2c17d227fc1b"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:bee093bf902e1d8fc0ac143c88902c3dfc8941f7ea1d6a8dd2bcb786d33db03d"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:dedb8adb91d11846ee08bec4c8236c8549ac721c245678282dcb06b221aab59f"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-win32.whl", hash = "sha256:db4c7bf0e07fc3b7d89ac2a5880a6a8062056801b83ff56d8464b70f65482b6c"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:5a9979887252a82fefd3d3ed2a8e3b937a7a809f65dcb1e068b090e165bbe99e"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-win32.whl", hash = "sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cad5f45b3146325bb38d6855642f6fd609c3f7cad4dbaf75549bf3b904d3184"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2680962a4848b3c4f155dc2ee64505a9c57186d0d56b43123b17ca3de18f0fa"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:36b31da18b8890a76ec181c3cf44326bf2c48e36d393ca1b72b3f484113ea344"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f4074c5a429281bf056ddd4c5d3b740ebca4d43ffffe2ef4bf4d2d05114299da"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9e36a97bee9b86ef9a1cf7bb96747eb7a15c2f22bdb5b516434b00f2a599f02"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = 
"sha256:1b1bde144d98e446b056ef98e59c256e9294f6b74d7af6846bf5ffdafd687a7d"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:915f3849a011c1f593ab99092f3cecfcb4d65d8feb4a64cf1bf2d22074dc0ec4"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:fb707f3e15060adf5b7ada797624a6c6e0138e2a26baa089df64c68ee98e040f"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:25a23ea5c7edc53e0f29bae2c44fcb5a1aa10591aae107f2a2b2583a9c5cbc64"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:770cab594ecf99ae64c236bc9ee3439c3f46be49796e265ce0cc8bc17b10294f"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-win32.whl", hash = "sha256:6a0289e4589e8bdfef02a80478f1dfcb14f0ab696b5a00e1f4b8a14a307a3c58"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6fc1f5b51fa4cecaa18f2bd7a003f3dd039dd615cd69a2afd6d3b19aed6775f2"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:76af085e67e56c8816c3ccf256ebd136def2ed9654525348cfa744b6802b69eb"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e45ba65510e2647721e35323d6ef54c7974959f6081b58d4ef5d87c60c84919a"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:046595208aae0120559a67693ecc65dd75d46f7bf687f159127046628178dc45"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75d10d37a47afee94919c4fab4c22b9bc2a8bf7d4f46f87363bcf0573f3ff4f5"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6333b3aa5a12c26b2a4d4e7335a28f1475e0e5e17d69d55141ee3cab736f66d1"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e8323a9b031aa0393768b87f04b4164a40037fb2a3c11ac06a03ffecd3618027"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:24498ba8ed6c2e0b56d4acbf83f2d989720a93b41d712ebd4f4979660db4417b"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:844da2b5728b5ce0e32d863af26f32b5ce61bc4273a9c720a9f3aa9df73b1455"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:65c981bdbd3f57670af8b59777cbfae75364b483fa8a9f420f08094531d54a01"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:3c21d4fca343c805a52c0c78edc01e3477f6dd1ad7c47653241cf2a206d4fc58"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:dc7039885fa1baf9be153a0626e337aa7ec8bf96b0128605fb0d77788ddc1681"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-win32.whl", hash = "sha256:8272b73e1c5603666618805fe821edba66892e2870058c94c53147602eab29c7"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:70f7172939fdf8790425ba31915bfbe8335030f05b9913d7ae00a87d4395620a"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:005fa3432484527f9732ebd315da8da8001593e2cf46a3d817669f062c3d9ed4"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e92fca20c46e9f5e1bb485887d074918b13543b1c2a1185e69bb8d17ab6236a7"}, + {file = 
"charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50bf98d5e563b83cc29471fa114366e6806bc06bc7a25fd59641e41445327836"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:721c76e84fe669be19c5791da68232ca2e05ba5185575086e384352e2c309597"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82d8fd25b7f4675d0c47cf95b594d4e7b158aca33b76aa63d07186e13c0e0ab7"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3daeac64d5b371dea99714f08ffc2c208522ec6b06fbc7866a450dd446f5c0f"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dccab8d5fa1ef9bfba0590ecf4d46df048d18ffe3eec01eeb73a42e0d9e7a8ba"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:aaf27faa992bfee0264dc1f03f4c75e9fcdda66a519db6b957a3f826e285cf12"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:eb30abc20df9ab0814b5a2524f23d75dcf83cde762c161917a2b4b7b55b1e518"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:c72fbbe68c6f32f251bdc08b8611c7b3060612236e960ef848e0a517ddbe76c5"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:982bb1e8b4ffda883b3d0a521e23abcd6fd17418f6d2c4118d257a10199c0ce3"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-win32.whl", hash = "sha256:43e0933a0eff183ee85833f341ec567c0980dae57c464d8a508e1b2ceb336471"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:d11b54acf878eef558599658b0ffca78138c8c3655cf4f3a4a673c437e67732e"}, + {file = "charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0"}, + {file = "charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63"}, ] [[package]] @@ -361,15 +518,93 @@ tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.1 [package.extras] toml = ["tomli"] +[[package]] +name = "coverage" +version = "7.8.0" +description = "Code coverage measurement for Python" +optional = false +python-versions = ">=3.9" +files = [ + {file = "coverage-7.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2931f66991175369859b5fd58529cd4b73582461877ecfd859b6549869287ffe"}, + {file = "coverage-7.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:52a523153c568d2c0ef8826f6cc23031dc86cffb8c6aeab92c4ff776e7951b28"}, + {file = "coverage-7.8.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c8a5c139aae4c35cbd7cadca1df02ea8cf28a911534fc1b0456acb0b14234f3"}, + {file = "coverage-7.8.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a26c0c795c3e0b63ec7da6efded5f0bc856d7c0b24b2ac84b4d1d7bc578d676"}, + {file = "coverage-7.8.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:821f7bcbaa84318287115d54becb1915eece6918136c6f91045bb84e2f88739d"}, + {file = "coverage-7.8.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a321c61477ff8ee705b8a5fed370b5710c56b3a52d17b983d9215861e37b642a"}, + {file = "coverage-7.8.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:ed2144b8a78f9d94d9515963ed273d620e07846acd5d4b0a642d4849e8d91a0c"}, + {file = 
"coverage-7.8.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:042e7841a26498fff7a37d6fda770d17519982f5b7d8bf5278d140b67b61095f"}, + {file = "coverage-7.8.0-cp310-cp310-win32.whl", hash = "sha256:f9983d01d7705b2d1f7a95e10bbe4091fabc03a46881a256c2787637b087003f"}, + {file = "coverage-7.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:5a570cd9bd20b85d1a0d7b009aaf6c110b52b5755c17be6962f8ccd65d1dbd23"}, + {file = "coverage-7.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e7ac22a0bb2c7c49f441f7a6d46c9c80d96e56f5a8bc6972529ed43c8b694e27"}, + {file = "coverage-7.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bf13d564d310c156d1c8e53877baf2993fb3073b2fc9f69790ca6a732eb4bfea"}, + {file = "coverage-7.8.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5761c70c017c1b0d21b0815a920ffb94a670c8d5d409d9b38857874c21f70d7"}, + {file = "coverage-7.8.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5ff52d790c7e1628241ffbcaeb33e07d14b007b6eb00a19320c7b8a7024c040"}, + {file = "coverage-7.8.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d39fc4817fd67b3915256af5dda75fd4ee10621a3d484524487e33416c6f3543"}, + {file = "coverage-7.8.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b44674870709017e4b4036e3d0d6c17f06a0e6d4436422e0ad29b882c40697d2"}, + {file = "coverage-7.8.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8f99eb72bf27cbb167b636eb1726f590c00e1ad375002230607a844d9e9a2318"}, + {file = "coverage-7.8.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b571bf5341ba8c6bc02e0baeaf3b061ab993bf372d982ae509807e7f112554e9"}, + {file = "coverage-7.8.0-cp311-cp311-win32.whl", hash = "sha256:e75a2ad7b647fd8046d58c3132d7eaf31b12d8a53c0e4b21fa9c4d23d6ee6d3c"}, + {file = "coverage-7.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:3043ba1c88b2139126fc72cb48574b90e2e0546d4c78b5299317f61b7f718b78"}, + {file = "coverage-7.8.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:bbb5cc845a0292e0c520656d19d7ce40e18d0e19b22cb3e0409135a575bf79fc"}, + {file = "coverage-7.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4dfd9a93db9e78666d178d4f08a5408aa3f2474ad4d0e0378ed5f2ef71640cb6"}, + {file = "coverage-7.8.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f017a61399f13aa6d1039f75cd467be388d157cd81f1a119b9d9a68ba6f2830d"}, + {file = "coverage-7.8.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0915742f4c82208ebf47a2b154a5334155ed9ef9fe6190674b8a46c2fb89cb05"}, + {file = "coverage-7.8.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a40fcf208e021eb14b0fac6bdb045c0e0cab53105f93ba0d03fd934c956143a"}, + {file = "coverage-7.8.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a1f406a8e0995d654b2ad87c62caf6befa767885301f3b8f6f73e6f3c31ec3a6"}, + {file = "coverage-7.8.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:77af0f6447a582fdc7de5e06fa3757a3ef87769fbb0fdbdeba78c23049140a47"}, + {file = "coverage-7.8.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f2d32f95922927186c6dbc8bc60df0d186b6edb828d299ab10898ef3f40052fe"}, + {file = "coverage-7.8.0-cp312-cp312-win32.whl", hash = "sha256:769773614e676f9d8e8a0980dd7740f09a6ea386d0f383db6821df07d0f08545"}, + {file = "coverage-7.8.0-cp312-cp312-win_amd64.whl", hash = 
"sha256:e5d2b9be5b0693cf21eb4ce0ec8d211efb43966f6657807f6859aab3814f946b"}, + {file = "coverage-7.8.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5ac46d0c2dd5820ce93943a501ac5f6548ea81594777ca585bf002aa8854cacd"}, + {file = "coverage-7.8.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:771eb7587a0563ca5bb6f622b9ed7f9d07bd08900f7589b4febff05f469bea00"}, + {file = "coverage-7.8.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42421e04069fb2cbcbca5a696c4050b84a43b05392679d4068acbe65449b5c64"}, + {file = "coverage-7.8.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:554fec1199d93ab30adaa751db68acec2b41c5602ac944bb19187cb9a41a8067"}, + {file = "coverage-7.8.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5aaeb00761f985007b38cf463b1d160a14a22c34eb3f6a39d9ad6fc27cb73008"}, + {file = "coverage-7.8.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:581a40c7b94921fffd6457ffe532259813fc68eb2bdda60fa8cc343414ce3733"}, + {file = "coverage-7.8.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f319bae0321bc838e205bf9e5bc28f0a3165f30c203b610f17ab5552cff90323"}, + {file = "coverage-7.8.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:04bfec25a8ef1c5f41f5e7e5c842f6b615599ca8ba8391ec33a9290d9d2db3a3"}, + {file = "coverage-7.8.0-cp313-cp313-win32.whl", hash = "sha256:dd19608788b50eed889e13a5d71d832edc34fc9dfce606f66e8f9f917eef910d"}, + {file = "coverage-7.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:a9abbccd778d98e9c7e85038e35e91e67f5b520776781d9a1e2ee9d400869487"}, + {file = "coverage-7.8.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:18c5ae6d061ad5b3e7eef4363fb27a0576012a7447af48be6c75b88494c6cf25"}, + {file = "coverage-7.8.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:95aa6ae391a22bbbce1b77ddac846c98c5473de0372ba5c463480043a07bff42"}, + {file = "coverage-7.8.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e013b07ba1c748dacc2a80e69a46286ff145935f260eb8c72df7185bf048f502"}, + {file = "coverage-7.8.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d766a4f0e5aa1ba056ec3496243150698dc0481902e2b8559314368717be82b1"}, + {file = "coverage-7.8.0-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad80e6b4a0c3cb6f10f29ae4c60e991f424e6b14219d46f1e7d442b938ee68a4"}, + {file = "coverage-7.8.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b87eb6fc9e1bb8f98892a2458781348fa37e6925f35bb6ceb9d4afd54ba36c73"}, + {file = "coverage-7.8.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:d1ba00ae33be84066cfbe7361d4e04dec78445b2b88bdb734d0d1cbab916025a"}, + {file = "coverage-7.8.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f3c38e4e5ccbdc9198aecc766cedbb134b2d89bf64533973678dfcf07effd883"}, + {file = "coverage-7.8.0-cp313-cp313t-win32.whl", hash = "sha256:379fe315e206b14e21db5240f89dc0774bdd3e25c3c58c2c733c99eca96f1ada"}, + {file = "coverage-7.8.0-cp313-cp313t-win_amd64.whl", hash = "sha256:2e4b6b87bb0c846a9315e3ab4be2d52fac905100565f4b92f02c445c8799e257"}, + {file = "coverage-7.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fa260de59dfb143af06dcf30c2be0b200bed2a73737a8a59248fcb9fa601ef0f"}, + {file = "coverage-7.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:96121edfa4c2dfdda409877ea8608dd01de816a4dc4a0523356067b305e4e17a"}, + {file 
= "coverage-7.8.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b8af63b9afa1031c0ef05b217faa598f3069148eeee6bb24b79da9012423b82"}, + {file = "coverage-7.8.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:89b1f4af0d4afe495cd4787a68e00f30f1d15939f550e869de90a86efa7e0814"}, + {file = "coverage-7.8.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94ec0be97723ae72d63d3aa41961a0b9a6f5a53ff599813c324548d18e3b9e8c"}, + {file = "coverage-7.8.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8a1d96e780bdb2d0cbb297325711701f7c0b6f89199a57f2049e90064c29f6bd"}, + {file = "coverage-7.8.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:f1d8a2a57b47142b10374902777e798784abf400a004b14f1b0b9eaf1e528ba4"}, + {file = "coverage-7.8.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:cf60dd2696b457b710dd40bf17ad269d5f5457b96442f7f85722bdb16fa6c899"}, + {file = "coverage-7.8.0-cp39-cp39-win32.whl", hash = "sha256:be945402e03de47ba1872cd5236395e0f4ad635526185a930735f66710e1bd3f"}, + {file = "coverage-7.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:90e7fbc6216ecaffa5a880cdc9c77b7418c1dcb166166b78dbc630d07f278cc3"}, + {file = "coverage-7.8.0-pp39.pp310.pp311-none-any.whl", hash = "sha256:b8194fb8e50d556d5849753de991d390c5a1edeeba50f68e3a9253fbd8bf8ccd"}, + {file = "coverage-7.8.0-py3-none-any.whl", hash = "sha256:dbf364b4c5e7bae9250528167dfe40219b62e2d573c854d74be213e1e52069f7"}, + {file = "coverage-7.8.0.tar.gz", hash = "sha256:7a3d62b3b03b4b6fd41a085f3574874cf946cb4604d2b4d3e8dca8cd570ca501"}, +] + +[package.dependencies] +tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} + +[package.extras] +toml = ["tomli"] + [[package]] name = "distlib" -version = "0.3.8" +version = "0.3.9" description = "Distribution utilities" optional = false python-versions = "*" files = [ - {file = "distlib-0.3.8-py2.py3-none-any.whl", hash = "sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784"}, - {file = "distlib-0.3.8.tar.gz", hash = "sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64"}, + {file = "distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87"}, + {file = "distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403"}, ] [[package]] @@ -425,31 +660,34 @@ testing = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "diff-cover (>=7.5)", "p [[package]] name = "filelock" -version = "3.15.4" +version = "3.16.1" description = "A platform independent file lock." 
optional = false python-versions = ">=3.8" files = [ - {file = "filelock-3.15.4-py3-none-any.whl", hash = "sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7"}, - {file = "filelock-3.15.4.tar.gz", hash = "sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb"}, + {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"}, + {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"}, ] [package.extras] -docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] -testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-asyncio (>=0.21)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)", "virtualenv (>=20.26.2)"] -typing = ["typing-extensions (>=4.8)"] +docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4.1)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.2)", "pytest (>=8.3.3)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.4)"] +typing = ["typing-extensions (>=4.12.2)"] [[package]] name = "idna" -version = "3.7" +version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false -python-versions = ">=3.5" +python-versions = ">=3.6" files = [ - {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, - {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, ] +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + [[package]] name = "imagesize" version = "1.4.1" @@ -483,22 +721,26 @@ testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs [[package]] name = "importlib-metadata" -version = "8.2.0" +version = "8.5.0" description = "Read metadata from Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "importlib_metadata-8.2.0-py3-none-any.whl", hash = "sha256:11901fa0c2f97919b288679932bb64febaeacf289d18ac84dd68cb2e74213369"}, - {file = "importlib_metadata-8.2.0.tar.gz", hash = "sha256:72e8d4399996132204f9a16dcc751af254a48f8d1b20b9ff0f98d4a8f901e73d"}, + {file = "importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b"}, + {file = "importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7"}, ] [package.dependencies] -zipp = ">=0.5" +zipp = ">=3.20" [package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] +cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] perf = ["ipython"] -test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"] +test = 
["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] +type = ["pytest-mypy"] [[package]] name = "iniconfig" @@ -511,15 +753,26 @@ files = [ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] +[[package]] +name = "iniconfig" +version = "2.1.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.8" +files = [ + {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"}, + {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"}, +] + [[package]] name = "jinja2" -version = "3.1.4" +version = "3.1.6" description = "A very fast and expressive template engine." optional = false python-versions = ">=3.7" files = [ - {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"}, - {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"}, + {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"}, + {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"}, ] [package.dependencies] @@ -528,6 +781,30 @@ MarkupSafe = ">=2.0" [package.extras] i18n = ["Babel (>=2.7)"] +[[package]] +name = "markdown-it-py" +version = "3.0.0" +description = "Python port of markdown-it. Markdown parsing, done right!" +optional = false +python-versions = ">=3.8" +files = [ + {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, + {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, +] + +[package.dependencies] +mdurl = ">=0.1,<1.0" + +[package.extras] +benchmarking = ["psutil", "pytest", "pytest-benchmark"] +code-style = ["pre-commit (>=3.0,<4.0)"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] +linkify = ["linkify-it-py (>=1,<3)"] +plugins = ["mdit-py-plugins"] +profiling = ["gprof2dot"] +rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] + [[package]] name = "markupsafe" version = "2.1.5" @@ -597,6 +874,17 @@ files = [ {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, ] +[[package]] +name = "mdurl" +version = "0.1.2" +description = "Markdown URL utilities" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, + {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, +] + [[package]] name = "mypy" version = "1.4.1" @@ -646,47 +934,112 @@ reports = ["lxml"] [[package]] name = "mypy" -version = "1.11.1" +version = "1.14.1" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" files = [ - {file = "mypy-1.11.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:a32fc80b63de4b5b3e65f4be82b4cfa362a46702672aa6a0f443b4689af7008c"}, - {file = "mypy-1.11.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c1952f5ea8a5a959b05ed5f16452fddadbaae48b5d39235ab4c3fc444d5fd411"}, - {file = "mypy-1.11.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e1e30dc3bfa4e157e53c1d17a0dad20f89dc433393e7702b813c10e200843b03"}, - {file = "mypy-1.11.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2c63350af88f43a66d3dfeeeb8d77af34a4f07d760b9eb3a8697f0386c7590b4"}, - {file = "mypy-1.11.1-cp310-cp310-win_amd64.whl", hash = "sha256:a831671bad47186603872a3abc19634f3011d7f83b083762c942442d51c58d58"}, - {file = "mypy-1.11.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7b6343d338390bb946d449677726edf60102a1c96079b4f002dedff375953fc5"}, - {file = "mypy-1.11.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e4fe9f4e5e521b458d8feb52547f4bade7ef8c93238dfb5bbc790d9ff2d770ca"}, - {file = "mypy-1.11.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:886c9dbecc87b9516eff294541bf7f3655722bf22bb898ee06985cd7269898de"}, - {file = "mypy-1.11.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fca4a60e1dd9fd0193ae0067eaeeb962f2d79e0d9f0f66223a0682f26ffcc809"}, - {file = "mypy-1.11.1-cp311-cp311-win_amd64.whl", hash = "sha256:0bd53faf56de9643336aeea1c925012837432b5faf1701ccca7fde70166ccf72"}, - {file = "mypy-1.11.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f39918a50f74dc5969807dcfaecafa804fa7f90c9d60506835036cc1bc891dc8"}, - {file = "mypy-1.11.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0bc71d1fb27a428139dd78621953effe0d208aed9857cb08d002280b0422003a"}, - {file = "mypy-1.11.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b868d3bcff720dd7217c383474008ddabaf048fad8d78ed948bb4b624870a417"}, - {file = "mypy-1.11.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a707ec1527ffcdd1c784d0924bf5cb15cd7f22683b919668a04d2b9c34549d2e"}, - {file = "mypy-1.11.1-cp312-cp312-win_amd64.whl", hash = "sha256:64f4a90e3ea07f590c5bcf9029035cf0efeae5ba8be511a8caada1a4893f5525"}, - {file = "mypy-1.11.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:749fd3213916f1751fff995fccf20c6195cae941dc968f3aaadf9bb4e430e5a2"}, - {file = "mypy-1.11.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b639dce63a0b19085213ec5fdd8cffd1d81988f47a2dec7100e93564f3e8fb3b"}, - {file = "mypy-1.11.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4c956b49c5d865394d62941b109728c5c596a415e9c5b2be663dd26a1ff07bc0"}, - {file = "mypy-1.11.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:45df906e8b6804ef4b666af29a87ad9f5921aad091c79cc38e12198e220beabd"}, - {file = "mypy-1.11.1-cp38-cp38-win_amd64.whl", hash = "sha256:d44be7551689d9d47b7abc27c71257adfdb53f03880841a5db15ddb22dc63edb"}, - {file = "mypy-1.11.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2684d3f693073ab89d76da8e3921883019ea8a3ec20fa5d8ecca6a2db4c54bbe"}, - {file = "mypy-1.11.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:79c07eb282cb457473add5052b63925e5cc97dfab9812ee65a7c7ab5e3cb551c"}, - {file = "mypy-1.11.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11965c2f571ded6239977b14deebd3f4c3abd9a92398712d6da3a772974fad69"}, - {file = "mypy-1.11.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a2b43895a0f8154df6519706d9bca8280cda52d3d9d1514b2d9c3e26792a0b74"}, - {file 
= "mypy-1.11.1-cp39-cp39-win_amd64.whl", hash = "sha256:1a81cf05975fd61aec5ae16501a091cfb9f605dc3e3c878c0da32f250b74760b"}, - {file = "mypy-1.11.1-py3-none-any.whl", hash = "sha256:0624bdb940255d2dd24e829d99a13cfeb72e4e9031f9492148f410ed30bcab54"}, - {file = "mypy-1.11.1.tar.gz", hash = "sha256:f404a0b069709f18bbdb702eb3dcfe51910602995de00bd39cea3050b5772d08"}, + {file = "mypy-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52686e37cf13d559f668aa398dd7ddf1f92c5d613e4f8cb262be2fb4fedb0fcb"}, + {file = "mypy-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1fb545ca340537d4b45d3eecdb3def05e913299ca72c290326be19b3804b39c0"}, + {file = "mypy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90716d8b2d1f4cd503309788e51366f07c56635a3309b0f6a32547eaaa36a64d"}, + {file = "mypy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ae753f5c9fef278bcf12e1a564351764f2a6da579d4a81347e1d5a15819997b"}, + {file = "mypy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e0fe0f5feaafcb04505bcf439e991c6d8f1bf8b15f12b05feeed96e9e7bf1427"}, + {file = "mypy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:7d54bd85b925e501c555a3227f3ec0cfc54ee8b6930bd6141ec872d1c572f81f"}, + {file = "mypy-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f995e511de847791c3b11ed90084a7a0aafdc074ab88c5a9711622fe4751138c"}, + {file = "mypy-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d64169ec3b8461311f8ce2fd2eb5d33e2d0f2c7b49116259c51d0d96edee48d1"}, + {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba24549de7b89b6381b91fbc068d798192b1b5201987070319889e93038967a8"}, + {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:183cf0a45457d28ff9d758730cd0210419ac27d4d3f285beda038c9083363b1f"}, + {file = "mypy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f2a0ecc86378f45347f586e4163d1769dd81c5a223d577fe351f26b179e148b1"}, + {file = "mypy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:ad3301ebebec9e8ee7135d8e3109ca76c23752bac1e717bc84cd3836b4bf3eae"}, + {file = "mypy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:30ff5ef8519bbc2e18b3b54521ec319513a26f1bba19a7582e7b1f58a6e69f14"}, + {file = "mypy-1.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb9f255c18052343c70234907e2e532bc7e55a62565d64536dbc7706a20b78b9"}, + {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b4e3413e0bddea671012b063e27591b953d653209e7a4fa5e48759cda77ca11"}, + {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:553c293b1fbdebb6c3c4030589dab9fafb6dfa768995a453d8a5d3b23784af2e"}, + {file = "mypy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fad79bfe3b65fe6a1efaed97b445c3d37f7be9fdc348bdb2d7cac75579607c89"}, + {file = "mypy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:8fa2220e54d2946e94ab6dbb3ba0a992795bd68b16dc852db33028df2b00191b"}, + {file = "mypy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:92c3ed5afb06c3a8e188cb5da4984cab9ec9a77ba956ee419c68a388b4595255"}, + {file = "mypy-1.14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:dbec574648b3e25f43d23577309b16534431db4ddc09fda50841f1e34e64ed34"}, + {file = 
"mypy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8c6d94b16d62eb3e947281aa7347d78236688e21081f11de976376cf010eb31a"}, + {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d4b19b03fdf54f3c5b2fa474c56b4c13c9dbfb9a2db4370ede7ec11a2c5927d9"}, + {file = "mypy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0c911fde686394753fff899c409fd4e16e9b294c24bfd5e1ea4675deae1ac6fd"}, + {file = "mypy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8b21525cb51671219f5307be85f7e646a153e5acc656e5cebf64bfa076c50107"}, + {file = "mypy-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7084fb8f1128c76cd9cf68fe5971b37072598e7c31b2f9f95586b65c741a9d31"}, + {file = "mypy-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8f845a00b4f420f693f870eaee5f3e2692fa84cc8514496114649cfa8fd5e2c6"}, + {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:44bf464499f0e3a2d14d58b54674dee25c031703b2ffc35064bd0df2e0fac319"}, + {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c99f27732c0b7dc847adb21c9d47ce57eb48fa33a17bc6d7d5c5e9f9e7ae5bac"}, + {file = "mypy-1.14.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:bce23c7377b43602baa0bd22ea3265c49b9ff0b76eb315d6c34721af4cdf1d9b"}, + {file = "mypy-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:8edc07eeade7ebc771ff9cf6b211b9a7d93687ff892150cb5692e4f4272b0837"}, + {file = "mypy-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3888a1816d69f7ab92092f785a462944b3ca16d7c470d564165fe703b0970c35"}, + {file = "mypy-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:46c756a444117c43ee984bd055db99e498bc613a70bbbc120272bd13ca579fbc"}, + {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:27fc248022907e72abfd8e22ab1f10e903915ff69961174784a3900a8cba9ad9"}, + {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:499d6a72fb7e5de92218db961f1a66d5f11783f9ae549d214617edab5d4dbdbb"}, + {file = "mypy-1.14.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57961db9795eb566dc1d1b4e9139ebc4c6b0cb6e7254ecde69d1552bf7613f60"}, + {file = "mypy-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:07ba89fdcc9451f2ebb02853deb6aaaa3d2239a236669a63ab3801bbf923ef5c"}, + {file = "mypy-1.14.1-py3-none-any.whl", hash = "sha256:b66a60cc4073aeb8ae00057f9c1f64d49e90f918fbcef9a977eb121da8b8f1d1"}, + {file = "mypy-1.14.1.tar.gz", hash = "sha256:7ec88144fe9b510e8475ec2f5f251992690fcf89ccb4500b214b4226abcd32d6"}, ] [package.dependencies] -mypy-extensions = ">=1.0.0" +mypy_extensions = ">=1.0.0" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing_extensions = ">=4.6.0" + +[package.extras] +dmypy = ["psutil (>=4.0)"] +faster-cache = ["orjson"] +install-types = ["pip"] +mypyc = ["setuptools (>=50)"] +reports = ["lxml"] + +[[package]] +name = "mypy" +version = "1.15.0" +description = "Optional static typing for Python" +optional = false +python-versions = ">=3.9" +files = [ + {file = "mypy-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:979e4e1a006511dacf628e36fadfecbcc0160a8af6ca7dad2f5025529e082c13"}, + {file = "mypy-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c4bb0e1bd29f7d34efcccd71cf733580191e9a264a2202b0239da95984c5b559"}, + {file = 
"mypy-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:be68172e9fd9ad8fb876c6389f16d1c1b5f100ffa779f77b1fb2176fcc9ab95b"}, + {file = "mypy-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c7be1e46525adfa0d97681432ee9fcd61a3964c2446795714699a998d193f1a3"}, + {file = "mypy-1.15.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2e2c2e6d3593f6451b18588848e66260ff62ccca522dd231cd4dd59b0160668b"}, + {file = "mypy-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:6983aae8b2f653e098edb77f893f7b6aca69f6cffb19b2cc7443f23cce5f4828"}, + {file = "mypy-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2922d42e16d6de288022e5ca321cd0618b238cfc5570e0263e5ba0a77dbef56f"}, + {file = "mypy-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2ee2d57e01a7c35de00f4634ba1bbf015185b219e4dc5909e281016df43f5ee5"}, + {file = "mypy-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:973500e0774b85d9689715feeffcc980193086551110fd678ebe1f4342fb7c5e"}, + {file = "mypy-1.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a95fb17c13e29d2d5195869262f8125dfdb5c134dc8d9a9d0aecf7525b10c2c"}, + {file = "mypy-1.15.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1905f494bfd7d85a23a88c5d97840888a7bd516545fc5aaedff0267e0bb54e2f"}, + {file = "mypy-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:c9817fa23833ff189db061e6d2eff49b2f3b6ed9856b4a0a73046e41932d744f"}, + {file = "mypy-1.15.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:aea39e0583d05124836ea645f412e88a5c7d0fd77a6d694b60d9b6b2d9f184fd"}, + {file = "mypy-1.15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f2147ab812b75e5b5499b01ade1f4a81489a147c01585cda36019102538615f"}, + {file = "mypy-1.15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce436f4c6d218a070048ed6a44c0bbb10cd2cc5e272b29e7845f6a2f57ee4464"}, + {file = "mypy-1.15.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8023ff13985661b50a5928fc7a5ca15f3d1affb41e5f0a9952cb68ef090b31ee"}, + {file = "mypy-1.15.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1124a18bc11a6a62887e3e137f37f53fbae476dc36c185d549d4f837a2a6a14e"}, + {file = "mypy-1.15.0-cp312-cp312-win_amd64.whl", hash = "sha256:171a9ca9a40cd1843abeca0e405bc1940cd9b305eaeea2dda769ba096932bb22"}, + {file = "mypy-1.15.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93faf3fdb04768d44bf28693293f3904bbb555d076b781ad2530214ee53e3445"}, + {file = "mypy-1.15.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:811aeccadfb730024c5d3e326b2fbe9249bb7413553f15499a4050f7c30e801d"}, + {file = "mypy-1.15.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:98b7b9b9aedb65fe628c62a6dc57f6d5088ef2dfca37903a7d9ee374d03acca5"}, + {file = "mypy-1.15.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c43a7682e24b4f576d93072216bf56eeff70d9140241f9edec0c104d0c515036"}, + {file = "mypy-1.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:baefc32840a9f00babd83251560e0ae1573e2f9d1b067719479bfb0e987c6357"}, + {file = "mypy-1.15.0-cp313-cp313-win_amd64.whl", hash = "sha256:b9378e2c00146c44793c98b8d5a61039a048e31f429fb0eb546d93f4b000bedf"}, + {file = "mypy-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:e601a7fa172c2131bff456bb3ee08a88360760d0d2f8cbd7a75a65497e2df078"}, + {file = "mypy-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:712e962a6357634fef20412699a3655c610110e01cdaa6180acec7fc9f8513ba"}, + {file = "mypy-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f95579473af29ab73a10bada2f9722856792a36ec5af5399b653aa28360290a5"}, + {file = "mypy-1.15.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8f8722560a14cde92fdb1e31597760dc35f9f5524cce17836c0d22841830fd5b"}, + {file = "mypy-1.15.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1fbb8da62dc352133d7d7ca90ed2fb0e9d42bb1a32724c287d3c76c58cbaa9c2"}, + {file = "mypy-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:d10d994b41fb3497719bbf866f227b3489048ea4bbbb5015357db306249f7980"}, + {file = "mypy-1.15.0-py3-none-any.whl", hash = "sha256:5469affef548bd1895d86d3bf10ce2b44e33d86923c29e4d675b3e323437ea3e"}, + {file = "mypy-1.15.0.tar.gz", hash = "sha256:404534629d51d3efea5c800ee7c42b72a6554d6c400e6a79eafe15d11341fd43"}, +] + +[package.dependencies] +mypy_extensions = ">=1.0.0" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = ">=4.6.0" +typing_extensions = ">=4.6.0" [package.extras] dmypy = ["psutil (>=4.0)"] +faster-cache = ["orjson"] install-types = ["pip"] mypyc = ["setuptools (>=50)"] reports = ["lxml"] @@ -702,6 +1055,17 @@ files = [ {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, ] +[[package]] +name = "mypy-extensions" +version = "1.1.0" +description = "Type system extensions for programs checked with the mypy type checker." +optional = false +python-versions = ">=3.8" +files = [ + {file = "mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505"}, + {file = "mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558"}, +] + [[package]] name = "packaging" version = "24.0" @@ -715,13 +1079,13 @@ files = [ [[package]] name = "packaging" -version = "24.1" +version = "25.0" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, - {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, + {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, + {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, ] [[package]] @@ -744,19 +1108,19 @@ test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-co [[package]] name = "platformdirs" -version = "4.2.2" +version = "4.3.6" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"},
-    {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"},
+    {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"},
+    {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"},
 ]
 
 [package.extras]
-docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"]
-test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"]
-type = ["mypy (>=1.8)"]
+docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"]
+test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"]
+type = ["mypy (>=1.11.2)"]
 
 [[package]]
 name = "pluggy"
@@ -813,6 +1177,28 @@ files = [
     {file = "py_cpuinfo-9.0.0-py3-none-any.whl", hash = "sha256:859625bc251f64e21f077d099d4162689c762b5d6a4c3c97553d56241c9674d5"},
 ]
 
+[[package]]
+name = "pycparser"
+version = "2.21"
+description = "C parser in Python"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+    {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"},
+    {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"},
+]
+
+[[package]]
+name = "pycparser"
+version = "2.22"
+description = "C parser in Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"},
+    {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"},
+]
+
 [[package]]
 name = "pygments"
 version = "2.17.2"
@@ -830,13 +1216,13 @@ windows-terminal = ["colorama (>=0.4.6)"]
 
 [[package]]
 name = "pygments"
-version = "2.18.0"
+version = "2.19.1"
 description = "Pygments is a syntax highlighting package written in Python."
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"},
-    {file = "pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199"},
+    {file = "pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c"},
+    {file = "pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f"},
 ]
 
 [package.extras]
@@ -844,13 +1230,13 @@ windows-terminal = ["colorama (>=0.4.6)"]
 
 [[package]]
 name = "pyproject-api"
-version = "1.7.1"
+version = "1.8.0"
 description = "API to interact with the python pyproject.toml based projects"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "pyproject_api-1.7.1-py3-none-any.whl", hash = "sha256:2dc1654062c2b27733d8fd4cdda672b22fe8741ef1dde8e3a998a9547b071eeb"},
-    {file = "pyproject_api-1.7.1.tar.gz", hash = "sha256:7ebc6cd10710f89f4cf2a2731710a98abce37ebff19427116ff2174c9236a827"},
+    {file = "pyproject_api-1.8.0-py3-none-any.whl", hash = "sha256:3d7d347a047afe796fd5d1885b1e391ba29be7169bd2f102fcd378f04273d228"},
+    {file = "pyproject_api-1.8.0.tar.gz", hash = "sha256:77b8049f2feb5d33eefcc21b57f1e279636277a8ac8ad6b5871037b243778496"},
 ]
 
 [package.dependencies]
@@ -858,8 +1244,8 @@ packaging = ">=24.1"
 tomli = {version = ">=2.0.1", markers = "python_version < \"3.11\""}
 
 [package.extras]
-docs = ["furo (>=2024.5.6)", "sphinx-autodoc-typehints (>=2.2.1)"]
-testing = ["covdefaults (>=2.3)", "pytest (>=8.2.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "setuptools (>=70.1)"]
+docs = ["furo (>=2024.8.6)", "sphinx-autodoc-typehints (>=2.4.1)"]
+testing = ["covdefaults (>=2.3)", "pytest (>=8.3.3)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "setuptools (>=75.1)"]
 
 [[package]]
 name = "pytest"
@@ -886,13 +1272,13 @@ testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "no
 
 [[package]]
 name = "pytest"
-version = "8.3.2"
+version = "8.3.5"
 description = "pytest: simple powerful testing with Python"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "pytest-8.3.2-py3-none-any.whl", hash = "sha256:4ba08f9ae7dcf84ded419494d229b48d0903ea6407b030eaec46df5e6a73bba5"},
-    {file = "pytest-8.3.2.tar.gz", hash = "sha256:c132345d12ce551242c87269de812483f5bcc87cdbb4722e48487ba194f9fdce"},
+    {file = "pytest-8.3.5-py3-none-any.whl", hash = "sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820"},
+    {file = "pytest-8.3.5.tar.gz", hash = "sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845"},
 ]
 
 [package.dependencies]
@@ -927,22 +1313,40 @@ testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy
 
 [[package]]
 name = "pytest-asyncio"
-version = "0.23.8"
+version = "0.24.0"
 description = "Pytest support for asyncio"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"},
-    {file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"},
+    {file = "pytest_asyncio-0.24.0-py3-none-any.whl", hash = "sha256:a811296ed596b69bf0b6f3dc40f83bcaf341b155a269052d82efa2b25ac7037b"},
+    {file = "pytest_asyncio-0.24.0.tar.gz", hash = "sha256:d081d828e576d85f875399194281e92bf8a68d60d72d1a2faf2feddb6c46b276"},
 ]
 
 [package.dependencies]
-pytest = ">=7.0.0,<9"
+pytest = ">=8.2,<9"
 
 [package.extras]
 docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"]
 testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"]
 
+[[package]]
+name = "pytest-asyncio"
+version = "0.25.3"
+description = "Pytest support for asyncio"
+optional = false
+python-versions = ">=3.9"
+files = [
+    {file = "pytest_asyncio-0.25.3-py3-none-any.whl", hash = "sha256:9e89518e0f9bd08928f97a3482fdc4e244df17529460bc038291ccaf8f85c7c3"},
+    {file = "pytest_asyncio-0.25.3.tar.gz", hash = "sha256:fc1da2cf9f125ada7e710b4ddad05518d4cee187ae9412e9ac9271003497f07a"},
+]
+
+[package.dependencies]
+pytest = ">=8.2,<9"
+
+[package.extras]
+docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1)"]
+testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"]
+
 [[package]]
 name = "pytest-benchmark"
 version = "4.0.0"
@@ -963,6 +1367,79 @@ aspect = ["aspectlib"]
 elasticsearch = ["elasticsearch"]
 histogram = ["pygal", "pygaljs"]
 
+[[package]]
+name = "pytest-benchmark"
+version = "5.1.0"
+description = "A ``pytest`` fixture for benchmarking code. It will group the tests into rounds that are calibrated to the chosen timer."
+optional = false
+python-versions = ">=3.9"
+files = [
+    {file = "pytest-benchmark-5.1.0.tar.gz", hash = "sha256:9ea661cdc292e8231f7cd4c10b0319e56a2118e2c09d9f50e1b3d150d2aca105"},
+    {file = "pytest_benchmark-5.1.0-py3-none-any.whl", hash = "sha256:922de2dfa3033c227c96da942d1878191afa135a29485fb942e85dff1c592c89"},
+]
+
+[package.dependencies]
+py-cpuinfo = "*"
+pytest = ">=8.1"
+
+[package.extras]
+aspect = ["aspectlib"]
+elasticsearch = ["elasticsearch"]
+histogram = ["pygal", "pygaljs", "setuptools"]
+
+[[package]]
+name = "pytest-codspeed"
+version = "2.2.1"
+description = "Pytest plugin to create CodSpeed benchmarks"
+optional = false
+python-versions = ">=3.7"
+files = [
+    {file = "pytest_codspeed-2.2.1-py3-none-any.whl", hash = "sha256:aad08033015f3e6c8c14c8bf0eca475921a9b088e92c98b626bf8af8f516471e"},
+    {file = "pytest_codspeed-2.2.1.tar.gz", hash = "sha256:0adc24baf01c64a6ca0a0b83b3cd704351708997e09ec086b7776c32227d4e0a"},
+]
+
+[package.dependencies]
+cffi = ">=1.15.1"
+filelock = ">=3.12.2"
+pytest = ">=3.8"
+
+[package.extras]
+compat = ["pytest-benchmark (>=4.0.0,<4.1.0)", "pytest-xdist (>=2.0.0,<2.1.0)"]
+lint = ["mypy (>=1.3.0,<1.4.0)", "ruff (>=0.3.3,<0.4.0)"]
+test = ["pytest (>=7.0,<8.0)", "pytest-cov (>=4.0.0,<4.1.0)"]
+
+[[package]]
+name = "pytest-codspeed"
+version = "3.2.0"
+description = "Pytest plugin to create CodSpeed benchmarks"
+optional = false
+python-versions = ">=3.9"
+files = [
+    {file = "pytest_codspeed-3.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c5165774424c7ab8db7e7acdb539763a0e5657996effefdf0664d7fd95158d34"},
+    ...
+    {file = "pytest_codspeed-3.2.0-py3-none-any.whl", hash = "sha256:54b5c2e986d6a28e7b0af11d610ea57bd5531cec8326abe486f1b55b09d91c39"},
+    {file = "pytest_codspeed-3.2.0.tar.gz", hash = "sha256:f9d1b1a3b2c69cdc0490a1e8b1ced44bffbd0e8e21d81a7160cfdd923f6e8155"},
+]
+
+[package.dependencies]
+cffi = ">=1.17.1"
+importlib-metadata = {version = ">=8.5.0", markers = "python_version < \"3.10\""}
+pytest = ">=3.8"
+rich = ">=13.8.1"
+
+[package.extras]
+compat = ["pytest-benchmark (>=5.0.0,<5.1.0)", "pytest-xdist (>=3.6.1,<3.7.0)"]
+lint = ["mypy (>=1.11.2,<1.12.0)", "ruff (>=0.6.5,<0.7.0)"]
+test = ["pytest (>=7.0,<8.0)", "pytest-cov (>=4.0.0,<4.1.0)"]
+
 [[package]]
 name = "pytest-cov"
 version = "4.1.0"
@@ -999,6 +1476,24 @@ pytest = ">=4.6"
 
 [package.extras]
 testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"]
 
+[[package]]
+name = "pytest-cov"
+version = "6.1.1"
+description = "Pytest plugin for measuring coverage."
+optional = false
+python-versions = ">=3.9"
+files = [
+    {file = "pytest_cov-6.1.1-py3-none-any.whl", hash = "sha256:bddf29ed2d0ab6f4df17b4c55b0a657287db8684af9c42ea546b21b1041b3dde"},
+    {file = "pytest_cov-6.1.1.tar.gz", hash = "sha256:46935f7aaefba760e716c2ebfbe1c216240b9592966e7da99ea8292d4d3e2a0a"},
+]
+
+[package.dependencies]
+coverage = {version = ">=7.5", extras = ["toml"]}
+pytest = ">=4.6"
+
+[package.extras]
+testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"]
+
 [[package]]
 name = "pytest-describe"
 version = "2.2.0"
@@ -1029,13 +1524,13 @@ pytest = ">=7.0.0"
 
 [[package]]
 name = "pytz"
-version = "2024.1"
+version = "2025.2"
 description = "World timezone definitions, modern and historical"
 optional = false
 python-versions = "*"
 files = [
-    {file = "pytz-2024.1-py2.py3-none-any.whl", hash = "sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319"},
-    {file = "pytz-2024.1.tar.gz", hash = "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812"},
+    {file = "pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00"},
+    {file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"},
 ]
 
 [[package]]
@@ -1080,42 +1575,61 @@ urllib3 = ">=1.21.1,<3"
 socks = ["PySocks (>=1.5.6,!=1.5.7)"]
 use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
 
+[[package]]
+name = "rich"
+version = "14.0.0"
+description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal"
+optional = false
+python-versions = ">=3.8.0"
+files = [
+    {file = "rich-14.0.0-py3-none-any.whl", hash = "sha256:1c9491e1951aac09caffd42f448ee3d04e58923ffe14993f6e83068dc395d7e0"},
+    {file = "rich-14.0.0.tar.gz", hash = "sha256:82f1bc23a6a21ebca4ae0c45af9bdbc492ed20231dcb63f297d6d1021a9d5725"},
+]
+
+[package.dependencies]
+markdown-it-py = ">=2.2.0"
+pygments = ">=2.13.0,<3.0.0"
+typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.11\""}
+
+[package.extras]
+jupyter = ["ipywidgets (>=7.5.1,<9)"]
+
 [[package]]
 name = "ruff"
-version = "0.5.7"
+version = "0.11.8"
 description = "An extremely fast Python linter and code formatter, written in Rust."
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "ruff-0.5.7-py3-none-linux_armv6l.whl", hash = "sha256:548992d342fc404ee2e15a242cdbea4f8e39a52f2e7752d0e4cbe88d2d2f416a"},
-    ...
-    {file = "ruff-0.5.7-py3-none-win_arm64.whl", hash = "sha256:2dca26154ff9571995107221d0aeaad0e75a77b5a682d6236cf89a58c70b76f4"},
-    {file = "ruff-0.5.7.tar.gz", hash = "sha256:8dfc0a458797f5d9fb622dd0efc52d796f23f0a1493a9527f4e49a550ae9a7e5"},
+    {file = "ruff-0.11.8-py3-none-linux_armv6l.whl", hash = "sha256:896a37516c594805e34020c4a7546c8f8a234b679a7716a3f08197f38913e1a3"},
+    ...
+    {file = "ruff-0.11.8-py3-none-win_arm64.whl", hash = "sha256:304432e4c4a792e3da85b7699feb3426a0908ab98bf29df22a31b0cdd098fac2"},
+    {file = "ruff-0.11.8.tar.gz", hash = "sha256:6d742d10626f9004b781f4558154bb226620a7242080e11caeffab1a40e99df8"},
 ]
 
 [[package]]
 name = "six"
-version = "1.16.0"
+version = "1.17.0"
 description = "Python 2 and 3 compatibility utilities"
 optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
 files = [
-    {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
-    {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
+    {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"},
+    {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"},
 ]
 
 [[package]]
@@ -1362,6 +1876,47 @@ files = [
     {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
 ]
 
+[[package]]
+name = "tomli"
+version = "2.2.1"
+description = "A lil' TOML parser"
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"},
+    {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"},
+    {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"},
+    {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"},
+    {file = 
"tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"}, + {file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"}, + {file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"}, + {file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"}, + {file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"}, + {file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"}, + {file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"}, + {file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"}, + {file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"}, + {file = 
"tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"}, + {file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"}, + {file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"}, + {file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"}, + {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"}, +] + [[package]] name = "tox" version = "3.28.0" @@ -1390,30 +1945,30 @@ testing = ["flaky (>=3.4.0)", "freezegun (>=0.3.11)", "pathlib2 (>=2.3.3)", "psu [[package]] name = "tox" -version = "4.17.1" +version = "4.25.0" description = "tox is a generic virtualenv management and test command line tool" optional = false python-versions = ">=3.8" files = [ - {file = "tox-4.17.1-py3-none-any.whl", hash = "sha256:2974597c0353577126ab014f52d1a399fb761049e165ff34427f84e8cfe6c990"}, - {file = "tox-4.17.1.tar.gz", hash = "sha256:2c41565a571e34480bd401d668a4899806169a4633e972ac296c54406d2ded8a"}, + {file = "tox-4.25.0-py3-none-any.whl", hash = "sha256:4dfdc7ba2cc6fdc6688dde1b21e7b46ff6c41795fb54586c91a3533317b5255c"}, + {file = "tox-4.25.0.tar.gz", hash = "sha256:dd67f030317b80722cf52b246ff42aafd3ed27ddf331c415612d084304cf5e52"}, ] [package.dependencies] -cachetools = ">=5.4" +cachetools = ">=5.5.1" chardet = ">=5.2" colorama = ">=0.4.6" -filelock = ">=3.15.4" -packaging = ">=24.1" -platformdirs = ">=4.2.2" +filelock = ">=3.16.1" +packaging = ">=24.2" +platformdirs = ">=4.3.6" pluggy = ">=1.5" -pyproject-api = ">=1.7.1" -tomli = {version = ">=2.0.1", markers = "python_version < \"3.11\""} -virtualenv = ">=20.26.3" +pyproject-api = ">=1.8" +tomli = {version = ">=2.2.1", markers = "python_version < \"3.11\""} +typing-extensions = {version = ">=4.12.2", markers = "python_version < \"3.11\""} +virtualenv = ">=20.29.1" [package.extras] -docs = ["furo (>=2024.7.18)", "sphinx (>=7.4.7)", "sphinx-argparse-cli (>=1.16)", "sphinx-autodoc-typehints (>=2.2.3)", "sphinx-copybutton (>=0.5.2)", "sphinx-inline-tabs (>=2023.4.21)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.11)"] -testing = ["build[virtualenv] (>=1.2.1)", "covdefaults (>=2.3)", "detect-test-pollution (>=1.2)", "devpi-process (>=1)", "diff-cover (>=9.1.1)", "distlib (>=0.3.8)", "flaky (>=3.8.1)", "hatch-vcs (>=0.4)", "hatchling (>=1.25)", "psutil (>=6)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-xdist (>=3.6.1)", "re-assert (>=1.1)", "setuptools (>=70.3)", "time-machine (>=2.14.2)", "wheel (>=0.43)"] +test = ["devpi-process (>=1.0.2)", "pytest (>=8.3.4)", "pytest-mock (>=3.14)"] [[package]] name = "typed-ast" @@ -1478,13 +2033,13 @@ files = [ [[package]] name = "typing-extensions" -version = "4.12.2" +version = "4.13.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, - {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, + {file = "typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c"}, + {file = "typing_extensions-4.13.2.tar.gz", 
hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef"}, ] [[package]] @@ -1506,13 +2061,13 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "urllib3" -version = "2.2.2" +version = "2.2.3" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=3.8" files = [ - {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"}, - {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"}, + {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, + {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, ] [package.extras] @@ -1523,13 +2078,13 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "virtualenv" -version = "20.26.3" +version = "20.26.6" description = "Virtual Python Environment builder" optional = false python-versions = ">=3.7" files = [ - {file = "virtualenv-20.26.3-py3-none-any.whl", hash = "sha256:8cc4a31139e796e9a7de2cd5cf2489de1217193116a8fd42328f1bd65f434589"}, - {file = "virtualenv-20.26.3.tar.gz", hash = "sha256:4c43a2a236279d9ea36a0d76f98d84bd6ca94ac4e0f4a3b9d46d05e10fea542a"}, + {file = "virtualenv-20.26.6-py3-none-any.whl", hash = "sha256:7345cc5b25405607a624d8418154577459c3e0277f5466dd79c49d5e492995f2"}, + {file = "virtualenv-20.26.6.tar.gz", hash = "sha256:280aede09a2a5c317e409a00102e7077c6432c5a38f0ef938e643805a7ad2c48"}, ] [package.dependencies] @@ -1542,6 +2097,26 @@ platformdirs = ">=3.9.1,<5" docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"] +[[package]] +name = "virtualenv" +version = "20.30.0" +description = "Virtual Python Environment builder" +optional = false +python-versions = ">=3.8" +files = [ + {file = "virtualenv-20.30.0-py3-none-any.whl", hash = "sha256:e34302959180fca3af42d1800df014b35019490b119eba981af27f2fa486e5d6"}, + {file = "virtualenv-20.30.0.tar.gz", hash = "sha256:800863162bcaa5450a6e4d721049730e7f2dae07720e0902b0e4040bd6f9ada8"}, +] + +[package.dependencies] +distlib = ">=0.3.7,<1" +filelock = ">=3.12.2,<4" +platformdirs = ">=3.9.1,<5" + +[package.extras] +docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] +test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"] + [[package]] name = "zipp" version = "3.15.0" @@ -1559,20 +2134,24 @@ testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more [[package]] name = "zipp" -version = "3.20.0" +version = "3.20.2" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.8" files 
= [ - {file = "zipp-3.20.0-py3-none-any.whl", hash = "sha256:58da6168be89f0be59beb194da1250516fdaa062ccebd30127ac65d30045e10d"}, - {file = "zipp-3.20.0.tar.gz", hash = "sha256:0145e43d89664cfe1a2e533adc75adafed82fe2da404b4bbb6b026c0157bdb31"}, + {file = "zipp-3.20.2-py3-none-any.whl", hash = "sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350"}, + {file = "zipp-3.20.2.tar.gz", hash = "sha256:bc9eb26f4506fda01b81bcde0ca78103b6e62f991b381fec825435c836edbc29"}, ] [package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] +cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] +type = ["pytest-mypy"] [metadata] lock-version = "2.0" python-versions = "^3.7" -content-hash = "de9ad44d919a23237212508ca6da20b929c8c6cc8aa0da01406ef2f731debe10" +content-hash = "73cdf582288c9a4f22ebca27df8a40982b23954061d23e7d2301dfe9877cdb8d" diff --git a/pyproject.toml b/pyproject.toml index e149de23..e8d2ec6d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "graphql-core" -version = "3.3.0a6" +version = "3.3.0a7" description = """\ GraphQL-core is a Python port of GraphQL.js,\ the JavaScript reference implementation for GraphQL.""" @@ -22,6 +22,7 @@ classifiers = [ "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13" ] packages = [ { include = "graphql", from = "src" }, @@ -43,7 +44,7 @@ Changelog = "https://github.com/graphql-python/graphql-core/releases" [tool.poetry.dependencies] python = "^3.7" typing-extensions = [ - { version = "^4.12", python = ">=3.8,<3.10" }, + { version = "^4.12.2", python = ">=3.8,<3.10" }, { version = "^4.7.1", python = "<3.8" }, ] @@ -53,21 +54,30 @@ optional = true [tool.poetry.group.test.dependencies] pytest = [ { version = "^8.3", python = ">=3.8" }, - { version = "^7.4", python = "<3.8"} + { version = "^7.4", python = "<3.8" } ] pytest-asyncio = [ - { version = "^0.23.8", python = ">=3.8" }, - { version = "~0.21.1", python = "<3.8"} + { version = "^0.25.2", python = ">=3.9" }, + { version = "~0.24.0", python = ">=3.8,<3.9" }, + { version = "~0.21.1", python = "<3.8" } +] +pytest-benchmark = [ + { version = "^5.1", python = ">=3.9" }, + { version = "^4.0", python = "<3.9" } ] -pytest-benchmark = "^4.0" pytest-cov = [ - { version = "^5.0", python = ">=3.8" }, + { version = "^6.0", python = ">=3.9" }, + { version = "^5.0", python = ">=3.8,<3.9" }, { version = "^4.1", python = "<3.8" }, ] pytest-describe = "^2.2" pytest-timeout = "^2.3" +pytest-codspeed = [ + { version = "^3.1.2", python = ">=3.9" }, + { version = "^2.2.1", python = "<3.8" } +] tox = [ - { version = "^4.16", python = ">=3.8" }, + { version = "^4.24", python = ">=3.8" }, { version = "^3.28", python = "<3.8" } ] @@ -75,22 +85,23 @@ tox = [ optional = true [tool.poetry.group.lint.dependencies] -ruff = ">=0.5.7,<0.6" +ruff = ">=0.11,<0.12" mypy = [ - { version = "^1.11", python = 
">=3.8" }, + { version = "^1.15", python = ">=3.9" }, + { version = "~1.14", python = ">=3.8,<3.9" }, { version = "~1.4", python = "<3.8" } ] -bump2version = ">=1.0,<2" +bump2version = ">=1,<2" [tool.poetry.group.doc] optional = true [tool.poetry.group.doc.dependencies] sphinx = [ - { version = ">=7,<8", python = ">=3.8" }, + { version = ">=7,<9", python = ">=3.8" }, { version = ">=4,<6", python = "<3.8" } ] -sphinx_rtd_theme = "^2.0" +sphinx_rtd_theme = ">=2,<4" [tool.ruff] line-length = 88 @@ -144,7 +155,7 @@ select = [ "YTT", # flake8-2020 ] ignore = [ - "ANN101", "ANN102", # no type annotation for self and cls needed + "A005", # allow using standard-lib module names "ANN401", # allow explicit Any "COM812", # allow trailing commas for auto-formatting "D105", "D107", # no docstring needed for magic methods @@ -308,13 +319,17 @@ minversion = "7.4" addopts = "--benchmark-disable" # Deactivate default name pattern for test classes (we use pytest_describe). python_classes = "PyTest*" -# Handle all async fixtures and tests automatically by asyncio +# Handle all async fixtures and tests automatically by asyncio, asyncio_mode = "auto" # Set a timeout in seconds for aborting tests that run too long. timeout = "100" # Ignore config options not (yet) available in older Python versions. filterwarnings = "ignore::pytest.PytestConfigWarning" +# All tests can be found in the tests directory. +testpaths = ["tests"] +# Use the functions scope as the default for asynchronous tests. +asyncio_default_fixture_loop_scope = "function" [build-system] -requires = ["poetry_core>=1.6.1,<2"] +requires = ["poetry_core>=1.6.1,<3"] build-backend = "poetry.core.masonry.api" diff --git a/src/graphql/__init__.py b/src/graphql/__init__.py index e85c51ee..6938435a 100644 --- a/src/graphql/__init__.py +++ b/src/graphql/__init__.py @@ -259,6 +259,7 @@ GraphQLStreamDirective, GraphQLDeprecatedDirective, GraphQLSpecifiedByDirective, + GraphQLOneOfDirective, # "Enum" of Type Kinds TypeKind, # Constant Deprecation Reason @@ -473,344 +474,345 @@ __all__ = [ - "version", - "version_info", - "version_js", - "version_info_js", - "graphql", - "graphql_sync", - "GraphQLSchema", - "GraphQLDirective", - "GraphQLScalarType", - "GraphQLObjectType", - "GraphQLInterfaceType", - "GraphQLUnionType", - "GraphQLEnumType", - "GraphQLInputObjectType", - "GraphQLList", - "GraphQLNonNull", - "specified_scalar_types", - "GraphQLInt", - "GraphQLFloat", - "GraphQLString", - "GraphQLBoolean", - "GraphQLID", + "BREAK", + "DEFAULT_DEPRECATION_REASON", "GRAPHQL_MAX_INT", "GRAPHQL_MIN_INT", - "specified_directives", - "GraphQLIncludeDirective", - "GraphQLSkipDirective", - "GraphQLDeferDirective", - "GraphQLStreamDirective", - "GraphQLDeprecatedDirective", - "GraphQLSpecifiedByDirective", - "TypeKind", - "DEFAULT_DEPRECATION_REASON", - "introspection_types", - "SchemaMetaFieldDef", - "TypeMetaFieldDef", - "TypeNameMetaFieldDef", - "is_schema", - "is_directive", - "is_type", - "is_scalar_type", - "is_object_type", - "is_interface_type", - "is_union_type", - "is_enum_type", - "is_input_object_type", - "is_list_type", - "is_non_null_type", - "is_input_type", - "is_output_type", - "is_leaf_type", - "is_composite_type", - "is_abstract_type", - "is_wrapping_type", - "is_nullable_type", - "is_named_type", - "is_required_argument", - "is_required_input_field", - "is_specified_scalar_type", - "is_introspection_type", - "is_specified_directive", - "assert_schema", - "assert_directive", - "assert_type", - "assert_scalar_type", - "assert_object_type", - 
"assert_interface_type", - "assert_union_type", - "assert_enum_type", - "assert_input_object_type", - "assert_list_type", - "assert_non_null_type", - "assert_input_type", - "assert_output_type", - "assert_leaf_type", - "assert_composite_type", - "assert_abstract_type", - "assert_wrapping_type", - "assert_nullable_type", - "assert_named_type", - "get_nullable_type", - "get_named_type", - "resolve_thunk", - "validate_schema", - "assert_valid_schema", - "assert_name", - "assert_enum_value_name", - "GraphQLType", - "GraphQLInputType", - "GraphQLOutputType", - "GraphQLLeafType", - "GraphQLCompositeType", + "IDLE", + "REMOVE", + "SKIP", + "ASTValidationRule", + "ArgumentNode", + "BooleanValueNode", + "BreakingChange", + "BreakingChangeType", + "ConstArgumentNode", + "ConstDirectiveNode", + "ConstListValueNode", + "ConstObjectFieldNode", + "ConstObjectValueNode", + "ConstValueNode", + "DangerousChange", + "DangerousChangeType", + "DefinitionNode", + "DirectiveDefinitionNode", + "DirectiveLocation", + "DirectiveNode", + "DocumentNode", + "EnumTypeDefinitionNode", + "EnumTypeExtensionNode", + "EnumValueDefinitionNode", + "EnumValueNode", + "ErrorBoundaryNode", + "ExecutableDefinitionNode", + "ExecutableDefinitionsRule", + "ExecutionContext", + "ExecutionResult", + "ExperimentalIncrementalExecutionResults", + "FieldDefinitionNode", + "FieldNode", + "FieldsOnCorrectTypeRule", + "FloatValueNode", + "FormattedExecutionResult", + "FormattedIncrementalDeferResult", + "FormattedIncrementalResult", + "FormattedIncrementalStreamResult", + "FormattedInitialIncrementalExecutionResult", + "FormattedSubsequentIncrementalExecutionResult", + "FragmentDefinitionNode", + "FragmentSpreadNode", + "FragmentsOnCompositeTypesRule", "GraphQLAbstractType", - "GraphQLWrappingType", - "GraphQLNullableType", - "GraphQLNullableInputType", - "GraphQLNullableOutputType", - "GraphQLNamedType", - "GraphQLNamedInputType", - "GraphQLNamedOutputType", - "Thunk", - "ThunkCollection", - "ThunkMapping", "GraphQLArgument", + "GraphQLArgumentKwargs", "GraphQLArgumentMap", + "GraphQLBoolean", + "GraphQLCompositeType", + "GraphQLDeferDirective", + "GraphQLDeprecatedDirective", + "GraphQLDirective", + "GraphQLDirectiveKwargs", + "GraphQLEnumType", + "GraphQLEnumTypeKwargs", "GraphQLEnumValue", + "GraphQLEnumValueKwargs", "GraphQLEnumValueMap", + "GraphQLError", + "GraphQLErrorExtensions", "GraphQLField", + "GraphQLFieldKwargs", "GraphQLFieldMap", "GraphQLFieldResolver", + "GraphQLFloat", + "GraphQLFormattedError", + "GraphQLID", + "GraphQLIncludeDirective", "GraphQLInputField", + "GraphQLInputFieldKwargs", "GraphQLInputFieldMap", "GraphQLInputFieldOutType", - "GraphQLScalarSerializer", - "GraphQLScalarValueParser", - "GraphQLScalarLiteralParser", - "GraphQLIsTypeOfFn", - "GraphQLResolveInfo", - "ResponsePath", - "GraphQLTypeResolver", - "GraphQLArgumentKwargs", - "GraphQLDirectiveKwargs", - "GraphQLEnumTypeKwargs", - "GraphQLEnumValueKwargs", - "GraphQLFieldKwargs", - "GraphQLInputFieldKwargs", + "GraphQLInputObjectType", "GraphQLInputObjectTypeKwargs", + "GraphQLInputType", + "GraphQLInt", + "GraphQLInterfaceType", "GraphQLInterfaceTypeKwargs", + "GraphQLIsTypeOfFn", + "GraphQLLeafType", + "GraphQLList", + "GraphQLNamedInputType", + "GraphQLNamedOutputType", + "GraphQLNamedType", "GraphQLNamedTypeKwargs", + "GraphQLNonNull", + "GraphQLNullableInputType", + "GraphQLNullableOutputType", + "GraphQLNullableType", + "GraphQLObjectType", "GraphQLObjectTypeKwargs", + "GraphQLOneOfDirective", + "GraphQLOutputType", + "GraphQLResolveInfo", + 
"GraphQLScalarLiteralParser", + "GraphQLScalarSerializer", + "GraphQLScalarType", "GraphQLScalarTypeKwargs", + "GraphQLScalarValueParser", + "GraphQLSchema", "GraphQLSchemaKwargs", - "GraphQLUnionTypeKwargs", - "Source", - "get_location", - "print_location", - "print_source_location", - "Lexer", - "TokenKind", - "parse", - "parse_value", - "parse_const_value", - "parse_type", - "print_ast", - "visit", - "ParallelVisitor", - "TypeInfoVisitor", - "Visitor", - "VisitorAction", - "VisitorKeyMap", - "BREAK", - "SKIP", - "REMOVE", - "IDLE", - "DirectiveLocation", - "is_definition_node", - "is_executable_definition_node", - "is_nullability_assertion_node", - "is_selection_node", - "is_value_node", - "is_const_value_node", - "is_type_node", - "is_type_system_definition_node", - "is_type_definition_node", - "is_type_system_extension_node", - "is_type_extension_node", - "SourceLocation", - "Location", - "Token", - "Node", - "NameNode", - "DocumentNode", - "DefinitionNode", - "ExecutableDefinitionNode", - "OperationDefinitionNode", - "OperationType", - "VariableDefinitionNode", - "VariableNode", - "SelectionSetNode", - "SelectionNode", - "FieldNode", - "ArgumentNode", - "NullabilityAssertionNode", - "NonNullAssertionNode", - "ErrorBoundaryNode", - "ListNullabilityOperatorNode", - "ConstArgumentNode", - "FragmentSpreadNode", - "InlineFragmentNode", - "FragmentDefinitionNode", - "ValueNode", - "ConstValueNode", - "IntValueNode", - "FloatValueNode", - "StringValueNode", - "BooleanValueNode", - "NullValueNode", - "EnumValueNode", - "ListValueNode", - "ConstListValueNode", - "ObjectValueNode", - "ConstObjectValueNode", - "ObjectFieldNode", - "ConstObjectFieldNode", - "DirectiveNode", - "ConstDirectiveNode", - "TypeNode", - "NamedTypeNode", - "ListTypeNode", - "NonNullTypeNode", - "TypeSystemDefinitionNode", - "SchemaDefinitionNode", - "OperationTypeDefinitionNode", - "TypeDefinitionNode", - "ScalarTypeDefinitionNode", - "ObjectTypeDefinitionNode", - "FieldDefinitionNode", - "InputValueDefinitionNode", - "InterfaceTypeDefinitionNode", - "UnionTypeDefinitionNode", - "EnumTypeDefinitionNode", - "EnumValueDefinitionNode", - "InputObjectTypeDefinitionNode", - "DirectiveDefinitionNode", - "TypeSystemExtensionNode", - "SchemaExtensionNode", - "TypeExtensionNode", - "ScalarTypeExtensionNode", - "ObjectTypeExtensionNode", - "InterfaceTypeExtensionNode", - "UnionTypeExtensionNode", - "EnumTypeExtensionNode", - "InputObjectTypeExtensionNode", - "execute", - "execute_sync", - "default_field_resolver", - "default_type_resolver", - "get_argument_values", - "get_directive_values", - "get_variable_values", - "ExecutionContext", - "ExecutionResult", - "ExperimentalIncrementalExecutionResults", - "InitialIncrementalExecutionResult", - "SubsequentIncrementalExecutionResult", + "GraphQLSkipDirective", + "GraphQLSpecifiedByDirective", + "GraphQLStreamDirective", + "GraphQLString", + "GraphQLSyntaxError", + "GraphQLType", + "GraphQLTypeResolver", + "GraphQLUnionType", + "GraphQLUnionTypeKwargs", + "GraphQLWrappingType", "IncrementalDeferResult", - "IncrementalStreamResult", "IncrementalResult", - "FormattedExecutionResult", - "FormattedInitialIncrementalExecutionResult", - "FormattedSubsequentIncrementalExecutionResult", - "FormattedIncrementalDeferResult", - "FormattedIncrementalStreamResult", - "FormattedIncrementalResult", - "Middleware", - "MiddlewareManager", - "subscribe", - "create_source_event_stream", - "map_async_iterable", - "validate", - "ValidationContext", - "ValidationRule", - "ASTValidationRule", - 
"SDLValidationRule", - "specified_rules", - "ExecutableDefinitionsRule", - "FieldsOnCorrectTypeRule", - "FragmentsOnCompositeTypesRule", + "IncrementalStreamResult", + "InitialIncrementalExecutionResult", + "InlineFragmentNode", + "InputObjectTypeDefinitionNode", + "InputObjectTypeExtensionNode", + "InputValueDefinitionNode", + "IntValueNode", + "InterfaceTypeDefinitionNode", + "InterfaceTypeExtensionNode", + "IntrospectionQuery", "KnownArgumentNamesRule", "KnownDirectivesRule", "KnownFragmentNamesRule", "KnownTypeNamesRule", + "Lexer", + "ListNullabilityOperatorNode", + "ListTypeNode", + "ListValueNode", + "Location", "LoneAnonymousOperationRule", + "LoneSchemaDefinitionRule", + "Middleware", + "MiddlewareManager", + "NameNode", + "NamedTypeNode", + "NoDeprecatedCustomRule", "NoFragmentCyclesRule", + "NoSchemaIntrospectionCustomRule", "NoUndefinedVariablesRule", "NoUnusedFragmentsRule", "NoUnusedVariablesRule", + "Node", + "NonNullAssertionNode", + "NonNullTypeNode", + "NullValueNode", + "NullabilityAssertionNode", + "ObjectFieldNode", + "ObjectTypeDefinitionNode", + "ObjectTypeExtensionNode", + "ObjectValueNode", + "OperationDefinitionNode", + "OperationType", + "OperationTypeDefinitionNode", "OverlappingFieldsCanBeMergedRule", + "ParallelVisitor", "PossibleFragmentSpreadsRule", + "PossibleTypeExtensionsRule", "ProvidedRequiredArgumentsRule", + "ResponsePath", + "SDLValidationRule", "ScalarLeafsRule", + "ScalarTypeDefinitionNode", + "ScalarTypeExtensionNode", + "SchemaDefinitionNode", + "SchemaExtensionNode", + "SchemaMetaFieldDef", + "SelectionNode", + "SelectionSetNode", "SingleFieldSubscriptionsRule", + "Source", + "SourceLocation", + "StringValueNode", + "SubsequentIncrementalExecutionResult", + "Thunk", + "ThunkCollection", + "ThunkMapping", + "Token", + "TokenKind", + "TypeDefinitionNode", + "TypeExtensionNode", + "TypeInfo", + "TypeInfoVisitor", + "TypeKind", + "TypeMetaFieldDef", + "TypeNameMetaFieldDef", + "TypeNode", + "TypeSystemDefinitionNode", + "TypeSystemExtensionNode", + "Undefined", + "UndefinedType", + "UnionTypeDefinitionNode", + "UnionTypeExtensionNode", + "UniqueArgumentDefinitionNamesRule", "UniqueArgumentNamesRule", + "UniqueDirectiveNamesRule", "UniqueDirectivesPerLocationRule", + "UniqueEnumValueNamesRule", + "UniqueFieldDefinitionNamesRule", "UniqueFragmentNamesRule", "UniqueInputFieldNamesRule", "UniqueOperationNamesRule", + "UniqueOperationTypesRule", + "UniqueTypeNamesRule", "UniqueVariableNamesRule", + "ValidationContext", + "ValidationRule", + "ValueNode", "ValuesOfCorrectTypeRule", + "VariableDefinitionNode", + "VariableNode", "VariablesAreInputTypesRule", "VariablesInAllowedPositionRule", - "LoneSchemaDefinitionRule", - "UniqueOperationTypesRule", - "UniqueTypeNamesRule", - "UniqueEnumValueNamesRule", - "UniqueFieldDefinitionNamesRule", - "UniqueArgumentDefinitionNamesRule", - "UniqueDirectiveNamesRule", - "PossibleTypeExtensionsRule", - "NoDeprecatedCustomRule", - "NoSchemaIntrospectionCustomRule", - "GraphQLError", - "GraphQLErrorExtensions", - "GraphQLFormattedError", - "GraphQLSyntaxError", - "located_error", - "get_introspection_query", - "IntrospectionQuery", - "get_operation_ast", - "introspection_from_schema", - "build_client_schema", + "Visitor", + "VisitorAction", + "VisitorKeyMap", + "assert_abstract_type", + "assert_composite_type", + "assert_directive", + "assert_enum_type", + "assert_enum_value_name", + "assert_input_object_type", + "assert_input_type", + "assert_interface_type", + "assert_leaf_type", + "assert_list_type", + "assert_name", + 
"assert_named_type", + "assert_non_null_type", + "assert_nullable_type", + "assert_object_type", + "assert_output_type", + "assert_scalar_type", + "assert_schema", + "assert_type", + "assert_union_type", + "assert_valid_schema", + "assert_wrapping_type", + "ast_from_value", + "ast_to_dict", "build_ast_schema", + "build_client_schema", "build_schema", + "coerce_input_value", + "concat_ast", + "create_source_event_stream", + "default_field_resolver", + "default_type_resolver", + "do_types_overlap", + "execute", + "execute_sync", "extend_schema", + "find_breaking_changes", + "find_dangerous_changes", + "get_argument_values", + "get_directive_values", + "get_introspection_query", + "get_location", + "get_named_type", + "get_nullable_type", + "get_operation_ast", + "get_variable_values", + "graphql", + "graphql_sync", + "introspection_from_schema", + "introspection_types", + "is_abstract_type", + "is_composite_type", + "is_const_value_node", + "is_definition_node", + "is_directive", + "is_enum_type", + "is_equal_type", + "is_executable_definition_node", + "is_input_object_type", + "is_input_type", + "is_interface_type", + "is_introspection_type", + "is_leaf_type", + "is_list_type", + "is_named_type", + "is_non_null_type", + "is_nullability_assertion_node", + "is_nullable_type", + "is_object_type", + "is_output_type", + "is_required_argument", + "is_required_input_field", + "is_scalar_type", + "is_schema", + "is_selection_node", + "is_specified_directive", + "is_specified_scalar_type", + "is_type", + "is_type_definition_node", + "is_type_extension_node", + "is_type_node", + "is_type_sub_type_of", + "is_type_system_definition_node", + "is_type_system_extension_node", + "is_union_type", + "is_value_node", + "is_wrapping_type", "lexicographic_sort_schema", - "print_schema", - "print_type", + "located_error", + "map_async_iterable", + "parse", + "parse_const_value", + "parse_type", + "parse_value", + "print_ast", "print_directive", "print_introspection_schema", + "print_location", + "print_schema", + "print_source_location", + "print_type", + "resolve_thunk", + "separate_operations", + "specified_directives", + "specified_rules", + "specified_scalar_types", + "strip_ignored_characters", + "subscribe", "type_from_ast", + "validate", + "validate_schema", "value_from_ast", "value_from_ast_untyped", - "ast_from_value", - "ast_to_dict", - "TypeInfo", - "coerce_input_value", - "concat_ast", - "separate_operations", - "strip_ignored_characters", - "is_equal_type", - "is_type_sub_type_of", - "do_types_overlap", - "find_breaking_changes", - "find_dangerous_changes", - "BreakingChange", - "BreakingChangeType", - "DangerousChange", - "DangerousChangeType", - "Undefined", - "UndefinedType", + "version", + "version_info", + "version_info_js", + "version_js", + "visit", ] diff --git a/src/graphql/error/graphql_error.py b/src/graphql/error/graphql_error.py index ff128748..8123a713 100644 --- a/src/graphql/error/graphql_error.py +++ b/src/graphql/error/graphql_error.py @@ -108,14 +108,14 @@ class GraphQLError(Exception): """Extension fields to add to the formatted error""" __slots__ = ( + "extensions", + "locations", "message", "nodes", - "source", - "positions", - "locations", - "path", "original_error", - "extensions", + "path", + "positions", + "source", ) __hash__ = Exception.__hash__ diff --git a/src/graphql/execution/__init__.py b/src/graphql/execution/__init__.py index aec85be1..375ec400 100644 --- a/src/graphql/execution/__init__.py +++ b/src/graphql/execution/__init__.py @@ -14,21 +14,21 @@ 
default_type_resolver, subscribe, ExecutionContext, - ExecutionResult, - ExperimentalIncrementalExecutionResults, - InitialIncrementalExecutionResult, - FormattedExecutionResult, - FormattedInitialIncrementalExecutionResult, Middleware, ) from .incremental_publisher import ( + ExecutionResult, + ExperimentalIncrementalExecutionResults, FormattedSubsequentIncrementalExecutionResult, FormattedIncrementalDeferResult, FormattedIncrementalResult, FormattedIncrementalStreamResult, + FormattedExecutionResult, + FormattedInitialIncrementalExecutionResult, IncrementalDeferResult, IncrementalResult, IncrementalStreamResult, + InitialIncrementalExecutionResult, SubsequentIncrementalExecutionResult, ) from .async_iterables import map_async_iterable @@ -37,31 +37,31 @@ __all__ = [ "ASYNC_DELAY", - "create_source_event_stream", - "execute", - "experimental_execute_incrementally", - "execute_sync", - "default_field_resolver", - "default_type_resolver", - "subscribe", "ExecutionContext", "ExecutionResult", "ExperimentalIncrementalExecutionResults", - "InitialIncrementalExecutionResult", - "SubsequentIncrementalExecutionResult", - "IncrementalDeferResult", - "IncrementalStreamResult", - "IncrementalResult", "FormattedExecutionResult", - "FormattedInitialIncrementalExecutionResult", - "FormattedSubsequentIncrementalExecutionResult", "FormattedIncrementalDeferResult", - "FormattedIncrementalStreamResult", "FormattedIncrementalResult", - "map_async_iterable", + "FormattedIncrementalStreamResult", + "FormattedInitialIncrementalExecutionResult", + "FormattedSubsequentIncrementalExecutionResult", + "IncrementalDeferResult", + "IncrementalResult", + "IncrementalStreamResult", + "InitialIncrementalExecutionResult", "Middleware", "MiddlewareManager", + "SubsequentIncrementalExecutionResult", + "create_source_event_stream", + "default_field_resolver", + "default_type_resolver", + "execute", + "execute_sync", + "experimental_execute_incrementally", "get_argument_values", "get_directive_values", "get_variable_values", + "map_async_iterable", + "subscribe", ] diff --git a/src/graphql/execution/async_iterables.py b/src/graphql/execution/async_iterables.py index 747a515d..b8faad88 100644 --- a/src/graphql/execution/async_iterables.py +++ b/src/graphql/execution/async_iterables.py @@ -2,7 +2,7 @@ from __future__ import annotations -from contextlib import AbstractAsyncContextManager +from contextlib import AbstractAsyncContextManager, suppress from typing import ( AsyncGenerator, AsyncIterable, @@ -20,6 +20,8 @@ AsyncIterableOrGenerator = Union[AsyncGenerator[T, None], AsyncIterable[T]] +suppress_exceptions = suppress(Exception) + class aclosing(AbstractAsyncContextManager, Generic[T]): # noqa: N801 """Async context manager for safely finalizing an async iterator or generator. 
@@ -40,7 +42,8 @@ async def __aexit__(self, *_exc_info: object) -> None: except AttributeError: pass # do not complain if the iterator has no aclose() method else: - await aclose() + with suppress_exceptions: # or if the aclose() method fails + await aclose() async def map_async_iterable( diff --git a/src/graphql/execution/collect_fields.py b/src/graphql/execution/collect_fields.py index 5cb5a723..c3fc99cc 100644 --- a/src/graphql/execution/collect_fields.py +++ b/src/graphql/execution/collect_fields.py @@ -3,8 +3,7 @@ from __future__ import annotations import sys -from collections import defaultdict -from typing import Any, Dict, List, NamedTuple +from typing import Any, Dict, NamedTuple, Union, cast from ..language import ( FieldNode, @@ -15,6 +14,7 @@ OperationType, SelectionSetNode, ) +from ..pyutils import RefMap, RefSet from ..type import ( GraphQLDeferDirective, GraphQLIncludeDirective, @@ -33,33 +33,88 @@ __all__ = [ + "NON_DEFERRED_TARGET_SET", + "CollectFieldsContext", + "CollectFieldsResult", + "DeferUsage", + "DeferUsageSet", + "FieldDetails", + "FieldGroup", + "GroupedFieldSetDetails", + "Target", + "TargetSet", "collect_fields", "collect_subfields", - "FieldGroup", - "FieldsAndPatches", - "GroupedFieldSet", ] + +class DeferUsage(NamedTuple): + """An optionally labelled list of ancestor targets.""" + + label: str | None + ancestors: list[Target] + + +Target: TypeAlias = Union[DeferUsage, None] + +TargetSet: TypeAlias = RefSet[Target] +DeferUsageSet: TypeAlias = RefSet[DeferUsage] + + +NON_DEFERRED_TARGET_SET: TargetSet = RefSet([None]) + + +class FieldDetails(NamedTuple): + """A field node and its target.""" + + node: FieldNode + target: Target + + +class FieldGroup(NamedTuple): + """A group of fields that share the same target set.""" + + fields: list[FieldDetails] + targets: TargetSet + + def to_nodes(self) -> list[FieldNode]: + """Return the field nodes in this group.""" + return [field_details.node for field_details in self.fields] + + if sys.version_info < (3, 9): - FieldGroup: TypeAlias = List[FieldNode] - GroupedFieldSet = Dict[str, FieldGroup] + GroupedFieldSet: TypeAlias = Dict[str, FieldGroup] else: # Python >= 3.9 - FieldGroup: TypeAlias = list[FieldNode] - GroupedFieldSet = dict[str, FieldGroup] + GroupedFieldSet: TypeAlias = dict[str, FieldGroup] -class PatchFields(NamedTuple): - """Optionally labelled set of fields to be used as a patch.""" +class GroupedFieldSetDetails(NamedTuple): + """A grouped field set with defer info.""" - label: str | None grouped_field_set: GroupedFieldSet + should_initiate_defer: bool -class FieldsAndPatches(NamedTuple): - """Tuple of collected fields and patches to be applied.""" +class CollectFieldsResult(NamedTuple): + """Collected fields and deferred usages.""" grouped_field_set: GroupedFieldSet - patches: list[PatchFields] + new_grouped_field_set_details: RefMap[DeferUsageSet, GroupedFieldSetDetails] + new_defer_usages: list[DeferUsage] + + +class CollectFieldsContext(NamedTuple): + """Context for collecting fields.""" + + schema: GraphQLSchema + fragments: dict[str, FragmentDefinitionNode] + variable_values: dict[str, Any] + operation: OperationDefinitionNode + runtime_type: GraphQLObjectType + targets_by_key: dict[str, TargetSet] + fields_by_target: RefMap[Target, dict[str, list[FieldNode]]] + new_defer_usages: list[DeferUsage] + visited_fragment_names: set[str] def collect_fields( @@ -68,7 +123,7 @@ def collect_fields( variable_values: dict[str, Any], runtime_type: GraphQLObjectType, operation: OperationDefinitionNode, -) -> 
FieldsAndPatches: +) -> CollectFieldsResult: """Collect fields. Given a selection_set, collects all the fields and returns them. @@ -79,20 +134,23 @@ def collect_fields( For internal use only. """ - grouped_field_set: dict[str, list[FieldNode]] = defaultdict(list) - patches: list[PatchFields] = [] - collect_fields_impl( + context = CollectFieldsContext( schema, fragments, variable_values, operation, runtime_type, - operation.selection_set, - grouped_field_set, - patches, + {}, + RefMap(), + [], set(), ) - return FieldsAndPatches(grouped_field_set, patches) + collect_fields_impl(context, operation.selection_set) + + return CollectFieldsResult( + *build_grouped_field_sets(context.targets_by_key, context.fields_by_target), + context.new_defer_usages, + ) def collect_subfields( @@ -102,7 +160,7 @@ def collect_subfields( operation: OperationDefinitionNode, return_type: GraphQLObjectType, field_group: FieldGroup, -) -> FieldsAndPatches: +) -> CollectFieldsResult: """Collect subfields. Given a list of field nodes, collects all the subfields of the passed in fields, @@ -114,47 +172,73 @@ def collect_subfields( For internal use only. """ - sub_grouped_field_set: dict[str, list[FieldNode]] = defaultdict(list) - visited_fragment_names: set[str] = set() - - sub_patches: list[PatchFields] = [] - sub_fields_and_patches = FieldsAndPatches(sub_grouped_field_set, sub_patches) + context = CollectFieldsContext( + schema, + fragments, + variable_values, + operation, + return_type, + {}, + RefMap(), + [], + set(), + ) - for node in field_group: + for field_details in field_group.fields: + node = field_details.node if node.selection_set: - collect_fields_impl( - schema, - fragments, - variable_values, - operation, - return_type, - node.selection_set, - sub_grouped_field_set, - sub_patches, - visited_fragment_names, - ) - return sub_fields_and_patches + collect_fields_impl(context, node.selection_set, field_details.target) + + return CollectFieldsResult( + *build_grouped_field_sets( + context.targets_by_key, context.fields_by_target, field_group.targets + ), + context.new_defer_usages, + ) def collect_fields_impl( - schema: GraphQLSchema, - fragments: dict[str, FragmentDefinitionNode], - variable_values: dict[str, Any], - operation: OperationDefinitionNode, - runtime_type: GraphQLObjectType, + context: CollectFieldsContext, selection_set: SelectionSetNode, - grouped_field_set: dict[str, list[FieldNode]], - patches: list[PatchFields], - visited_fragment_names: set[str], + parent_target: Target | None = None, + new_target: Target | None = None, ) -> None: """Collect fields (internal implementation).""" - patch_fields: dict[str, list[FieldNode]] + ( + schema, + fragments, + variable_values, + operation, + runtime_type, + targets_by_key, + fields_by_target, + new_defer_usages, + visited_fragment_names, + ) = context + + ancestors: list[Target] for selection in selection_set.selections: if isinstance(selection, FieldNode): if not should_include_node(variable_values, selection): continue - grouped_field_set[get_field_entry_key(selection)].append(selection) + key = get_field_entry_key(selection) + target = new_target or parent_target + key_targets = targets_by_key.get(key) + if key_targets is None: + key_targets = RefSet([target]) + targets_by_key[key] = key_targets + else: + key_targets.add(target) + target_fields = fields_by_target.get(target) + if target_fields is None: + fields_by_target[target] = {key: [selection]} + else: + field_nodes = target_fields.get(key) + if field_nodes is None: + target_fields[key] = 
[selection] + else: + field_nodes.append(selection) elif isinstance(selection, InlineFragmentNode): if not should_include_node( variable_values, selection ): continue defer = get_defer_values(operation, variable_values, selection) + if defer: - patch_fields = defaultdict(list) - collect_fields_impl( - schema, - fragments, - variable_values, - operation, - runtime_type, - selection.selection_set, - patch_fields, - patches, - visited_fragment_names, + ancestors = ( + [None] + if parent_target is None + else [parent_target, *parent_target.ancestors] ) - patches.append(PatchFields(defer.label, patch_fields)) + target = DeferUsage(defer.label, ancestors) + new_defer_usages.append(target) else: - collect_fields_impl( - schema, - fragments, - variable_values, - operation, - runtime_type, - selection.selection_set, - grouped_field_set, - patches, - visited_fragment_names, - ) + target = new_target + + collect_fields_impl(context, selection.selection_set, parent_target, target) elif isinstance(selection, FragmentSpreadNode): # pragma: no cover else frag_name = selection.name.value @@ -204,35 +275,19 @@ ): continue - if not defer: - visited_fragment_names.add(frag_name) - if defer: - patch_fields = defaultdict(list) - collect_fields_impl( - schema, - fragments, - variable_values, - operation, - runtime_type, - fragment.selection_set, - patch_fields, - patches, - visited_fragment_names, + ancestors = ( + [None] + if parent_target is None + else [parent_target, *parent_target.ancestors] ) - patches.append(PatchFields(defer.label, patch_fields)) + target = DeferUsage(defer.label, ancestors) + new_defer_usages.append(target) else: - collect_fields_impl( - schema, - fragments, - variable_values, - operation, - runtime_type, - fragment.selection_set, - grouped_field_set, - patches, - visited_fragment_names, - ) + visited_fragment_names.add(frag_name) + target = new_target + + collect_fields_impl(context, fragment.selection_set, parent_target, target) class DeferValues(NamedTuple): @@ -305,3 +360,111 @@ def does_fragment_condition_match( def get_field_entry_key(node: FieldNode) -> str: """Implement the logic to compute the key of a given field's entry""" return node.alias.value if node.alias else node.name.value + + +def build_grouped_field_sets( + targets_by_key: dict[str, TargetSet], + fields_by_target: RefMap[Target, dict[str, list[FieldNode]]], + parent_targets: TargetSet = NON_DEFERRED_TARGET_SET, +) -> tuple[GroupedFieldSet, RefMap[DeferUsageSet, GroupedFieldSetDetails]]: + """Build grouped field sets.""" + parent_target_keys, target_set_details_map = get_target_set_details( + targets_by_key, parent_targets + ) + + grouped_field_set = ( + get_ordered_grouped_field_set( + parent_target_keys, parent_targets, targets_by_key, fields_by_target + ) + if parent_target_keys + else {} + ) + + new_grouped_field_set_details: RefMap[DeferUsageSet, GroupedFieldSetDetails] = ( + RefMap() + ) + + for masking_targets, target_set_details in target_set_details_map.items(): + keys, should_initiate_defer = target_set_details + + new_grouped_field_set = get_ordered_grouped_field_set( + keys, masking_targets, targets_by_key, fields_by_target + ) + + # All TargetSets that cause new grouped field sets consist only of DeferUsages + # and have should_initiate_defer defined + + new_grouped_field_set_details[cast("DeferUsageSet", masking_targets)] = ( + GroupedFieldSetDetails(new_grouped_field_set, should_initiate_defer) + ) + + return grouped_field_set, 
new_grouped_field_set_details + + +class TargetSetDetails(NamedTuple): + """A set of target keys with defer info.""" + + keys: set[str] + should_initiate_defer: bool + + +def get_target_set_details( + targets_by_key: dict[str, TargetSet], parent_targets: TargetSet +) -> tuple[set[str], RefMap[TargetSet, TargetSetDetails]]: + """Get target set details.""" + parent_target_keys: set[str] = set() + target_set_details_map: RefMap[TargetSet, TargetSetDetails] = RefMap() + + for response_key, targets in targets_by_key.items(): + masking_target_list: list[Target] = [] + for target in targets: + if not target or all( + ancestor not in targets for ancestor in target.ancestors + ): + masking_target_list.append(target) + + masking_targets: TargetSet = RefSet(masking_target_list) + if masking_targets == parent_targets: + parent_target_keys.add(response_key) + continue + + for target_set, target_set_details in target_set_details_map.items(): + if target_set == masking_targets: + target_set_details.keys.add(response_key) + break + else: + target_set_details = TargetSetDetails( + {response_key}, + any( + defer_usage not in parent_targets for defer_usage in masking_targets + ), + ) + target_set_details_map[masking_targets] = target_set_details + + return parent_target_keys, target_set_details_map + + +def get_ordered_grouped_field_set( + keys: set[str], + masking_targets: TargetSet, + targets_by_key: dict[str, TargetSet], + fields_by_target: RefMap[Target, dict[str, list[FieldNode]]], +) -> GroupedFieldSet: + """Get ordered grouped field set.""" + grouped_field_set: GroupedFieldSet = {} + + first_target = next(iter(masking_targets)) + first_fields = fields_by_target[first_target] + for key in list(first_fields): + if key in keys: + field_group = grouped_field_set.get(key) + if field_group is None: # pragma: no cover else + field_group = FieldGroup([], masking_targets) + grouped_field_set[key] = field_group + for target in targets_by_key[key]: + fields_for_target = fields_by_target[target] + nodes = fields_for_target[key] + del fields_for_target[key] + field_group.fields.extend(FieldDetails(node, target) for node in nodes) + + return grouped_field_set diff --git a/src/graphql/execution/execute.py b/src/graphql/execution/execute.py index e370bcc1..1097e80f 100644 --- a/src/graphql/execution/execute.py +++ b/src/graphql/execution/execute.py @@ -3,8 +3,8 @@ from __future__ import annotations from asyncio import ensure_future, gather, shield, wait_for -from collections.abc import Mapping from contextlib import suppress +from copy import copy from typing import ( Any, AsyncGenerator, @@ -13,8 +13,8 @@ Awaitable, Callable, Iterable, - Iterator, List, + Mapping, NamedTuple, Optional, Sequence, @@ -24,20 +24,15 @@ ) try: - from typing import TypedDict -except ImportError: # Python < 3.8 - from typing_extensions import TypedDict -try: - from typing import TypeAlias, TypeGuard + from typing import TypeAlias, TypeGuard # noqa: F401 except ImportError: # Python < 3.10 - from typing_extensions import TypeAlias, TypeGuard + from typing_extensions import TypeAlias try: # only needed for Python < 3.11 - # noinspection PyCompatibility - from asyncio.exceptions import TimeoutError + from asyncio.exceptions import TimeoutError # noqa: A004 except ImportError: # Python < 3.7 - from concurrent.futures import TimeoutError + from concurrent.futures import TimeoutError # noqa: A004 -from ..error import GraphQLError, GraphQLFormattedError, located_error +from ..error import GraphQLError, located_error from ..language import ( 
DocumentNode, FragmentDefinitionNode, @@ -47,6 +42,7 @@ from ..pyutils import ( AwaitableOrValue, Path, + RefMap, Undefined, async_reduce, inspect, @@ -74,35 +70,45 @@ ) from .async_iterables import map_async_iterable from .collect_fields import ( + NON_DEFERRED_TARGET_SET, + CollectFieldsResult, + DeferUsage, + DeferUsageSet, + FieldDetails, FieldGroup, - FieldsAndPatches, GroupedFieldSet, + GroupedFieldSetDetails, collect_fields, collect_subfields, ) from .incremental_publisher import ( ASYNC_DELAY, - FormattedIncrementalResult, + DeferredFragmentRecord, + DeferredGroupedFieldSetRecord, + ExecutionResult, + ExperimentalIncrementalExecutionResults, IncrementalDataRecord, IncrementalPublisher, - IncrementalResult, + InitialResultRecord, StreamItemsRecord, - SubsequentIncrementalExecutionResult, + StreamRecord, ) from .middleware import MiddlewareManager from .values import get_argument_values, get_directive_values, get_variable_values try: # pragma: no cover - anext # noqa: B018 + anext # noqa: B018 # pyright: ignore except NameError: # pragma: no cover (Python < 3.10) # noinspection PyShadowingBuiltins - async def anext(iterator: AsyncIterator) -> Any: # noqa: A001 + async def anext(iterator: AsyncIterator) -> Any: """Return the next item from an async iterator.""" return await iterator.__anext__() __all__ = [ "ASYNC_DELAY", + "ExecutionContext", + "Middleware", "create_source_event_stream", "default_field_resolver", "default_type_resolver", @@ -110,13 +116,6 @@ async def anext(iterator: AsyncIterator) -> Any: # noqa: A001 "execute_sync", "experimental_execute_incrementally", "subscribe", - "ExecutionResult", - "ExecutionContext", - "ExperimentalIncrementalExecutionResults", - "FormattedExecutionResult", - "FormattedInitialIncrementalExecutionResult", - "InitialIncrementalExecutionResult", - "Middleware", ] suppress_exceptions = suppress(Exception) @@ -142,198 +141,15 @@ async def anext(iterator: AsyncIterator) -> Any: # noqa: A001 # 3) inline fragment "spreads" e.g. "...on Type { a }" -class FormattedExecutionResult(TypedDict, total=False): - """Formatted execution result""" - - data: dict[str, Any] | None - errors: list[GraphQLFormattedError] - extensions: dict[str, Any] - - -class ExecutionResult: - """The result of GraphQL execution. - - - ``data`` is the result of a successful execution of the query. - - ``errors`` is included when any errors occurred as a non-empty list. - - ``extensions`` is reserved for adding non-standard properties. 
- """ - - __slots__ = "data", "errors", "extensions" - - data: dict[str, Any] | None - errors: list[GraphQLError] | None - extensions: dict[str, Any] | None - - def __init__( - self, - data: dict[str, Any] | None = None, - errors: list[GraphQLError] | None = None, - extensions: dict[str, Any] | None = None, - ) -> None: - self.data = data - self.errors = errors - self.extensions = extensions - - def __repr__(self) -> str: - name = self.__class__.__name__ - ext = "" if self.extensions is None else f", extensions={self.extensions}" - return f"{name}(data={self.data!r}, errors={self.errors!r}{ext})" - - def __iter__(self) -> Iterator[Any]: - return iter((self.data, self.errors)) - - @property - def formatted(self) -> FormattedExecutionResult: - """Get execution result formatted according to the specification.""" - formatted: FormattedExecutionResult = {"data": self.data} - if self.errors is not None: - formatted["errors"] = [error.formatted for error in self.errors] - if self.extensions is not None: - formatted["extensions"] = self.extensions - return formatted - - def __eq__(self, other: object) -> bool: - if isinstance(other, dict): - if "extensions" not in other: - return other == {"data": self.data, "errors": self.errors} - return other == { - "data": self.data, - "errors": self.errors, - "extensions": self.extensions, - } - if isinstance(other, tuple): - if len(other) == 2: - return other == (self.data, self.errors) - return other == (self.data, self.errors, self.extensions) - return ( - isinstance(other, self.__class__) - and other.data == self.data - and other.errors == self.errors - and other.extensions == self.extensions - ) - - def __ne__(self, other: object) -> bool: - return not self == other - - -class FormattedInitialIncrementalExecutionResult(TypedDict, total=False): - """Formatted initial incremental execution result""" - - data: dict[str, Any] | None - errors: list[GraphQLFormattedError] - hasNext: bool - incremental: list[FormattedIncrementalResult] - extensions: dict[str, Any] - - -class InitialIncrementalExecutionResult: - """Initial incremental execution result. - - - ``has_next`` is True if a future payload is expected. - - ``incremental`` is a list of the results from defer/stream directives. 
- """ - - data: dict[str, Any] | None - errors: list[GraphQLError] | None - incremental: Sequence[IncrementalResult] | None - has_next: bool - extensions: dict[str, Any] | None - - __slots__ = "data", "errors", "has_next", "incremental", "extensions" - - def __init__( - self, - data: dict[str, Any] | None = None, - errors: list[GraphQLError] | None = None, - incremental: Sequence[IncrementalResult] | None = None, - has_next: bool = False, - extensions: dict[str, Any] | None = None, - ) -> None: - self.data = data - self.errors = errors - self.incremental = incremental - self.has_next = has_next - self.extensions = extensions - - def __repr__(self) -> str: - name = self.__class__.__name__ - args: list[str] = [f"data={self.data!r}, errors={self.errors!r}"] - if self.incremental: - args.append(f"incremental[{len(self.incremental)}]") - if self.has_next: - args.append("has_next") - if self.extensions: - args.append(f"extensions={self.extensions}") - return f"{name}({', '.join(args)})" - - @property - def formatted(self) -> FormattedInitialIncrementalExecutionResult: - """Get execution result formatted according to the specification.""" - formatted: FormattedInitialIncrementalExecutionResult = {"data": self.data} - if self.errors is not None: - formatted["errors"] = [error.formatted for error in self.errors] - if self.incremental: - formatted["incremental"] = [result.formatted for result in self.incremental] - formatted["hasNext"] = self.has_next - if self.extensions is not None: - formatted["extensions"] = self.extensions - return formatted - - def __eq__(self, other: object) -> bool: - if isinstance(other, dict): - return ( - other.get("data") == self.data - and other.get("errors") == self.errors - and ( - "incremental" not in other - or other["incremental"] == self.incremental - ) - and ("hasNext" not in other or other["hasNext"] == self.has_next) - and ( - "extensions" not in other or other["extensions"] == self.extensions - ) - ) - if isinstance(other, tuple): - size = len(other) - return ( - 1 < size < 6 - and ( - self.data, - self.errors, - self.incremental, - self.has_next, - self.extensions, - )[:size] - == other - ) - return ( - isinstance(other, self.__class__) - and other.data == self.data - and other.errors == self.errors - and other.incremental == self.incremental - and other.has_next == self.has_next - and other.extensions == self.extensions - ) - - def __ne__(self, other: object) -> bool: - return not self == other +Middleware: TypeAlias = Optional[Union[Tuple, List, MiddlewareManager]] -class StreamArguments(NamedTuple): - """Arguments of the stream directive""" +class StreamUsage(NamedTuple): + """Stream directive usage information""" - initial_count: int label: str | None - - -class ExperimentalIncrementalExecutionResults(NamedTuple): - """Execution results when retrieved incrementally.""" - - initial_result: InitialIncrementalExecutionResult - subsequent_results: AsyncGenerator[SubsequentIncrementalExecutionResult, None] - - -Middleware: TypeAlias = Optional[Union[Tuple, List, MiddlewareManager]] + initial_count: int + field_group: FieldGroup class ExecutionContext: @@ -352,13 +168,10 @@ class ExecutionContext: field_resolver: GraphQLFieldResolver type_resolver: GraphQLTypeResolver subscribe_field_resolver: GraphQLFieldResolver - errors: list[GraphQLError] incremental_publisher: IncrementalPublisher middleware_manager: MiddlewareManager | None - is_awaitable: Callable[[Any], TypeGuard[Awaitable]] = staticmethod( - default_is_awaitable - ) + is_awaitable: Callable[[Any], 
bool] = staticmethod(default_is_awaitable) def __init__( self, @@ -371,7 +184,6 @@ def __init__( field_resolver: GraphQLFieldResolver, type_resolver: GraphQLTypeResolver, subscribe_field_resolver: GraphQLFieldResolver, - errors: list[GraphQLError], incremental_publisher: IncrementalPublisher, middleware_manager: MiddlewareManager | None, is_awaitable: Callable[[Any], bool] | None, @@ -385,14 +197,14 @@ def __init__( self.field_resolver = field_resolver self.type_resolver = type_resolver self.subscribe_field_resolver = subscribe_field_resolver - self.errors = errors self.incremental_publisher = incremental_publisher self.middleware_manager = middleware_manager if is_awaitable: self.is_awaitable = is_awaitable self._canceled_iterators: set[AsyncIterator] = set() - self._subfields_cache: dict[tuple, FieldsAndPatches] = {} + self._subfields_cache: dict[tuple, CollectFieldsResult] = {} self._tasks: set[Awaitable] = set() + self._stream_usages: RefMap[FieldGroup, StreamUsage] = RefMap() @classmethod def build( @@ -408,6 +220,7 @@ def build( subscribe_field_resolver: GraphQLFieldResolver | None = None, middleware: Middleware | None = None, is_awaitable: Callable[[Any], bool] | None = None, + **custom_args: Any, ) -> list[GraphQLError] | ExecutionContext: """Build an execution context @@ -478,57 +291,27 @@ def build( field_resolver or default_field_resolver, type_resolver or default_type_resolver, subscribe_field_resolver or default_field_resolver, - [], IncrementalPublisher(), middleware_manager, is_awaitable, + **custom_args, ) - @staticmethod - def build_response( - data: dict[str, Any] | None, errors: list[GraphQLError] - ) -> ExecutionResult: - """Build response. - - Given a completed execution context and data, build the (data, errors) response - defined by the "Response" section of the GraphQL spec. - """ - if not errors: - return ExecutionResult(data, None) - # Sort the error list in order to make it deterministic, since we might have - # been using parallel execution. - errors.sort( - key=lambda error: (error.locations or [], error.path or [], error.message) - ) - return ExecutionResult(data, errors) - def build_per_event_execution_context(self, payload: Any) -> ExecutionContext: """Create a copy of the execution context for usage with subscribe events.""" - return self.__class__( - self.schema, - self.fragments, - payload, - self.context_value, - self.operation, - self.variable_values, - self.field_resolver, - self.type_resolver, - self.subscribe_field_resolver, - [], - # no need to update incrementalPublisher, - # incremental delivery is not supported for subscriptions - self.incremental_publisher, - self.middleware_manager, - self.is_awaitable, - ) + context = copy(self) + context.root_value = payload + return context - def execute_operation(self) -> AwaitableOrValue[dict[str, Any]]: + def execute_operation( + self, initial_result_record: InitialResultRecord + ) -> AwaitableOrValue[dict[str, Any]]: """Execute an operation. Implements the "Executing operations" section of the spec. 
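        Glossing over the incremental-delivery bookkeeping, the control flow is
        roughly this sketch (names from this module, abbreviated)::

            fields, details, defer_usages = collect_fields(...)
            defer_map = add_new_deferred_fragments(publisher, defer_usages, record)
            records = add_new_deferred_grouped_field_sets(publisher, details,
                                                          defer_map)
            result = self.execute_fields(...)  # serially for mutations
            self.execute_deferred_grouped_field_sets(..., records, defer_map)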
""" - schema = self.schema operation = self.operation + schema = self.schema root_type = schema.get_root_type(operation.operation) if root_type is None: msg = ( @@ -537,12 +320,24 @@ def execute_operation(self) -> AwaitableOrValue[dict[str, Any]]: ) raise GraphQLError(msg, operation) - grouped_field_set, patches = collect_fields( - schema, - self.fragments, - self.variable_values, - root_type, - operation, + grouped_field_set, new_grouped_field_set_details, new_defer_usages = ( + collect_fields( + schema, self.fragments, self.variable_values, root_type, operation + ) + ) + + incremental_publisher = self.incremental_publisher + new_defer_map = add_new_deferred_fragments( + incremental_publisher, new_defer_usages, initial_result_record + ) + + path: Path | None = None + + new_deferred_grouped_field_set_records = add_new_deferred_grouped_field_sets( + incremental_publisher, + new_grouped_field_set_details, + new_defer_map, + path, ) root_value = self.root_value @@ -551,13 +346,22 @@ def execute_operation(self) -> AwaitableOrValue[dict[str, Any]]: self.execute_fields_serially if operation.operation == OperationType.MUTATION else self.execute_fields - )(root_type, root_value, None, grouped_field_set) # type: ignore + )( + root_type, + root_value, + path, + grouped_field_set, + initial_result_record, + new_defer_map, + ) - for patch in patches: - label, patch_grouped_filed_set = patch - self.execute_deferred_fragment( - root_type, root_value, patch_grouped_filed_set, label, None - ) + self.execute_deferred_grouped_field_sets( + root_type, + root_value, + path, + new_deferred_grouped_field_set_records, + new_defer_map, + ) return result @@ -567,6 +371,8 @@ def execute_fields_serially( source_value: Any, path: Path | None, grouped_field_set: GroupedFieldSet, + incremental_data_record: IncrementalDataRecord, + defer_map: RefMap[DeferUsage, DeferredFragmentRecord], ) -> AwaitableOrValue[dict[str, Any]]: """Execute the given fields serially. @@ -581,7 +387,12 @@ def reducer( response_name, field_group = field_item field_path = Path(path, response_name, parent_type.name) result = self.execute_field( - parent_type, source_value, field_group, field_path + parent_type, + source_value, + field_group, + field_path, + incremental_data_record, + defer_map, ) if result is Undefined: return results @@ -607,7 +418,8 @@ def execute_fields( source_value: Any, path: Path | None, grouped_field_set: GroupedFieldSet, - incremental_data_record: IncrementalDataRecord | None = None, + incremental_data_record: IncrementalDataRecord, + defer_map: RefMap[DeferUsage, DeferredFragmentRecord], ) -> AwaitableOrValue[dict[str, Any]]: """Execute the given fields concurrently. @@ -626,6 +438,7 @@ def execute_fields( field_group, field_path, incremental_data_record, + defer_map, ) if result is not Undefined: results[response_name] = result @@ -662,7 +475,8 @@ def execute_field( source: Any, field_group: FieldGroup, path: Path, - incremental_data_record: IncrementalDataRecord | None = None, + incremental_data_record: IncrementalDataRecord, + defer_map: RefMap[DeferUsage, DeferredFragmentRecord], ) -> AwaitableOrValue[Any]: """Resolve the field on the given source object. @@ -672,7 +486,7 @@ def execute_field( calling its resolve function, then calls complete_value to await coroutine objects, serialize scalars, or execute the sub-selection-set for objects. 
""" - field_name = field_group[0].name.value + field_name = field_group.fields[0].node.name.value field_def = self.schema.get_field(parent_type, field_name) if not field_def: return Undefined @@ -690,7 +504,9 @@ def execute_field( try: # Build a dictionary of arguments from the field.arguments AST, using the # variables scope to fulfill any variable references. - args = get_argument_values(field_def, field_group[0], self.variable_values) + args = get_argument_values( + field_def, field_group.fields[0].node, self.variable_values + ) # Note that contrary to the JavaScript implementation, we pass the context # value as part of the resolve info. @@ -704,10 +520,17 @@ def execute_field( path, result, incremental_data_record, + defer_map, ) completed = self.complete_value( - return_type, field_group, info, path, result, incremental_data_record + return_type, + field_group, + info, + path, + result, + incremental_data_record, + defer_map, ) if self.is_awaitable(completed): # noinspection PyShadowingNames @@ -754,8 +577,8 @@ def build_resolve_info( # The resolve function's first argument is a collection of information about # the current execution state. return GraphQLResolveInfo( - field_group[0].name.value, - field_group, + field_group.fields[0].node.name.value, + field_group.to_nodes(), field_def.type, parent_type, path, @@ -774,23 +597,19 @@ def handle_field_error( return_type: GraphQLOutputType, field_group: FieldGroup, path: Path, - incremental_data_record: IncrementalDataRecord | None = None, + incremental_data_record: IncrementalDataRecord, ) -> None: """Handle error properly according to the field type.""" - error = located_error(raw_error, field_group, path.as_list()) + error = located_error(raw_error, field_group.to_nodes(), path.as_list()) # If the field type is non-nullable, then it is resolved without any protection # from errors, however it still properly locates the error. if is_non_null_type(return_type): raise error - errors = ( - incremental_data_record.errors if incremental_data_record else self.errors - ) - # Otherwise, error protection is applied, logging the error and resolving a # null value for this field if one is encountered. - errors.append(error) + self.incremental_publisher.add_field_error(incremental_data_record, error) def complete_value( self, @@ -799,7 +618,8 @@ def complete_value( info: GraphQLResolveInfo, path: Path, result: Any, - incremental_data_record: IncrementalDataRecord | None, + incremental_data_record: IncrementalDataRecord, + defer_map: RefMap[DeferUsage, DeferredFragmentRecord], ) -> AwaitableOrValue[Any]: """Complete a value. @@ -837,6 +657,7 @@ def complete_value( path, result, incremental_data_record, + defer_map, ) if completed is None: msg = ( @@ -853,7 +674,13 @@ def complete_value( # If field type is List, complete each item in the list with inner type if is_list_type(return_type): return self.complete_list_value( - return_type, field_group, info, path, result, incremental_data_record + return_type, + field_group, + info, + path, + result, + incremental_data_record, + defer_map, ) # If field type is a leaf type, Scalar or Enum, serialize to a valid value, @@ -865,13 +692,25 @@ def complete_value( # Object type and complete for that type. if is_abstract_type(return_type): return self.complete_abstract_value( - return_type, field_group, info, path, result, incremental_data_record + return_type, + field_group, + info, + path, + result, + incremental_data_record, + defer_map, ) # If field type is Object, execute and complete all sub-selections. 
if is_object_type(return_type): return self.complete_object_value( - return_type, field_group, info, path, result, incremental_data_record + return_type, + field_group, + info, + path, + result, + incremental_data_record, + defer_map, ) # Not reachable. All possible output types have been considered. @@ -888,7 +727,8 @@ async def complete_awaitable_value( info: GraphQLResolveInfo, path: Path, result: Any, - incremental_data_record: IncrementalDataRecord | None = None, + incremental_data_record: IncrementalDataRecord, + defer_map: RefMap[DeferUsage, DeferredFragmentRecord], ) -> Any: """Complete an awaitable value.""" try: @@ -900,6 +740,7 @@ async def complete_awaitable_value( path, resolved, incremental_data_record, + defer_map, ) if self.is_awaitable(completed): completed = await completed @@ -911,12 +752,12 @@ async def complete_awaitable_value( completed = None return completed - def get_stream_values( + def get_stream_usage( self, field_group: FieldGroup, path: Path - ) -> StreamArguments | None: - """Get stream values. + ) -> StreamUsage | None: + """Get stream usage. - Returns an object containing the `@stream` arguments if a field should be + Returns an object containing info for streaming if a field should be streamed based on the experimental flag, stream directive present and not disabled by the "if" argument. """ @@ -924,10 +765,14 @@ def get_stream_values( if isinstance(path.key, int): return None + stream_usage = self._stream_usages.get(field_group) + if stream_usage is not None: + return stream_usage # pragma: no cover + # validation only allows equivalent streams on multiple fields, so it is # safe to only check the first field_node for the stream directive stream = get_directive_values( - GraphQLStreamDirective, field_group[0], self.variable_values + GraphQLStreamDirective, field_group.fields[0].node, self.variable_values ) if not stream or stream.get("if") is False: @@ -945,8 +790,21 @@ def get_stream_values( ) raise TypeError(msg) - label = stream.get("label") - return StreamArguments(initial_count=initial_count, label=label) + streamed_field_group = FieldGroup( + [ + FieldDetails(field_details.node, None) + for field_details in field_group.fields + ], + NON_DEFERRED_TARGET_SET, + ) + + stream_usage = StreamUsage( + stream.get("label"), stream["initialCount"], streamed_field_group + ) + + self._stream_usages[field_group] = stream_usage + + return stream_usage async def complete_async_iterator_value( self, @@ -955,37 +813,40 @@ async def complete_async_iterator_value( info: GraphQLResolveInfo, path: Path, async_iterator: AsyncIterator[Any], - incremental_data_record: IncrementalDataRecord | None, + incremental_data_record: IncrementalDataRecord, + defer_map: RefMap[DeferUsage, DeferredFragmentRecord], ) -> list[Any]: """Complete an async iterator. Complete an async iterator value by completing the result and calling recursively until all the results are completed. 
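        If the field is streamed, only the first ``initial_count`` items are
        completed eagerly; the remaining items are handed off roughly like this
        simplified sketch of the loop below (omitting the shielding and
        timeout handling)::

            if stream_usage and index >= stream_usage.initial_count:
                await self.execute_stream_async_iterator(
                    index, async_iterator, stream_usage.field_group, ...
                )
                break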
""" - stream = self.get_stream_values(field_group, path) + stream_usage = self.get_stream_usage(field_group, path) complete_list_item_value = self.complete_list_item_value awaitable_indices: list[int] = [] append_awaitable = awaitable_indices.append completed_results: list[Any] = [] index = 0 while True: - if ( - stream - and isinstance(stream.initial_count, int) - and index >= stream.initial_count - ): + if stream_usage and index >= stream_usage.initial_count: + try: + early_return = async_iterator.aclose # type: ignore + except AttributeError: + early_return = None + stream_record = StreamRecord(path, stream_usage.label, early_return) + with suppress_timeout_error: await wait_for( shield( self.execute_stream_async_iterator( index, async_iterator, - field_group, + stream_usage.field_group, info, item_type, path, - stream.label, incremental_data_record, + stream_record, ) ), timeout=ASYNC_DELAY, @@ -1000,7 +861,7 @@ async def complete_async_iterator_value( break except Exception as raw_error: raise located_error( - raw_error, field_group, path.as_list() + raw_error, field_group.to_nodes(), path.as_list() ) from raw_error if complete_list_item_value( value, @@ -1010,6 +871,7 @@ async def complete_async_iterator_value( info, item_path, incremental_data_record, + defer_map, ): append_awaitable(index) @@ -1039,7 +901,8 @@ def complete_list_value( info: GraphQLResolveInfo, path: Path, result: AsyncIterable[Any] | Iterable[Any], - incremental_data_record: IncrementalDataRecord | None, + incremental_data_record: IncrementalDataRecord, + defer_map: RefMap[DeferUsage, DeferredFragmentRecord], ) -> AwaitableOrValue[list[Any]]: """Complete a list value. @@ -1057,6 +920,7 @@ def complete_list_value( path, async_iterator, incremental_data_record, + defer_map, ) if not is_iterable(result): @@ -1066,35 +930,34 @@ def complete_list_value( ) raise GraphQLError(msg) - stream = self.get_stream_values(field_group, path) + stream_usage = self.get_stream_usage(field_group, path) # This is specified as a simple map, however we're optimizing the path where # the list contains no coroutine objects by avoiding creating another coroutine # object. complete_list_item_value = self.complete_list_item_value + current_parents = incremental_data_record awaitable_indices: list[int] = [] append_awaitable = awaitable_indices.append - previous_incremental_data_record = incremental_data_record completed_results: list[Any] = [] + stream_record: StreamRecord | None = None for index, item in enumerate(result): # No need to modify the info object containing the path, since from here on # it is not ever accessed by resolver functions. 
item_path = path.add_key(index, None) - if ( - stream - and isinstance(stream.initial_count, int) - and index >= stream.initial_count - ): - previous_incremental_data_record = self.execute_stream_field( + if stream_usage and index >= stream_usage.initial_count: + if stream_record is None: + stream_record = StreamRecord(path, stream_usage.label) + current_parents = self.execute_stream_field( path, item_path, item, - field_group, + stream_usage.field_group, info, item_type, - stream.label, - previous_incremental_data_record, + current_parents, + stream_record, ) continue @@ -1106,9 +969,15 @@ def complete_list_value( info, item_path, incremental_data_record, + defer_map, ): append_awaitable(index) + if stream_record is not None: + self.incremental_publisher.set_is_final_record( + cast("StreamItemsRecord", current_parents) + ) + if not awaitable_indices: return completed_results @@ -1138,7 +1007,8 @@ def complete_list_item_value( field_group: FieldGroup, info: GraphQLResolveInfo, item_path: Path, - incremental_data_record: IncrementalDataRecord | None, + incremental_data_record: IncrementalDataRecord, + defer_map: RefMap[DeferUsage, DeferredFragmentRecord], ) -> bool: """Complete a list item value by adding it to the completed results. @@ -1155,6 +1025,7 @@ def complete_list_item_value( item_path, item, incremental_data_record, + defer_map, ) ) return True @@ -1167,6 +1038,7 @@ def complete_list_item_value( item_path, item, incremental_data_record, + defer_map, ) if is_awaitable(completed_item): @@ -1229,7 +1101,8 @@ def complete_abstract_value( info: GraphQLResolveInfo, path: Path, result: Any, - incremental_data_record: IncrementalDataRecord | None, + incremental_data_record: IncrementalDataRecord, + defer_map: RefMap[DeferUsage, DeferredFragmentRecord], ) -> AwaitableOrValue[Any]: """Complete an abstract value. @@ -1240,7 +1113,7 @@ def complete_abstract_value( runtime_type = resolve_type_fn(result, info, return_type) if self.is_awaitable(runtime_type): - runtime_type = cast(Awaitable, runtime_type) + runtime_type = cast("Awaitable", runtime_type) async def await_complete_object_value() -> Any: value = self.complete_object_value( @@ -1256,13 +1129,14 @@ async def await_complete_object_value() -> Any: path, result, incremental_data_record, + defer_map, ) if self.is_awaitable(value): return await value # type: ignore return value # pragma: no cover return await_complete_object_value() - runtime_type = cast(Optional[str], runtime_type) + runtime_type = cast("Optional[str]", runtime_type) return self.complete_object_value( self.ensure_valid_runtime_type( @@ -1273,6 +1147,7 @@ async def await_complete_object_value() -> Any: path, result, incremental_data_record, + defer_map, ) def ensure_valid_runtime_type( @@ -1293,7 +1168,7 @@ def ensure_valid_runtime_type( " a 'resolve_type' function or each possible type should provide" " an 'is_type_of' function." ) - raise GraphQLError(msg, field_group) + raise GraphQLError(msg, field_group.to_nodes()) if is_object_type(runtime_type_name): # pragma: no cover msg = ( @@ -1309,7 +1184,7 @@ def ensure_valid_runtime_type( f" for field '{info.parent_type.name}.{info.field_name}' with value" f" {inspect(result)}, received '{inspect(runtime_type_name)}'." 
) - raise GraphQLError(msg, field_group) + raise GraphQLError(msg, field_group.to_nodes()) runtime_type = self.schema.get_type(runtime_type_name) @@ -1318,21 +1193,21 @@ def ensure_valid_runtime_type( f"Abstract type '{return_type.name}' was resolved to a type" f" '{runtime_type_name}' that does not exist inside the schema." ) - raise GraphQLError(msg, field_group) + raise GraphQLError(msg, field_group.to_nodes()) if not is_object_type(runtime_type): msg = ( f"Abstract type '{return_type.name}' was resolved" f" to a non-object type '{runtime_type_name}'." ) - raise GraphQLError(msg, field_group) + raise GraphQLError(msg, field_group.to_nodes()) if not self.schema.is_sub_type(return_type, runtime_type): msg = ( f"Runtime Object type '{runtime_type.name}' is not a possible" f" type for '{return_type.name}'." ) - raise GraphQLError(msg, field_group) + raise GraphQLError(msg, field_group.to_nodes()) # noinspection PyTypeChecker return runtime_type @@ -1344,7 +1219,8 @@ def complete_object_value( info: GraphQLResolveInfo, path: Path, result: Any, - incremental_data_record: IncrementalDataRecord | None, + incremental_data_record: IncrementalDataRecord, + defer_map: RefMap[DeferUsage, DeferredFragmentRecord], ) -> AwaitableOrValue[dict[str, Any]]: """Complete an Object value by executing all sub-selections.""" # If there is an `is_type_of()` predicate function, call it with the current @@ -1361,7 +1237,12 @@ async def execute_subfields_async() -> dict[str, Any]: return_type, result, field_group ) return self.collect_and_execute_subfields( - return_type, field_group, path, result, incremental_data_record + return_type, + field_group, + path, + result, + incremental_data_record, + defer_map, ) # type: ignore return execute_subfields_async() @@ -1370,7 +1251,7 @@ async def execute_subfields_async() -> dict[str, Any]: raise invalid_return_type_error(return_type, result, field_group) return self.collect_and_execute_subfields( - return_type, field_group, path, result, incremental_data_record + return_type, field_group, path, result, incremental_data_record, defer_map ) def collect_and_execute_subfields( @@ -1379,33 +1260,48 @@ def collect_and_execute_subfields( field_group: FieldGroup, path: Path, result: Any, - incremental_data_record: IncrementalDataRecord | None, + incremental_data_record: IncrementalDataRecord, + defer_map: RefMap[DeferUsage, DeferredFragmentRecord], ) -> AwaitableOrValue[dict[str, Any]]: """Collect sub-fields to execute to complete this value.""" - sub_grouped_field_set, sub_patches = self.collect_subfields( - return_type, field_group + grouped_field_set, new_grouped_field_set_details, new_defer_usages = ( + self.collect_subfields(return_type, field_group) + ) + + incremental_publisher = self.incremental_publisher + new_defer_map = add_new_deferred_fragments( + incremental_publisher, + new_defer_usages, + incremental_data_record, + defer_map, + path, + ) + new_deferred_grouped_field_set_records = add_new_deferred_grouped_field_sets( + incremental_publisher, new_grouped_field_set_details, new_defer_map, path ) sub_fields = self.execute_fields( - return_type, result, path, sub_grouped_field_set, incremental_data_record + return_type, + result, + path, + grouped_field_set, + incremental_data_record, + new_defer_map, ) - for sub_patch in sub_patches: - label, sub_patch_grouped_field_set = sub_patch - self.execute_deferred_fragment( - return_type, - result, - sub_patch_grouped_field_set, - label, - path, - incremental_data_record, - ) + self.execute_deferred_grouped_field_sets( + 
return_type, + result, + path, + new_deferred_grouped_field_set_records, + new_defer_map, + ) return sub_fields def collect_subfields( self, return_type: GraphQLObjectType, field_group: FieldGroup - ) -> FieldsAndPatches: + ) -> CollectFieldsResult: """Collect subfields. A cached collection of relevant subfields with regard to the return type is @@ -1462,64 +1358,98 @@ async def callback(payload: Any) -> ExecutionResult: # typecast to ExecutionResult, not possible to return # ExperimentalIncrementalExecutionResults when operation is 'subscription'. return ( - await cast(Awaitable[ExecutionResult], result) + await cast("Awaitable[ExecutionResult]", result) if self.is_awaitable(result) - else cast(ExecutionResult, result) + else cast("ExecutionResult", result) ) return map_async_iterable(result_or_stream, callback) - def execute_deferred_fragment( + def execute_deferred_grouped_field_sets( self, parent_type: GraphQLObjectType, source_value: Any, - fields: GroupedFieldSet, - label: str | None = None, - path: Path | None = None, - parent_context: IncrementalDataRecord | None = None, + path: Path | None, + new_deferred_grouped_field_set_records: Sequence[DeferredGroupedFieldSetRecord], + defer_map: RefMap[DeferUsage, DeferredFragmentRecord], ) -> None: - """Execute deferred fragment.""" + """Execute deferred grouped field sets.""" + for deferred_grouped_field_set_record in new_deferred_grouped_field_set_records: + if deferred_grouped_field_set_record.should_initiate_defer: + + async def execute_deferred_grouped_field_set( + deferred_grouped_field_set_record: DeferredGroupedFieldSetRecord, + ) -> None: + self.execute_deferred_grouped_field_set( + parent_type, + source_value, + path, + deferred_grouped_field_set_record, + defer_map, + ) + + self.add_task( + execute_deferred_grouped_field_set( + deferred_grouped_field_set_record + ) + ) + + else: + self.execute_deferred_grouped_field_set( + parent_type, + source_value, + path, + deferred_grouped_field_set_record, + defer_map, + ) + + def execute_deferred_grouped_field_set( + self, + parent_type: GraphQLObjectType, + source_value: Any, + path: Path | None, + deferred_grouped_field_set_record: DeferredGroupedFieldSetRecord, + defer_map: RefMap[DeferUsage, DeferredFragmentRecord], + ) -> None: + """Execute deferred grouped field set.""" incremental_publisher = self.incremental_publisher - incremental_data_record = ( - incremental_publisher.prepare_new_deferred_fragment_record( - label, path, parent_context - ) - ) try: - awaitable_or_data = self.execute_fields( - parent_type, source_value, path, fields, incremental_data_record + incremental_result = self.execute_fields( + parent_type, + source_value, + path, + deferred_grouped_field_set_record.grouped_field_set, + deferred_grouped_field_set_record, + defer_map, ) - if self.is_awaitable(awaitable_or_data): + if self.is_awaitable(incremental_result): + incremental_result = cast("Awaitable", incremental_result) - async def await_data() -> None: + async def await_incremental_result() -> None: try: - data = await awaitable_or_data # type: ignore + result = await incremental_result except GraphQLError as error: - incremental_publisher.add_field_error( - incremental_data_record, error - ) - incremental_publisher.complete_deferred_fragment_record( - incremental_data_record, None + incremental_publisher.mark_errored_deferred_grouped_field_set( + deferred_grouped_field_set_record, error ) else: - incremental_publisher.complete_deferred_fragment_record( - incremental_data_record, data + 
incremental_publisher.complete_deferred_grouped_field_set( + deferred_grouped_field_set_record, result ) - self.add_task(await_data()) + self.add_task(await_incremental_result()) else: - incremental_publisher.complete_deferred_fragment_record( - incremental_data_record, - awaitable_or_data, # type: ignore + incremental_publisher.complete_deferred_grouped_field_set( + deferred_grouped_field_set_record, + incremental_result, # type: ignore ) + except GraphQLError as error: - incremental_publisher.add_field_error(incremental_data_record, error) - incremental_publisher.complete_deferred_fragment_record( - incremental_data_record, None + incremental_publisher.mark_errored_deferred_grouped_field_set( + deferred_grouped_field_set_record, error ) - awaitable_or_data = None def execute_stream_field( self, @@ -1529,14 +1459,15 @@ def execute_stream_field( field_group: FieldGroup, info: GraphQLResolveInfo, item_type: GraphQLOutputType, - label: str | None = None, - parent_context: IncrementalDataRecord | None = None, - ) -> IncrementalDataRecord: + incremental_data_record: IncrementalDataRecord, + stream_record: StreamRecord, + ) -> StreamItemsRecord: """Execute stream field.""" is_awaitable = self.is_awaitable incremental_publisher = self.incremental_publisher - incremental_data_record = incremental_publisher.prepare_new_stream_items_record( - label, item_path, parent_context + stream_items_record = StreamItemsRecord(stream_record, item_path) + incremental_publisher.report_new_stream_items_record( + stream_items_record, incremental_data_record ) completed_item: Any @@ -1550,23 +1481,21 @@ async def await_completed_awaitable_item() -> None: info, item_path, item, - incremental_data_record, + stream_items_record, + RefMap(), ) except GraphQLError as error: - incremental_publisher.add_field_error( - incremental_data_record, error - ) - incremental_publisher.filter(path, incremental_data_record) - incremental_publisher.complete_stream_items_record( - incremental_data_record, None + incremental_publisher.filter(path, stream_items_record) + incremental_publisher.mark_errored_stream_items_record( + stream_items_record, error ) else: incremental_publisher.complete_stream_items_record( - incremental_data_record, [value] + stream_items_record, [value] ) self.add_task(await_completed_awaitable_item()) - return incremental_data_record + return stream_items_record try: try: @@ -1576,7 +1505,8 @@ async def await_completed_awaitable_item() -> None: info, item_path, item, - incremental_data_record, + stream_items_record, + RefMap(), ) except Exception as raw_error: self.handle_field_error( @@ -1584,17 +1514,16 @@ async def await_completed_awaitable_item() -> None: item_type, field_group, item_path, - incremental_data_record, + stream_items_record, ) completed_item = None - incremental_publisher.filter(item_path, incremental_data_record) + incremental_publisher.filter(item_path, stream_items_record) except GraphQLError as error: - incremental_publisher.add_field_error(incremental_data_record, error) - incremental_publisher.filter(path, incremental_data_record) - incremental_publisher.complete_stream_items_record( - incremental_data_record, None + incremental_publisher.filter(path, stream_items_record) + incremental_publisher.mark_errored_stream_items_record( + stream_items_record, error ) - return incremental_data_record + return stream_items_record if is_awaitable(completed_item): @@ -1608,30 +1537,27 @@ async def await_completed_item() -> None: item_type, field_group, item_path, - incremental_data_record, + 
stream_items_record, ) - incremental_publisher.filter(item_path, incremental_data_record) + incremental_publisher.filter(item_path, stream_items_record) value = None except GraphQLError as error: # pragma: no cover - incremental_publisher.add_field_error( - incremental_data_record, error - ) - incremental_publisher.filter(path, incremental_data_record) - incremental_publisher.complete_stream_items_record( - incremental_data_record, None + incremental_publisher.filter(path, stream_items_record) + incremental_publisher.mark_errored_stream_items_record( + stream_items_record, error ) else: incremental_publisher.complete_stream_items_record( - incremental_data_record, [value] + stream_items_record, [value] ) self.add_task(await_completed_item()) - return incremental_data_record + return stream_items_record incremental_publisher.complete_stream_items_record( - incremental_data_record, [completed_item] + stream_items_record, [completed_item] ) - return incremental_data_record + return stream_items_record async def execute_stream_async_iterator_item( self, @@ -1639,8 +1565,7 @@ async def execute_stream_async_iterator_item( field_group: FieldGroup, info: GraphQLResolveInfo, item_type: GraphQLOutputType, - incremental_data_record: StreamItemsRecord, - path: Path, + stream_items_record: StreamItemsRecord, item_path: Path, ) -> Any: """Execute stream iterator item.""" @@ -1650,14 +1575,27 @@ async def execute_stream_async_iterator_item( item = await anext(async_iterator) except StopAsyncIteration as raw_error: self.incremental_publisher.set_is_completed_async_iterator( - incremental_data_record + stream_items_record ) raise StopAsyncIteration from raw_error except Exception as raw_error: - raise located_error(raw_error, field_group, path.as_list()) from raw_error + raise located_error( + raw_error, + field_group.to_nodes(), + stream_items_record.stream_record.path, + ) from raw_error + else: + if stream_items_record.stream_record.errors: + raise StopAsyncIteration # pragma: no cover try: completed_item = self.complete_value( - item_type, field_group, info, item_path, item, incremental_data_record + item_type, + field_group, + info, + item_path, + item, + stream_items_record, + RefMap(), ) return ( await completed_item @@ -1666,9 +1604,9 @@ async def execute_stream_async_iterator_item( ) except Exception as raw_error: self.handle_field_error( - raw_error, item_type, field_group, item_path, incremental_data_record + raw_error, item_type, field_group, item_path, stream_items_record ) - self.incremental_publisher.filter(item_path, incremental_data_record) + self.incremental_publisher.filter(item_path, stream_items_record) async def execute_stream_async_iterator( self, @@ -1678,21 +1616,19 @@ async def execute_stream_async_iterator( info: GraphQLResolveInfo, item_type: GraphQLOutputType, path: Path, - label: str | None = None, - parent_context: IncrementalDataRecord | None = None, + incremental_data_record: IncrementalDataRecord, + stream_record: StreamRecord, ) -> None: """Execute stream iterator.""" incremental_publisher = self.incremental_publisher index = initial_index - previous_incremental_data_record = parent_context + current_incremental_data_record = incremental_data_record - done = False while True: item_path = Path(path, index, None) - incremental_data_record = ( - incremental_publisher.prepare_new_stream_items_record( - label, item_path, previous_incremental_data_record, async_iterator - ) + stream_items_record = StreamItemsRecord(stream_record, item_path) + 
incremental_publisher.report_new_stream_items_record( + stream_items_record, current_incremental_data_record ) try: @@ -1701,15 +1637,13 @@ async def execute_stream_async_iterator( field_group, info, item_type, - incremental_data_record, - path, + stream_items_record, item_path, ) except GraphQLError as error: - incremental_publisher.add_field_error(incremental_data_record, error) - incremental_publisher.filter(path, incremental_data_record) - incremental_publisher.complete_stream_items_record( - incremental_data_record, None + incremental_publisher.filter(path, stream_items_record) + incremental_publisher.mark_errored_stream_items_record( + stream_items_record, error ) if async_iterator: # pragma: no cover else with suppress_exceptions: @@ -1717,18 +1651,20 @@ async def execute_stream_async_iterator( # running generators cannot be closed since Python 3.8, # so we need to remember that this iterator is already canceled self._canceled_iterators.add(async_iterator) - break + return except StopAsyncIteration: done = True + completed_item = None + else: + done = False incremental_publisher.complete_stream_items_record( - incremental_data_record, - [completed_item], + stream_items_record, [completed_item] ) if done: break - previous_incremental_data_record = incremental_data_record + current_incremental_data_record = stream_items_record index += 1 def add_task(self, awaitable: Awaitable[Any]) -> None: @@ -1765,6 +1701,7 @@ def execute( middleware: Middleware | None = None, execution_context_class: type[ExecutionContext] | None = None, is_awaitable: Callable[[Any], bool] | None = None, + **custom_context_args: Any, ) -> AwaitableOrValue[ExecutionResult]: """Execute a GraphQL operation. @@ -1797,6 +1734,7 @@ def execute( middleware, execution_context_class, is_awaitable, + **custom_context_args, ) if isinstance(result, ExecutionResult): return result @@ -1825,6 +1763,7 @@ def experimental_execute_incrementally( middleware: Middleware | None = None, execution_context_class: type[ExecutionContext] | None = None, is_awaitable: Callable[[Any], bool] | None = None, + **custom_context_args: Any, ) -> AwaitableOrValue[ExecutionResult | ExperimentalIncrementalExecutionResults]: """Execute GraphQL operation incrementally (internal implementation). @@ -1853,6 +1792,7 @@ def experimental_execute_incrementally( subscribe_field_resolver, middleware, is_awaitable, + **custom_context_args, ) # Return early errors if execution context failed. @@ -1877,52 +1817,31 @@ def execute_impl( # Errors from sub-fields of a NonNull type may propagate to the top level, # at which point we still log the error and null the parent field, which # in this case is the entire response. 
- errors = context.errors incremental_publisher = context.incremental_publisher - build_response = context.build_response + initial_result_record = InitialResultRecord() try: - result = context.execute_operation() + data = context.execute_operation(initial_result_record) + if context.is_awaitable(data): - if context.is_awaitable(result): - # noinspection PyShadowingNames - async def await_result() -> Any: + async def await_response() -> ( + ExecutionResult | ExperimentalIncrementalExecutionResults + ): try: - initial_result = build_response( - await result, # type: ignore - errors, + return incremental_publisher.build_data_response( + initial_result_record, + await data, # type: ignore ) - incremental_publisher.publish_initial() - if incremental_publisher.has_next(): - return ExperimentalIncrementalExecutionResults( - initial_result=InitialIncrementalExecutionResult( - initial_result.data, - initial_result.errors, - has_next=True, - ), - subsequent_results=incremental_publisher.subscribe(), - ) except GraphQLError as error: - errors.append(error) - return build_response(None, errors) - return initial_result + return incremental_publisher.build_error_response( + initial_result_record, error + ) - return await_result() + return await_response() + + return incremental_publisher.build_data_response(initial_result_record, data) # type: ignore - initial_result = build_response(result, errors) # type: ignore - incremental_publisher.publish_initial() - if incremental_publisher.has_next(): - return ExperimentalIncrementalExecutionResults( - initial_result=InitialIncrementalExecutionResult( - initial_result.data, - initial_result.errors, - has_next=True, - ), - subsequent_results=incremental_publisher.subscribe(), - ) except GraphQLError as error: - errors.append(error) - return build_response(None, errors) - return initial_result + return incremental_publisher.build_error_response(initial_result_record, error) def assume_not_awaitable(_value: Any) -> bool: @@ -1978,11 +1897,11 @@ def execute_sync( result, ExperimentalIncrementalExecutionResults ): if default_is_awaitable(result): - ensure_future(cast(Awaitable[ExecutionResult], result)).cancel() + ensure_future(cast("Awaitable[ExecutionResult]", result)).cancel() msg = "GraphQL execution failed to complete synchronously." raise RuntimeError(msg) - return cast(ExecutionResult, result) + return cast("ExecutionResult", result) def invalid_return_type_error( @@ -1991,10 +1910,122 @@ def invalid_return_type_error( """Create a GraphQLError for an invalid return type.""" return GraphQLError( f"Expected value of type '{return_type.name}' but got: {inspect(result)}.", - field_group, + field_group.to_nodes(), ) +def add_new_deferred_fragments( + incremental_publisher: IncrementalPublisher, + new_defer_usages: Sequence[DeferUsage], + incremental_data_record: IncrementalDataRecord, + defer_map: RefMap[DeferUsage, DeferredFragmentRecord] | None = None, + path: Path | None = None, +) -> RefMap[DeferUsage, DeferredFragmentRecord]: + """Add new deferred fragments to the defer map. + + Instantiates new DeferredFragmentRecords for the given path within an + incremental data record, returning an updated map of DeferUsage + objects to DeferredFragmentRecords. + + Note: As defer directives may be used with operations returning lists, + a DeferUsage object may correspond to many DeferredFragmentRecords. + + DeferredFragmentRecord creation includes the following steps: + 1. The new DeferredFragmentRecord is instantiated at the given path. + 2. 
The parent result record is calculated from the given incremental data record. + 3. The IncrementalPublisher is notified that a new DeferredFragmentRecord + with the calculated parent has been added; the record will be released only + after the parent has completed. + """ + if not new_defer_usages: + # Given no DeferUsages, return the existing map, creating one if necessary. + return RefMap() if defer_map is None else defer_map + + # Create a copy of the old map. + new_defer_map = RefMap() if defer_map is None else RefMap(defer_map.items()) + + # For each new DeferUsage object: + for defer_usage in new_defer_usages: + ancestors = defer_usage.ancestors + parent_defer_usage = ancestors[0] if ancestors else None + + # If the parent target is defined, the parent target is a DeferUsage object + # and the parent result record is the DeferredFragmentRecord corresponding + # to that DeferUsage. + # If the parent target is not defined, the parent result record is either: + # - the InitialResultRecord, or + # - a StreamItemsRecord, as `@defer` may be nested under `@stream`. + parent = ( + cast( + "Union[InitialResultRecord, StreamItemsRecord]", incremental_data_record + ) + if parent_defer_usage is None + else deferred_fragment_record_from_defer_usage( + parent_defer_usage, new_defer_map + ) + ) + + # Instantiate the new record. + deferred_fragment_record = DeferredFragmentRecord(path, defer_usage.label) + + # Report the new record to the Incremental Publisher. + incremental_publisher.report_new_defer_fragment_record( + deferred_fragment_record, parent + ) + + # Update the map. + new_defer_map[defer_usage] = deferred_fragment_record + + return new_defer_map + + +def deferred_fragment_record_from_defer_usage( + defer_usage: DeferUsage, defer_map: RefMap[DeferUsage, DeferredFragmentRecord] +) -> DeferredFragmentRecord: + """Get the deferred fragment record mapped to the given defer usage.""" + return defer_map[defer_usage] + + +def add_new_deferred_grouped_field_sets( + incremental_publisher: IncrementalPublisher, + new_grouped_field_set_details: Mapping[DeferUsageSet, GroupedFieldSetDetails], + defer_map: RefMap[DeferUsage, DeferredFragmentRecord], + path: Path | None = None, +) -> list[DeferredGroupedFieldSetRecord]: + """Add new deferred grouped field sets to the defer map.""" + new_deferred_grouped_field_set_records: list[DeferredGroupedFieldSetRecord] = [] + + for ( + new_grouped_field_set_defer_usages, + grouped_field_set_details, + ) in new_grouped_field_set_details.items(): + deferred_fragment_records = get_deferred_fragment_records( + new_grouped_field_set_defer_usages, defer_map + ) + deferred_grouped_field_set_record = DeferredGroupedFieldSetRecord( + deferred_fragment_records, + grouped_field_set_details.grouped_field_set, + grouped_field_set_details.should_initiate_defer, + path, + ) + incremental_publisher.report_new_deferred_grouped_filed_set_record( + deferred_grouped_field_set_record + ) + new_deferred_grouped_field_set_records.append(deferred_grouped_field_set_record) + + return new_deferred_grouped_field_set_records + + +def get_deferred_fragment_records( + defer_usages: DeferUsageSet, defer_map: RefMap[DeferUsage, DeferredFragmentRecord] +) -> list[DeferredFragmentRecord]: + """Get the deferred fragment records for the given defer usages.""" + return [ + deferred_fragment_record_from_defer_usage(defer_usage, defer_map) + for defer_usage in defer_usages + ] + + def get_typename(value: Any) -> str | None: """Get the ``__typename`` property of the given value.""" if isinstance(value, 
Mapping): @@ -2040,7 +2071,7 @@ def default_type_resolver( is_type_of_result = type_.is_type_of(value, info) if is_awaitable(is_type_of_result): - append_awaitable_results(cast(Awaitable, is_type_of_result)) + append_awaitable_results(cast("Awaitable", is_type_of_result)) append_awaitable_types(type_) elif is_type_of_result: return type_.name @@ -2094,6 +2125,7 @@ def subscribe( subscribe_field_resolver: GraphQLFieldResolver | None = None, execution_context_class: type[ExecutionContext] | None = None, middleware: MiddlewareManager | None = None, + **custom_context_args: Any, ) -> AwaitableOrValue[AsyncIterator[ExecutionResult] | ExecutionResult]: """Create a GraphQL subscription. @@ -2134,6 +2166,7 @@ def subscribe( type_resolver, subscribe_field_resolver, middleware=middleware, + **custom_context_args, ) # Return early errors if execution context failed. @@ -2169,6 +2202,7 @@ def create_source_event_stream( type_resolver: GraphQLTypeResolver | None = None, subscribe_field_resolver: GraphQLFieldResolver | None = None, execution_context_class: type[ExecutionContext] | None = None, + **custom_context_args: Any, ) -> AwaitableOrValue[AsyncIterable[Any] | ExecutionResult]: """Create source event stream @@ -2205,6 +2239,7 @@ def create_source_event_stream( field_resolver, type_resolver, subscribe_field_resolver, + **custom_context_args, ) # Return early errors if execution context failed. @@ -2224,7 +2259,7 @@ def create_source_event_stream_impl( return ExecutionResult(None, errors=[error]) if context.is_awaitable(event_stream): - awaitable_event_stream = cast(Awaitable, event_stream) + awaitable_event_stream = cast("Awaitable", event_stream) # noinspection PyShadowingNames async def await_event_stream() -> AsyncIterable[Any] | ExecutionResult: @@ -2257,12 +2292,12 @@ def execute_subscription( ).grouped_field_set first_root_field = next(iter(grouped_field_set.items())) response_name, field_group = first_root_field - field_name = field_group[0].name.value + field_name = field_group.fields[0].node.name.value field_def = schema.get_field(root_type, field_name) if not field_def: msg = f"The subscription field '{field_name}' is not defined." - raise GraphQLError(msg, field_group) + raise GraphQLError(msg, field_group.to_nodes()) path = Path(None, response_name, root_type.name) info = context.build_resolve_info(field_def, field_group, root_type, path) @@ -2273,7 +2308,9 @@ def execute_subscription( try: # Build a dictionary of arguments from the field.arguments AST, using the # variables scope to fulfill any variable references. - args = get_argument_values(field_def, field_group[0], context.variable_values) + args = get_argument_values( + field_def, field_group.fields[0].node, context.variable_values + ) # Call the `subscribe()` resolver or the default resolver to produce an # AsyncIterable yielding raw payloads. 
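The ``**custom_context_args`` accepted by ``execute()``, ``subscribe()`` and
``create_source_event_stream()`` are forwarded to ``ExecutionContext.build()``
and from there to the context constructor, so a custom context class can
accept extra keyword arguments. A minimal sketch, assuming a hypothetical
``CustomExecutionContext`` class and ``request_user`` argument, with
``schema`` defined elsewhere:

    from graphql import ExecutionContext, execute, parse

    class CustomExecutionContext(ExecutionContext):
        """Hypothetical context class consuming one extra keyword argument."""

        def __init__(self, *args, request_user=None, **kwargs):
            super().__init__(*args, **kwargs)
            # stash the extra argument for use in overridden execution methods
            self.request_user = request_user

    result = execute(
        schema,  # assumed to be defined elsewhere
        parse("{ hello }"),
        execution_context_class=CustomExecutionContext,
        request_user="alice",  # forwarded via **custom_context_args
    )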
@@ -2286,14 +2323,16 @@ async def await_result() -> AsyncIterable[Any]: try: return assert_event_stream(await result) except Exception as error: - raise located_error(error, field_group, path.as_list()) from error + raise located_error( + error, field_group.to_nodes(), path.as_list() + ) from error return await_result() return assert_event_stream(result) except Exception as error: - raise located_error(error, field_group, path.as_list()) from error + raise located_error(error, field_group.to_nodes(), path.as_list()) from error def assert_event_stream(result: Any) -> AsyncIterable: diff --git a/src/graphql/execution/incremental_publisher.py b/src/graphql/execution/incremental_publisher.py index fb660e85..839f62d8 100644 --- a/src/graphql/execution/incremental_publisher.py +++ b/src/graphql/execution/incremental_publisher.py @@ -2,17 +2,17 @@ from __future__ import annotations -from asyncio import Event, ensure_future, gather +from asyncio import Event, ensure_future, gather, sleep from contextlib import suppress from typing import ( TYPE_CHECKING, Any, AsyncGenerator, - AsyncIterator, Awaitable, + Callable, Collection, + Iterator, NamedTuple, - Sequence, Union, ) @@ -21,23 +21,31 @@ except ImportError: # Python < 3.8 from typing_extensions import TypedDict +from ..pyutils import RefSet if TYPE_CHECKING: from ..error import GraphQLError, GraphQLFormattedError from ..pyutils import Path + from .collect_fields import GroupedFieldSet __all__ = [ "ASYNC_DELAY", "DeferredFragmentRecord", + "ExecutionResult", + "ExperimentalIncrementalExecutionResults", + "FormattedExecutionResult", "FormattedIncrementalDeferResult", "FormattedIncrementalResult", "FormattedIncrementalStreamResult", + "FormattedInitialIncrementalExecutionResult", "FormattedSubsequentIncrementalExecutionResult", "IncrementalDataRecord", "IncrementalDeferResult", "IncrementalPublisher", "IncrementalResult", "IncrementalStreamResult", + "InitialIncrementalExecutionResult", + "InitialResultRecord", "StreamItemsRecord", "SubsequentIncrementalExecutionResult", ] @@ -48,62 +56,263 @@ suppress_key_error = suppress(KeyError) -class FormattedIncrementalDeferResult(TypedDict, total=False): - """Formatted incremental deferred execution result""" +class FormattedPendingResult(TypedDict, total=False): + """Formatted pending execution result""" - data: dict[str, Any] | None - errors: list[GraphQLFormattedError] + id: str path: list[str | int] label: str + + +class PendingResult: + """Pending execution result""" + + id: str + path: list[str | int] + label: str | None + + __slots__ = "id", "label", "path" + + def __init__( + self, + id: str, # noqa: A002 + path: list[str | int], + label: str | None = None, + ) -> None: + self.id = id + self.path = path + self.label = label + + def __repr__(self) -> str: + name = self.__class__.__name__ + args: list[str] = [f"id={self.id!r}, path={self.path!r}"] + if self.label: + args.append(f"label={self.label!r}") + return f"{name}({', '.join(args)})" + + @property + def formatted(self) -> FormattedPendingResult: + """Get pending result formatted according to the specification.""" + formatted: FormattedPendingResult = {"id": self.id, "path": self.path} + if self.label is not None: + formatted["label"] = self.label + return formatted + + def __eq__(self, other: object) -> bool: + if isinstance(other, dict): + return ( + other.get("id") == self.id + and (other.get("path") or None) == (self.path or None) + and (other.get("label") or None) == (self.label or None) + ) + + if isinstance(other, tuple): + size = 
len(other) + return 1 < size < 4 and (self.id, self.path, self.label)[:size] == other + return ( + isinstance(other, self.__class__) + and other.id == self.id + and other.path == self.path + and other.label == self.label + ) + + def __ne__(self, other: object) -> bool: + return not self == other + + +class FormattedCompletedResult(TypedDict, total=False): + """Formatted completed execution result""" + + id: str + errors: list[GraphQLFormattedError] + + +class CompletedResult: + """Completed execution result""" + + id: str + errors: list[GraphQLError] | None + + __slots__ = "errors", "id" + + def __init__( + self, + id: str, # noqa: A002 + errors: list[GraphQLError] | None = None, + ) -> None: + self.id = id + self.errors = errors + + def __repr__(self) -> str: + name = self.__class__.__name__ + args: list[str] = [f"id={self.id!r}"] + if self.errors: + args.append(f"errors={self.errors!r}") + return f"{name}({', '.join(args)})" + + @property + def formatted(self) -> FormattedCompletedResult: + """Get completed result formatted according to the specification.""" + formatted: FormattedCompletedResult = {"id": self.id} + if self.errors is not None: + formatted["errors"] = [error.formatted for error in self.errors] + return formatted + + def __eq__(self, other: object) -> bool: + if isinstance(other, dict): + return other.get("id") == self.id and (other.get("errors") or None) == ( + self.errors or None + ) + if isinstance(other, tuple): + size = len(other) + return 1 < size < 3 and (self.id, self.errors)[:size] == other + return ( + isinstance(other, self.__class__) + and other.id == self.id + and other.errors == self.errors + ) + + def __ne__(self, other: object) -> bool: + return not self == other + + +class IncrementalUpdate(NamedTuple): + """Incremental update""" + + pending: list[PendingResult] + incremental: list[IncrementalResult] + completed: list[CompletedResult] + + +class FormattedExecutionResult(TypedDict, total=False): + """Formatted execution result""" + + data: dict[str, Any] | None + errors: list[GraphQLFormattedError] extensions: dict[str, Any] -class IncrementalDeferResult: - """Incremental deferred execution result""" +class ExecutionResult: + """The result of GraphQL execution. + + - ``data`` is the result of a successful execution of the query. + - ``errors`` is included when any errors occurred as a non-empty list. + - ``extensions`` is reserved for adding non-standard properties. 
+ """ + + __slots__ = "data", "errors", "extensions" data: dict[str, Any] | None errors: list[GraphQLError] | None - path: list[str | int] | None - label: str | None extensions: dict[str, Any] | None - __slots__ = "data", "errors", "path", "label", "extensions" + def __init__( + self, + data: dict[str, Any] | None = None, + errors: list[GraphQLError] | None = None, + extensions: dict[str, Any] | None = None, + ) -> None: + self.data = data + self.errors = errors + self.extensions = extensions + + def __repr__(self) -> str: + name = self.__class__.__name__ + ext = "" if self.extensions is None else f", extensions={self.extensions!r}" + return f"{name}(data={self.data!r}, errors={self.errors!r}{ext})" + + def __iter__(self) -> Iterator[Any]: + return iter((self.data, self.errors)) + + @property + def formatted(self) -> FormattedExecutionResult: + """Get execution result formatted according to the specification.""" + formatted: FormattedExecutionResult = {"data": self.data} + if self.errors is not None: + formatted["errors"] = [error.formatted for error in self.errors] + if self.extensions is not None: + formatted["extensions"] = self.extensions + return formatted + + def __eq__(self, other: object) -> bool: + if isinstance(other, dict): + return ( + (other.get("data") == self.data) + and (other.get("errors") or None) == (self.errors or None) + and (other.get("extensions") or None) == (self.extensions or None) + ) + if isinstance(other, tuple): + if len(other) == 2: + return other == (self.data, self.errors) + return other == (self.data, self.errors, self.extensions) + return ( + isinstance(other, self.__class__) + and other.data == self.data + and other.errors == self.errors + and other.extensions == self.extensions + ) + + def __ne__(self, other: object) -> bool: + return not self == other + + +class FormattedInitialIncrementalExecutionResult(TypedDict, total=False): + """Formatted initial incremental execution result""" + + data: dict[str, Any] | None + errors: list[GraphQLFormattedError] + pending: list[FormattedPendingResult] + hasNext: bool + incremental: list[FormattedIncrementalResult] + extensions: dict[str, Any] + + +class InitialIncrementalExecutionResult: + """Initial incremental execution result.""" + + data: dict[str, Any] | None + errors: list[GraphQLError] | None + pending: list[PendingResult] + has_next: bool + extensions: dict[str, Any] | None + + __slots__ = "data", "errors", "extensions", "has_next", "pending" def __init__( self, data: dict[str, Any] | None = None, errors: list[GraphQLError] | None = None, - path: list[str | int] | None = None, - label: str | None = None, + pending: list[PendingResult] | None = None, + has_next: bool = False, extensions: dict[str, Any] | None = None, ) -> None: self.data = data self.errors = errors - self.path = path - self.label = label + self.pending = pending or [] + self.has_next = has_next self.extensions = extensions def __repr__(self) -> str: name = self.__class__.__name__ - args: list[str] = [f"data={self.data!r}, errors={self.errors!r}"] - if self.path: - args.append(f"path={self.path!r}") - if self.label: - args.append(f"label={self.label!r}") + args: list[str] = [f"data={self.data!r}"] + if self.errors: + args.append(f"errors={self.errors!r}") + if self.pending: + args.append(f"pending={self.pending!r}") + if self.has_next: + args.append("has_next") if self.extensions: - args.append(f"extensions={self.extensions}") + args.append(f"extensions={self.extensions!r}") return f"{name}({', '.join(args)})" @property - def 
formatted(self) -> FormattedIncrementalDeferResult: + def formatted(self) -> FormattedInitialIncrementalExecutionResult: """Get execution result formatted according to the specification.""" - formatted: FormattedIncrementalDeferResult = {"data": self.data} + formatted: FormattedInitialIncrementalExecutionResult = {"data": self.data} if self.errors is not None: formatted["errors"] = [error.formatted for error in self.errors] - if self.path is not None: - formatted["path"] = self.path - if self.label is not None: - formatted["label"] = self.label + formatted["pending"] = [pending.formatted for pending in self.pending] + formatted["hasNext"] = self.has_next if self.extensions is not None: formatted["extensions"] = self.extensions return formatted @@ -112,18 +321,119 @@ def __eq__(self, other: object) -> bool: if isinstance(other, dict): return ( other.get("data") == self.data - and other.get("errors") == self.errors - and ("path" not in other or other["path"] == self.path) - and ("label" not in other or other["label"] == self.label) + and (other.get("errors") or None) == (self.errors or None) + and (other.get("pending") or None) == (self.pending or None) + and (other.get("hasNext") or None) == (self.has_next or None) + and (other.get("extensions") or None) == (self.extensions or None) + ) + if isinstance(other, tuple): + size = len(other) + return ( + 1 < size < 6 and ( - "extensions" not in other or other["extensions"] == self.extensions - ) + self.data, + self.errors, + self.pending, + self.has_next, + self.extensions, + )[:size] + == other + ) + return ( + isinstance(other, self.__class__) + and other.data == self.data + and other.errors == self.errors + and other.pending == self.pending + and other.has_next == self.has_next + and other.extensions == self.extensions + ) + + def __ne__(self, other: object) -> bool: + return not self == other + + +class ExperimentalIncrementalExecutionResults(NamedTuple): + """Execution results when retrieved incrementally.""" + + initial_result: InitialIncrementalExecutionResult + subsequent_results: AsyncGenerator[SubsequentIncrementalExecutionResult, None] + + +class FormattedIncrementalDeferResult(TypedDict, total=False): + """Formatted incremental deferred execution result""" + + data: dict[str, Any] + id: str + subPath: list[str | int] + errors: list[GraphQLFormattedError] + extensions: dict[str, Any] + + +class IncrementalDeferResult: + """Incremental deferred execution result""" + + data: dict[str, Any] + id: str + sub_path: list[str | int] | None + errors: list[GraphQLError] | None + extensions: dict[str, Any] | None + + __slots__ = "data", "errors", "extensions", "id", "sub_path" + + def __init__( + self, + data: dict[str, Any], + id: str, # noqa: A002 + sub_path: list[str | int] | None = None, + errors: list[GraphQLError] | None = None, + extensions: dict[str, Any] | None = None, + ) -> None: + self.data = data + self.id = id + self.sub_path = sub_path + self.errors = errors + self.extensions = extensions + + def __repr__(self) -> str: + name = self.__class__.__name__ + args: list[str] = [f"data={self.data!r}, id={self.id!r}"] + if self.sub_path is not None: + args.append(f"sub_path={self.sub_path!r}") + if self.errors is not None: + args.append(f"errors={self.errors!r}") + if self.extensions is not None: + args.append(f"extensions={self.extensions!r}") + return f"{name}({', '.join(args)})" + + @property + def formatted(self) -> FormattedIncrementalDeferResult: + """Get execution result formatted according to the specification.""" + formatted: 
FormattedIncrementalDeferResult = {
+            "data": self.data,
+            "id": self.id,
+        }
+        if self.sub_path is not None:
+            formatted["subPath"] = self.sub_path
+        if self.errors is not None:
+            formatted["errors"] = [error.formatted for error in self.errors]
+        if self.extensions is not None:
+            formatted["extensions"] = self.extensions
+        return formatted
+
+    def __eq__(self, other: object) -> bool:
+        if isinstance(other, dict):
+            return (
+                other.get("data") == self.data
+                and other.get("id") == self.id
+                and (other.get("subPath") or None) == (self.sub_path or None)
+                and (other.get("errors") or None) == (self.errors or None)
+                and (other.get("extensions") or None) == (self.extensions or None)
             )
         if isinstance(other, tuple):
             size = len(other)
             return (
                 1 < size < 6
-                and (self.data, self.errors, self.path, self.label, self.extensions)[
+                and (self.data, self.id, self.sub_path, self.errors, self.extensions)[
                     :size
                 ]
                 == other
@@ -131,9 +441,9 @@ def __eq__(self, other: object) -> bool:
         return (
             isinstance(other, self.__class__)
             and other.data == self.data
+            and other.id == self.id
+            and other.sub_path == self.sub_path
             and other.errors == self.errors
-            and other.path == self.path
-            and other.label == self.label
             and other.extensions == self.extensions
         )

@@ -144,59 +454,60 @@ def __ne__(self, other: object) -> bool:
 class FormattedIncrementalStreamResult(TypedDict, total=False):
     """Formatted incremental stream execution result"""

-    items: list[Any] | None
+    items: list[Any]
+    id: str
+    subPath: list[str | int]
     errors: list[GraphQLFormattedError]
-    path: list[str | int]
-    label: str
     extensions: dict[str, Any]


 class IncrementalStreamResult:
     """Incremental streamed execution result"""

-    items: list[Any] | None
+    items: list[Any]
+    id: str
+    sub_path: list[str | int] | None
     errors: list[GraphQLError] | None
-    path: list[str | int] | None
-    label: str | None
     extensions: dict[str, Any] | None

-    __slots__ = "items", "errors", "path", "label", "extensions"
+    __slots__ = "errors", "extensions", "id", "items", "sub_path"

     def __init__(
         self,
-        items: list[Any] | None = None,
+        items: list[Any],
+        id: str,  # noqa: A002
+        sub_path: list[str | int] | None = None,
         errors: list[GraphQLError] | None = None,
-        path: list[str | int] | None = None,
-        label: str | None = None,
         extensions: dict[str, Any] | None = None,
     ) -> None:
         self.items = items
+        self.id = id
+        self.sub_path = sub_path
         self.errors = errors
-        self.path = path
-        self.label = label
         self.extensions = extensions

     def __repr__(self) -> str:
         name = self.__class__.__name__
-        args: list[str] = [f"items={self.items!r}, errors={self.errors!r}"]
-        if self.path:
-            args.append(f"path={self.path!r}")
-        if self.label:
-            args.append(f"label={self.label!r}")
-        if self.extensions:
-            args.append(f"extensions={self.extensions}")
+        args: list[str] = [f"items={self.items!r}, id={self.id!r}"]
+        if self.sub_path is not None:
+            args.append(f"sub_path={self.sub_path!r}")
+        if self.errors is not None:
+            args.append(f"errors={self.errors!r}")
+        if self.extensions is not None:
+            args.append(f"extensions={self.extensions!r}")
         return f"{name}({', '.join(args)})"

     @property
     def formatted(self) -> FormattedIncrementalStreamResult:
         """Get execution result formatted according to the specification."""
-        formatted: FormattedIncrementalStreamResult = {"items": self.items}
+        formatted: FormattedIncrementalStreamResult = {
+            "items": self.items,
+            "id": self.id,
+        }
+        if self.sub_path is not None:
+            formatted["subPath"] = self.sub_path
         if self.errors is not None:
            formatted["errors"] = 
[error.formatted for error in self.errors]
-        if self.path is not None:
-            formatted["path"] = self.path
-        if self.label is not None:
-            formatted["label"] = self.label
         if self.extensions is not None:
             formatted["extensions"] = self.extensions
         return formatted

@@ -205,18 +516,16 @@ def __eq__(self, other: object) -> bool:
         if isinstance(other, dict):
             return (
                 other.get("items") == self.items
-                and other.get("errors") == self.errors
-                and ("path" not in other or other["path"] == self.path)
-                and ("label" not in other or other["label"] == self.label)
-                and (
-                    "extensions" not in other or other["extensions"] == self.extensions
-                )
+                and other.get("id") == self.id
+                and (other.get("subPath") or None) == (self.sub_path or None)
+                and (other.get("errors") or None) == (self.errors or None)
+                and (other.get("extensions") or None) == (self.extensions or None)
             )
         if isinstance(other, tuple):
             size = len(other)
             return (
                 1 < size < 6
-                and (self.items, self.errors, self.path, self.label, self.extensions)[
+                and (self.items, self.id, self.sub_path, self.errors, self.extensions)[
                     :size
                 ]
                 == other
@@ -224,9 +533,9 @@ def __eq__(self, other: object) -> bool:
         return (
             isinstance(other, self.__class__)
             and other.items == self.items
+            and other.id == self.id
+            and other.sub_path == self.sub_path
             and other.errors == self.errors
-            and other.path == self.path
-            and other.label == self.label
             and other.extensions == self.extensions
         )

@@ -244,52 +553,64 @@ def __ne__(self, other: object) -> bool:
 class FormattedSubsequentIncrementalExecutionResult(TypedDict, total=False):
     """Formatted subsequent incremental execution result"""

-    incremental: list[FormattedIncrementalResult]
     hasNext: bool
+    pending: list[FormattedPendingResult]
+    incremental: list[FormattedIncrementalResult]
+    completed: list[FormattedCompletedResult]
     extensions: dict[str, Any]


 class SubsequentIncrementalExecutionResult:
-    """Subsequent incremental execution result.
+    """Subsequent incremental execution result."""

-    - ``has_next`` is True if a future payload is expected.
-    - ``incremental`` is a list of the results from defer/stream directives. 
- """ - - __slots__ = "has_next", "incremental", "extensions" + __slots__ = "completed", "extensions", "has_next", "incremental", "pending" - incremental: Sequence[IncrementalResult] | None has_next: bool + pending: list[PendingResult] | None + incremental: list[IncrementalResult] | None + completed: list[CompletedResult] | None extensions: dict[str, Any] | None def __init__( self, - incremental: Sequence[IncrementalResult] | None = None, has_next: bool = False, + pending: list[PendingResult] | None = None, + incremental: list[IncrementalResult] | None = None, + completed: list[CompletedResult] | None = None, extensions: dict[str, Any] | None = None, ) -> None: - self.incremental = incremental self.has_next = has_next + self.pending = pending or [] + self.incremental = incremental + self.completed = completed self.extensions = extensions def __repr__(self) -> str: name = self.__class__.__name__ args: list[str] = [] - if self.incremental: - args.append(f"incremental[{len(self.incremental)}]") if self.has_next: args.append("has_next") + if self.pending: + args.append(f"pending[{len(self.pending)}]") + if self.incremental: + args.append(f"incremental[{len(self.incremental)}]") + if self.completed: + args.append(f"completed[{len(self.completed)}]") if self.extensions: - args.append(f"extensions={self.extensions}") + args.append(f"extensions={self.extensions!r}") return f"{name}({', '.join(args)})" @property def formatted(self) -> FormattedSubsequentIncrementalExecutionResult: """Get execution result formatted according to the specification.""" formatted: FormattedSubsequentIncrementalExecutionResult = {} + formatted["hasNext"] = self.has_next + if self.pending: + formatted["pending"] = [result.formatted for result in self.pending] if self.incremental: formatted["incremental"] = [result.formatted for result in self.incremental] - formatted["hasNext"] = self.has_next + if self.completed: + formatted["completed"] = [result.formatted for result in self.completed] if self.extensions is not None: formatted["extensions"] = self.extensions return formatted @@ -297,27 +618,31 @@ def formatted(self) -> FormattedSubsequentIncrementalExecutionResult: def __eq__(self, other: object) -> bool: if isinstance(other, dict): return ( - ("incremental" not in other or other["incremental"] == self.incremental) - and ("hasNext" in other and other["hasNext"] == self.has_next) - and ( - "extensions" not in other or other["extensions"] == self.extensions - ) + (other.get("hasNext") or None) == (self.has_next or None) + and (other.get("pending") or None) == (self.pending or None) + and (other.get("incremental") or None) == (self.incremental or None) + and (other.get("completed") or None) == (self.completed or None) + and (other.get("extensions") or None) == (self.extensions or None) ) if isinstance(other, tuple): size = len(other) return ( - 1 < size < 4 + 1 < size < 6 and ( - self.incremental, self.has_next, + self.pending, + self.incremental, + self.completed, self.extensions, )[:size] == other ) return ( isinstance(other, self.__class__) - and other.incremental == self.incremental and other.has_next == self.has_next + and self.pending == other.pending + and other.incremental == self.incremental + and other.completed == self.completed and other.extensions == self.extensions ) @@ -340,140 +665,132 @@ class IncrementalPublisher: The internal publishing state is managed as follows: - ``_released``: the set of Incremental Data records that are ready to be sent to the + ``_released``: the set of Subsequent Result records 
that are ready to be sent to the client, i.e. their parents have completed and they have also completed. - ``_pending``: the set of Incremental Data records that are definitely pending, i.e. + ``_pending``: the set of Subsequent Result records that are definitely pending, i.e. their parents have completed so that they can no longer be filtered. This includes - all Incremental Data records in `released`, as well as Incremental Data records that - have not yet completed. - - ``_initial_result``: a record containing the state of the initial result, - as follows: - ``is_completed``: indicates whether the initial result has completed. - ``children``: the set of Incremental Data records that can be be published when the - initial result is completed. - - Each Incremental Data record also contains similar metadata, i.e. these records also - contain similar ``is_completed`` and ``children`` properties. + all Subsequent Result records in `released`, as well as the records that have not + yet completed. Note: Instead of sets we use dicts (with values set to None) which preserve order and thereby achieve more deterministic results. """ - _initial_result: InitialResult - _released: dict[IncrementalDataRecord, None] - _pending: dict[IncrementalDataRecord, None] + _next_id: int + _released: dict[SubsequentResultRecord, None] + _pending: dict[SubsequentResultRecord, None] _resolve: Event | None + _tasks: set[Awaitable] def __init__(self) -> None: - self._initial_result = InitialResult({}, False) + self._next_id = 0 self._released = {} self._pending = {} self._resolve = None # lazy initialization - self._tasks: set[Awaitable] = set() - - def has_next(self) -> bool: - """Check whether there is a next incremental result.""" - return bool(self._pending) - - async def subscribe( - self, - ) -> AsyncGenerator[SubsequentIncrementalExecutionResult, None]: - """Subscribe to the incremental results.""" - is_done = False - pending = self._pending - - try: - while not is_done: - released = self._released - for item in released: - with suppress_key_error: - del pending[item] - self._released = {} + self._tasks = set() - result = self._get_incremental_result(released) + @staticmethod + def report_new_defer_fragment_record( + deferred_fragment_record: DeferredFragmentRecord, + parent_incremental_result_record: InitialResultRecord + | DeferredFragmentRecord + | StreamItemsRecord, + ) -> None: + """Report a new deferred fragment record.""" + parent_incremental_result_record.children[deferred_fragment_record] = None - if not self.has_next(): - is_done = True + @staticmethod + def report_new_deferred_grouped_filed_set_record( + deferred_grouped_field_set_record: DeferredGroupedFieldSetRecord, + ) -> None: + """Report a new deferred grouped field set record.""" + for ( + deferred_fragment_record + ) in deferred_grouped_field_set_record.deferred_fragment_records: + deferred_fragment_record._pending[deferred_grouped_field_set_record] = None # noqa: SLF001 + deferred_fragment_record.deferred_grouped_field_set_records[ + deferred_grouped_field_set_record + ] = None + + @staticmethod + def report_new_stream_items_record( + stream_items_record: StreamItemsRecord, + parent_incremental_data_record: IncrementalDataRecord, + ) -> None: + """Report a new stream items record.""" + if isinstance(parent_incremental_data_record, DeferredGroupedFieldSetRecord): + for parent in parent_incremental_data_record.deferred_fragment_records: + parent.children[stream_items_record] = None + else: + 
parent_incremental_data_record.children[stream_items_record] = None - if result is not None: - yield result - else: - resolve = self._resolve - if resolve is None: - self._resolve = resolve = Event() - await resolve.wait() - finally: - close_async_iterators = [] - for incremental_data_record in pending: - if isinstance( - incremental_data_record, StreamItemsRecord - ): # pragma: no cover - async_iterator = incremental_data_record.async_iterator - if async_iterator: - try: - close_async_iterator = async_iterator.aclose() # type: ignore - except AttributeError: - pass - else: - close_async_iterators.append(close_async_iterator) - await gather(*close_async_iterators) - - def prepare_new_deferred_fragment_record( + def complete_deferred_grouped_field_set( self, - label: str | None, - path: Path | None, - parent_context: IncrementalDataRecord | None, - ) -> DeferredFragmentRecord: - """Prepare a new deferred fragment record.""" - deferred_fragment_record = DeferredFragmentRecord(label, path, parent_context) - - context = parent_context or self._initial_result - context.children[deferred_fragment_record] = None - return deferred_fragment_record - - def prepare_new_stream_items_record( + deferred_grouped_field_set_record: DeferredGroupedFieldSetRecord, + data: dict[str, Any], + ) -> None: + """Complete the given deferred grouped field set record with the given data.""" + deferred_grouped_field_set_record.data = data + for ( + deferred_fragment_record + ) in deferred_grouped_field_set_record.deferred_fragment_records: + pending = deferred_fragment_record._pending # noqa: SLF001 + del pending[deferred_grouped_field_set_record] + if not pending: + self.complete_deferred_fragment_record(deferred_fragment_record) + + def mark_errored_deferred_grouped_field_set( self, - label: str | None, - path: Path | None, - parent_context: IncrementalDataRecord | None, - async_iterator: AsyncIterator[Any] | None = None, - ) -> StreamItemsRecord: - """Prepare a new stream items record.""" - stream_items_record = StreamItemsRecord( - label, path, parent_context, async_iterator - ) - - context = parent_context or self._initial_result - context.children[stream_items_record] = None - return stream_items_record + deferred_grouped_field_set_record: DeferredGroupedFieldSetRecord, + error: GraphQLError, + ) -> None: + """Mark the given deferred grouped field set record as errored.""" + for ( + deferred_fragment_record + ) in deferred_grouped_field_set_record.deferred_fragment_records: + deferred_fragment_record.errors.append(error) + self.complete_deferred_fragment_record(deferred_fragment_record) def complete_deferred_fragment_record( - self, - deferred_fragment_record: DeferredFragmentRecord, - data: dict[str, Any] | None, + self, deferred_fragment_record: DeferredFragmentRecord ) -> None: """Complete the given deferred fragment record.""" - deferred_fragment_record.data = data - deferred_fragment_record.is_completed = True self._release(deferred_fragment_record) def complete_stream_items_record( self, stream_items_record: StreamItemsRecord, - items: list[str] | None, + items: list[Any], ) -> None: """Complete the given stream items record.""" stream_items_record.items = items stream_items_record.is_completed = True self._release(stream_items_record) + def mark_errored_stream_items_record( + self, stream_items_record: StreamItemsRecord, error: GraphQLError + ) -> None: + """Mark the given stream items record as errored.""" + stream_items_record.stream_record.errors.append(error) + 
self.set_is_final_record(stream_items_record) + stream_items_record.is_completed = True + early_return = stream_items_record.stream_record.early_return + if early_return: + self._add_task(early_return()) + self._release(stream_items_record) + + @staticmethod + def set_is_final_record(stream_items_record: StreamItemsRecord) -> None: + """Mark stream items record as final.""" + stream_items_record.is_final_record = True + def set_is_completed_async_iterator( self, stream_items_record: StreamItemsRecord ) -> None: """Mark async iterator for stream items as completed.""" stream_items_record.is_completed_async_iterator = True + self.set_is_final_record(stream_items_record) def add_field_error( self, incremental_data_record: IncrementalDataRecord, error: GraphQLError @@ -481,39 +798,149 @@ def add_field_error( """Add a field error to the given incremental data record.""" incremental_data_record.errors.append(error) - def publish_initial(self) -> None: - """Publish the initial result.""" - for child in self._initial_result.children: + def build_data_response( + self, initial_result_record: InitialResultRecord, data: dict[str, Any] | None + ) -> ExecutionResult | ExperimentalIncrementalExecutionResults: + """Build response for the given data.""" + for child in initial_result_record.children: + if child.filtered: + continue self._publish(child) + errors = initial_result_record.errors or None + if errors: + errors.sort( + key=lambda error: ( + error.locations or [], + error.path or [], + error.message, + ) + ) + pending = self._pending + if pending: + pending_sources: RefSet[DeferredFragmentRecord | StreamRecord] = RefSet( + subsequent_result_record.stream_record + if isinstance(subsequent_result_record, StreamItemsRecord) + else subsequent_result_record + for subsequent_result_record in pending + ) + return ExperimentalIncrementalExecutionResults( + initial_result=InitialIncrementalExecutionResult( + data, + errors, + pending=self._pending_sources_to_results(pending_sources), + has_next=True, + ), + subsequent_results=self._subscribe(), + ) + return ExecutionResult(data, errors) + + def build_error_response( + self, initial_result_record: InitialResultRecord, error: GraphQLError + ) -> ExecutionResult: + """Build response for the given error.""" + errors = initial_result_record.errors + errors.append(error) + # Sort the error list in order to make it deterministic, since we might have + # been using parallel execution. 
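+        # (The sort key orders errors by their source locations first, then
+        # by their paths, and finally by message text; SourceLocation is a
+        # NamedTuple, so lists of locations compare lexicographically.)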
+ errors.sort( + key=lambda error: (error.locations or [], error.path or [], error.message) + ) + return ExecutionResult(None, errors) + def filter( self, - null_path: Path, - erroring_incremental_data_record: IncrementalDataRecord | None, + null_path: Path | None, + erroring_incremental_data_record: IncrementalDataRecord, ) -> None: """Filter out the given erroring incremental data record.""" - null_path_list = null_path.as_list() + null_path_list = null_path.as_list() if null_path else [] - children = (erroring_incremental_data_record or self._initial_result).children + streams: list[StreamRecord] = [] - for child in self._get_descendants(children): - if not self._matches_path(child.path, null_path_list): + children = self._get_children(erroring_incremental_data_record) + descendants = self._get_descendants(children) + + for child in descendants: + if not self._nulls_child_subsequent_result_record(child, null_path_list): continue - self._delete(child) - parent = child.parent_context or self._initial_result - with suppress_key_error: - del parent.children[child] + child.filtered = True if isinstance(child, StreamItemsRecord): - async_iterator = child.async_iterator - if async_iterator: - try: - close_async_iterator = async_iterator.aclose() # type:ignore - except AttributeError: # pragma: no cover - pass - else: - self._add_task(close_async_iterator) + streams.append(child.stream_record) + + early_returns = [] + for stream in streams: + early_return = stream.early_return + if early_return: + early_returns.append(early_return()) + if early_returns: + self._add_task(gather(*early_returns)) + + def _pending_sources_to_results( + self, + pending_sources: RefSet[DeferredFragmentRecord | StreamRecord], + ) -> list[PendingResult]: + """Convert pending sources to pending results.""" + pending_results: list[PendingResult] = [] + for pending_source in pending_sources: + pending_source.pending_sent = True + id_ = self._get_next_id() + pending_source.id = id_ + pending_results.append( + PendingResult(id_, pending_source.path, pending_source.label) + ) + return pending_results + + def _get_next_id(self) -> str: + """Get the next ID for pending results.""" + id_ = self._next_id + self._next_id += 1 + return str(id_) + + async def _subscribe( + self, + ) -> AsyncGenerator[SubsequentIncrementalExecutionResult, None]: + """Subscribe to the incremental results.""" + is_done = False + pending = self._pending + + await sleep(0) # execute pending tasks + + try: + while not is_done: + released = self._released + for item in released: + with suppress_key_error: + del pending[item] + self._released = {} + + result = self._get_incremental_result(released) + + if not self._pending: + is_done = True + + if result is not None: + yield result + else: + resolve = self._resolve + if resolve is None: + self._resolve = resolve = Event() + await resolve.wait() + finally: + streams: list[StreamRecord] = [] + descendants = self._get_descendants(pending) + for subsequent_result_record in descendants: # pragma: no cover + if isinstance(subsequent_result_record, StreamItemsRecord): + streams.append(subsequent_result_record.stream_record) + early_returns = [] + for stream in streams: # pragma: no cover + early_return = stream.early_return + if early_return: + early_returns.append(early_return()) + if early_returns: # pragma: no cover + await gather(*early_returns) def _trigger(self) -> None: """Trigger the resolve event.""" @@ -522,87 +949,180 @@ def _trigger(self) -> None: resolve.set() self._resolve = Event() - def 
_introduce(self, item: IncrementalDataRecord) -> None: + def _introduce(self, item: SubsequentResultRecord) -> None: """Introduce a new IncrementalDataRecord.""" self._pending[item] = None - def _release(self, item: IncrementalDataRecord) -> None: + def _release(self, item: SubsequentResultRecord) -> None: """Release the given IncrementalDataRecord.""" if item in self._pending: self._released[item] = None self._trigger() - def _push(self, item: IncrementalDataRecord) -> None: + def _push(self, item: SubsequentResultRecord) -> None: """Push the given IncrementalDataRecord.""" self._released[item] = None self._pending[item] = None self._trigger() - def _delete(self, item: IncrementalDataRecord) -> None: - """Delete the given IncrementalDataRecord.""" - with suppress_key_error: - del self._released[item] - with suppress_key_error: - del self._pending[item] - self._trigger() - def _get_incremental_result( - self, completed_records: Collection[IncrementalDataRecord] + self, completed_records: Collection[SubsequentResultRecord] ) -> SubsequentIncrementalExecutionResult | None: """Get the incremental result with the completed records.""" + update = self._process_pending(completed_records) + pending, incremental, completed = ( + update.pending, + update.incremental, + update.completed, + ) + + has_next = bool(self._pending) + if not incremental and not completed and has_next: + return None + + return SubsequentIncrementalExecutionResult( + has_next, pending or None, incremental or None, completed or None + ) + + def _process_pending( + self, + completed_records: Collection[SubsequentResultRecord], + ) -> IncrementalUpdate: + """Process the pending records.""" + new_pending_sources: RefSet[DeferredFragmentRecord | StreamRecord] = RefSet() incremental_results: list[IncrementalResult] = [] - encountered_completed_async_iterator = False - append_result = incremental_results.append - for incremental_data_record in completed_records: - incremental_result: IncrementalResult - for child in incremental_data_record.children: + completed_results: list[CompletedResult] = [] + to_result = self._completed_record_to_result + for subsequent_result_record in completed_records: + for child in subsequent_result_record.children: + if child.filtered: + continue + pending_source: DeferredFragmentRecord | StreamRecord = ( + child.stream_record + if isinstance(child, StreamItemsRecord) + else child + ) + if not pending_source.pending_sent: + new_pending_sources.add(pending_source) self._publish(child) - if isinstance(incremental_data_record, StreamItemsRecord): - items = incremental_data_record.items - if incremental_data_record.is_completed_async_iterator: + incremental_result: IncrementalResult + if isinstance(subsequent_result_record, StreamItemsRecord): + if subsequent_result_record.is_final_record: + stream_record = subsequent_result_record.stream_record + new_pending_sources.discard(stream_record) + completed_results.append(to_result(stream_record)) + if subsequent_result_record.is_completed_async_iterator: # async iterable resolver finished but there may be pending payload - encountered_completed_async_iterator = True - continue # pragma: no cover + continue + if subsequent_result_record.stream_record.errors: + continue incremental_result = IncrementalStreamResult( - items, - incremental_data_record.errors - if incremental_data_record.errors - else None, - incremental_data_record.path, - incremental_data_record.label, + # safe because `items` is always defined + # when the record is completed + 
subsequent_result_record.items, + # safe because `id` is defined + # once the stream has been released as pending + subsequent_result_record.stream_record.id, # type: ignore ) + if subsequent_result_record.errors: + incremental_result.errors = subsequent_result_record.errors + incremental_results.append(incremental_result) else: - data = incremental_data_record.data - incremental_result = IncrementalDeferResult( - data, - incremental_data_record.errors - if incremental_data_record.errors - else None, - incremental_data_record.path, - incremental_data_record.label, - ) - append_result(incremental_result) + new_pending_sources.discard(subsequent_result_record) + completed_results.append(to_result(subsequent_result_record)) + if subsequent_result_record.errors: + continue + for ( + deferred_grouped_field_set_record + ) in subsequent_result_record.deferred_grouped_field_set_records: + if not deferred_grouped_field_set_record.sent: + deferred_grouped_field_set_record.sent = True + incremental_result = self._get_incremental_defer_result( + deferred_grouped_field_set_record + ) + if deferred_grouped_field_set_record.errors: + incremental_result.errors = ( + deferred_grouped_field_set_record.errors + ) + incremental_results.append(incremental_result) + return IncrementalUpdate( + self._pending_sources_to_results(new_pending_sources), + incremental_results, + completed_results, + ) - if incremental_results: - return SubsequentIncrementalExecutionResult( - incremental=incremental_results, has_next=self.has_next() - ) - if encountered_completed_async_iterator and not self.has_next(): - return SubsequentIncrementalExecutionResult(has_next=False) - return None + def _get_incremental_defer_result( + self, deferred_grouped_field_set_record: DeferredGroupedFieldSetRecord + ) -> IncrementalDeferResult: + """Get the incremental defer result from the grouped field set record.""" + data = deferred_grouped_field_set_record.data + fragment_records = deferred_grouped_field_set_record.deferred_fragment_records + max_length = len(fragment_records[0].path) + max_index = 0 + for i in range(1, len(fragment_records)): + fragment_record = fragment_records[i] + length = len(fragment_record.path) + if length > max_length: + max_length = length + max_index = i + record_with_longest_path = fragment_records[max_index] + longest_path = record_with_longest_path.path + sub_path = deferred_grouped_field_set_record.path[len(longest_path) :] + id_ = record_with_longest_path.id + return IncrementalDeferResult( + # safe because `data` is always defined when the record is completed + data, # type: ignore + # safe because `id` is defined + # once the fragment has been released as pending + id_, # type: ignore + sub_path or None, + ) + + @staticmethod + def _completed_record_to_result( + completed_record: DeferredFragmentRecord | StreamRecord, + ) -> CompletedResult: + """Convert the completed record to a result.""" + return CompletedResult( + # safe because `id` is defined once the stream has been released as pending + completed_record.id, # type: ignore + completed_record.errors or None, + ) - def _publish(self, incremental_data_record: IncrementalDataRecord) -> None: + def _publish(self, subsequent_result_record: SubsequentResultRecord) -> None: """Publish the given incremental data record.""" - if incremental_data_record.is_completed: - self._push(incremental_data_record) + if isinstance(subsequent_result_record, StreamItemsRecord): + if subsequent_result_record.is_completed: + self._push(subsequent_result_record) + else: + 
self._introduce(subsequent_result_record) + elif subsequent_result_record._pending: # noqa: SLF001 + self._introduce(subsequent_result_record) else: - self._introduce(incremental_data_record) + self._push(subsequent_result_record) + + @staticmethod + def _get_children( + erroring_incremental_data_record: IncrementalDataRecord, + ) -> dict[SubsequentResultRecord, None]: + """Get the children of the given erroring incremental data record.""" + children: dict[SubsequentResultRecord, None] = {} + if isinstance(erroring_incremental_data_record, DeferredGroupedFieldSetRecord): + for ( + erroring_incremental_result_record + ) in erroring_incremental_data_record.deferred_fragment_records: + for child in erroring_incremental_result_record.children: + children[child] = None + else: + for child in erroring_incremental_data_record.children: + children[child] = None + return children def _get_descendants( self, - children: dict[IncrementalDataRecord, None], - descendants: dict[IncrementalDataRecord, None] | None = None, - ) -> dict[IncrementalDataRecord, None]: + children: dict[SubsequentResultRecord, None], + descendants: dict[SubsequentResultRecord, None] | None = None, + ) -> dict[SubsequentResultRecord, None]: """Get the descendants of the given children.""" if descendants is None: descendants = {} @@ -611,6 +1131,24 @@ def _get_descendants( self._get_descendants(child.children, descendants) return descendants + def _nulls_child_subsequent_result_record( + self, + subsequent_result_record: SubsequentResultRecord, + null_path: list[str | int], + ) -> bool: + """Check whether the given subsequent result record is nulled.""" + incremental_data_records: ( + list[SubsequentResultRecord] | dict[DeferredGroupedFieldSetRecord, None] + ) = ( + [subsequent_result_record] + if isinstance(subsequent_result_record, StreamItemsRecord) + else subsequent_result_record.deferred_grouped_field_set_records + ) + return any( + self._matches_path(incremental_data_record.path, null_path) + for incremental_data_record in incremental_data_records + ) + def _matches_path( self, test_path: list[str | int], base_path: list[str | int] ) -> bool: @@ -625,82 +1163,155 @@ def _add_task(self, awaitable: Awaitable[Any]) -> None: task.add_done_callback(tasks.discard) -class DeferredFragmentRecord: - """A record collecting data marked with the defer directive""" +class InitialResultRecord: + """Initial result record""" errors: list[GraphQLError] - label: str | None + children: dict[SubsequentResultRecord, None] + + def __init__(self) -> None: + self.errors = [] + self.children = {} + + +class DeferredGroupedFieldSetRecord: + """Deferred grouped field set record""" + path: list[str | int] + deferred_fragment_records: list[DeferredFragmentRecord] + grouped_field_set: GroupedFieldSet + should_initiate_defer: bool + errors: list[GraphQLError] data: dict[str, Any] | None - parent_context: IncrementalDataRecord | None - children: dict[IncrementalDataRecord, None] - is_completed: bool + sent: bool def __init__( self, - label: str | None, - path: Path | None, - parent_context: IncrementalDataRecord | None, + deferred_fragment_records: list[DeferredFragmentRecord], + grouped_field_set: GroupedFieldSet, + should_initiate_defer: bool, + path: Path | None = None, ) -> None: - self.label = label self.path = path.as_list() if path else [] - self.parent_context = parent_context + self.deferred_fragment_records = deferred_fragment_records + self.grouped_field_set = grouped_field_set + self.should_initiate_defer = should_initiate_defer self.errors = 
[]
+        self.sent = False
+
+    def __repr__(self) -> str:
+        name = self.__class__.__name__
+        args: list[str] = [
+            f"deferred_fragment_records={self.deferred_fragment_records!r}",
+            f"grouped_field_set={self.grouped_field_set!r}",
+        ]
+        if self.path:
+            args.append(f"path={self.path!r}")
+        return f"{name}({', '.join(args)})"
+
+
+class DeferredFragmentRecord:
+    """Deferred fragment record"""
+
+    path: list[str | int]
+    label: str | None
+    id: str | None
+    children: dict[SubsequentResultRecord, None]
+    deferred_grouped_field_set_records: dict[DeferredGroupedFieldSetRecord, None]
+    errors: list[GraphQLError]
+    filtered: bool
+    pending_sent: bool
+    _pending: dict[DeferredGroupedFieldSetRecord, None]
+
+    def __init__(self, path: Path | None = None, label: str | None = None) -> None:
+        self.path = path.as_list() if path else []
+        self.label = label
+        self.id = None
         self.children = {}
-        self.is_completed = False
-        self.data = None
+        self.filtered = False
+        self.pending_sent = False
+        self.deferred_grouped_field_set_records = {}
+        self.errors = []
+        self._pending = {}
+
+    def __repr__(self) -> str:
+        name = self.__class__.__name__
+        args: list[str] = []
+        if self.path:
+            args.append(f"path={self.path!r}")
+        if self.label:
+            args.append(f"label={self.label!r}")
+        return f"{name}({', '.join(args)})"
+
+
+class StreamRecord:
+    """Stream record"""
+
+    label: str | None
+    path: list[str | int]
+    id: str | None
+    errors: list[GraphQLError]
+    early_return: Callable[[], Awaitable[Any]] | None
+    pending_sent: bool
+
+    def __init__(
+        self,
+        path: Path,
+        label: str | None = None,
+        early_return: Callable[[], Awaitable[Any]] | None = None,
+    ) -> None:
+        self.path = path.as_list()
+        self.label = label
+        self.id = None
+        self.errors = []
+        self.early_return = early_return
+        self.pending_sent = False

     def __repr__(self) -> str:
         name = self.__class__.__name__
-        args: list[str] = [f"path={self.path!r}"]
+        args: list[str] = []
+        if self.path:
+            args.append(f"path={self.path!r}")
         if self.label:
             args.append(f"label={self.label!r}")
-        if self.parent_context:
-            args.append("parent_context")
-        if self.data is not None:
-            args.append("data")
         return f"{name}({', '.join(args)})"


 class StreamItemsRecord:
-    """A record collecting items marked with the stream directive"""
+    """Stream items record"""

     errors: list[GraphQLError]
-    label: str | None
+    stream_record: StreamRecord
     path: list[str | int]
-    items: list[str] | None
-    parent_context: IncrementalDataRecord | None
-    children: dict[IncrementalDataRecord, None]
-    async_iterator: AsyncIterator[Any] | None
+    items: list[Any]
+    children: dict[SubsequentResultRecord, None]
+    is_final_record: bool
     is_completed_async_iterator: bool
     is_completed: bool
+    filtered: bool

     def __init__(
         self,
-        label: str | None,
-        path: Path | None,
-        parent_context: IncrementalDataRecord | None,
-        async_iterator: AsyncIterator[Any] | None = None,
+        stream_record: StreamRecord,
+        path: Path | None = None,
     ) -> None:
-        self.label = label
+        self.stream_record = stream_record
         self.path = path.as_list() if path else []
-        self.parent_context = parent_context
-        self.async_iterator = async_iterator
-        self.errors = []
         self.children = {}
+        self.errors = []
         self.is_completed_async_iterator = self.is_completed = False
-        self.items = None
+        self.is_final_record = self.filtered = False

     def __repr__(self) -> str:
         name = self.__class__.__name__
-        args: list[str] = [f"path={self.path!r}"]
-        if self.label:
-            args.append(f"label={self.label!r}")
-        if self.parent_context:
-            args.append("parent_context")
-        if self.items 
is not None: - args.append("items") + args: list[str] = [f"stream_record={self.stream_record!r}"] + if self.path: + args.append(f"path={self.path!r}") return f"{name}({', '.join(args)})" -IncrementalDataRecord = Union[DeferredFragmentRecord, StreamItemsRecord] +IncrementalDataRecord = Union[ + InitialResultRecord, DeferredGroupedFieldSetRecord, StreamItemsRecord +] + +SubsequentResultRecord = Union[DeferredFragmentRecord, StreamItemsRecord] diff --git a/src/graphql/execution/middleware.py b/src/graphql/execution/middleware.py index de99e12b..6d999171 100644 --- a/src/graphql/execution/middleware.py +++ b/src/graphql/execution/middleware.py @@ -30,7 +30,7 @@ class MiddlewareManager: """ # allow custom attributes (not used internally) - __slots__ = "__dict__", "middlewares", "_middleware_resolvers", "_cached_resolvers" + __slots__ = "__dict__", "_cached_resolvers", "_middleware_resolvers", "middlewares" _cached_resolvers: dict[GraphQLFieldResolver, GraphQLFieldResolver] _middleware_resolvers: list[Callable] | None diff --git a/src/graphql/execution/values.py b/src/graphql/execution/values.py index 4810a8bd..5309996a 100644 --- a/src/graphql/execution/values.py +++ b/src/graphql/execution/values.py @@ -26,6 +26,7 @@ GraphQLDirective, GraphQLField, GraphQLSchema, + is_input_object_type, is_input_type, is_non_null_type, ) @@ -128,16 +129,20 @@ def coerce_variable_values( continue def on_input_value_error( - path: list[str | int], invalid_value: Any, error: GraphQLError + path: list[str | int], + invalid_value: Any, + error: GraphQLError, + var_name: str = var_name, + var_def_node: VariableDefinitionNode = var_def_node, ) -> None: invalid_str = inspect(invalid_value) - prefix = f"Variable '${var_name}' got invalid value {invalid_str}" # noqa: B023 + prefix = f"Variable '${var_name}' got invalid value {invalid_str}" if path: - prefix += f" at '{var_name}{print_path_list(path)}'" # noqa: B023 + prefix += f" at '{var_name}{print_path_list(path)}'" on_error( GraphQLError( prefix + "; " + error.message, - var_def_node, # noqa: B023 + var_def_node, original_error=error, ) ) @@ -167,12 +172,15 @@ def get_argument_values( argument_node = arg_node_map.get(name) if argument_node is None: - if arg_def.default_value is not Undefined: - coerced_values[arg_def.out_name or name] = arg_def.default_value + value = arg_def.default_value + if value is not Undefined: + if is_input_object_type(arg_def.type): + # coerce input value so that out_names are used + value = coerce_input_value(value, arg_def.type) + coerced_values[arg_def.out_name or name] = value elif is_non_null_type(arg_type): # pragma: no cover else msg = ( - f"Argument '{name}' of required type '{arg_type}'" - " was not provided." + f"Argument '{name}' of required type '{arg_type}' was not provided." 
) raise GraphQLError(msg, node) continue # pragma: no cover @@ -183,8 +191,12 @@ def get_argument_values( if isinstance(value_node, VariableNode): variable_name = value_node.name.value if variable_values is None or variable_name not in variable_values: - if arg_def.default_value is not Undefined: - coerced_values[arg_def.out_name or name] = arg_def.default_value + value = arg_def.default_value + if value is not Undefined: + if is_input_object_type(arg_def.type): + # coerce input value so that out_names are used + value = coerce_input_value(value, arg_def.type) + coerced_values[arg_def.out_name or name] = value elif is_non_null_type(arg_type): # pragma: no cover else msg = ( f"Argument '{name}' of required type '{arg_type}'" @@ -193,7 +205,8 @@ def get_argument_values( ) raise GraphQLError(msg, value_node) continue # pragma: no cover - is_null = variable_values[variable_name] is None + variable_value = variable_values[variable_name] + is_null = variable_value is None or variable_value is Undefined if is_null and is_non_null_type(arg_type): msg = f"Argument '{name}' of non-null type '{arg_type}' must not be null." diff --git a/src/graphql/graphql.py b/src/graphql/graphql.py index aacc7326..fe1dd5c7 100644 --- a/src/graphql/graphql.py +++ b/src/graphql/graphql.py @@ -96,9 +96,9 @@ async def graphql( ) if default_is_awaitable(result): - return await cast(Awaitable[ExecutionResult], result) + return await cast("Awaitable[ExecutionResult]", result) - return cast(ExecutionResult, result) + return cast("ExecutionResult", result) def assume_not_awaitable(_value: Any) -> bool: @@ -149,11 +149,11 @@ def graphql_sync( # Assert that the execution was synchronous. if default_is_awaitable(result): - ensure_future(cast(Awaitable[ExecutionResult], result)).cancel() + ensure_future(cast("Awaitable[ExecutionResult]", result)).cancel() msg = "GraphQL execution failed to complete synchronously." 
raise RuntimeError(msg) - return cast(ExecutionResult, result) + return cast("ExecutionResult", result) def graphql_impl( diff --git a/src/graphql/language/__init__.py b/src/graphql/language/__init__.py index 2f105a98..bd5e7be1 100644 --- a/src/graphql/language/__init__.py +++ b/src/graphql/language/__init__.py @@ -115,104 +115,104 @@ from .directive_locations import DirectiveLocation __all__ = [ - "get_location", - "SourceLocation", - "FormattedSourceLocation", - "print_location", - "print_source_location", - "TokenKind", - "Lexer", - "parse", - "parse_value", - "parse_const_value", - "parse_type", - "print_ast", - "Source", - "visit", - "Visitor", - "ParallelVisitor", - "VisitorAction", - "VisitorKeyMap", "BREAK", - "SKIP", - "REMOVE", "IDLE", - "Location", - "Token", + "REMOVE", + "SKIP", + "ArgumentNode", + "BooleanValueNode", + "ConstArgumentNode", + "ConstDirectiveNode", + "ConstListValueNode", + "ConstObjectFieldNode", + "ConstObjectValueNode", + "ConstValueNode", + "DefinitionNode", + "DirectiveDefinitionNode", "DirectiveLocation", - "Node", - "NameNode", + "DirectiveNode", "DocumentNode", - "DefinitionNode", + "EnumTypeDefinitionNode", + "EnumTypeExtensionNode", + "EnumValueDefinitionNode", + "EnumValueNode", + "ErrorBoundaryNode", "ExecutableDefinitionNode", - "OperationDefinitionNode", - "OperationType", - "VariableDefinitionNode", - "VariableNode", - "SelectionSetNode", - "SelectionNode", + "FieldDefinitionNode", "FieldNode", - "NullabilityAssertionNode", - "NonNullAssertionNode", - "ErrorBoundaryNode", - "ListNullabilityOperatorNode", - "ArgumentNode", - "ConstArgumentNode", + "FloatValueNode", + "FormattedSourceLocation", + "FragmentDefinitionNode", "FragmentSpreadNode", "InlineFragmentNode", - "FragmentDefinitionNode", - "ValueNode", - "ConstValueNode", + "InputObjectTypeDefinitionNode", + "InputObjectTypeExtensionNode", + "InputValueDefinitionNode", "IntValueNode", - "FloatValueNode", - "StringValueNode", - "BooleanValueNode", - "NullValueNode", - "EnumValueNode", + "InterfaceTypeDefinitionNode", + "InterfaceTypeExtensionNode", + "Lexer", + "ListNullabilityOperatorNode", + "ListTypeNode", "ListValueNode", - "ConstListValueNode", - "ObjectValueNode", - "ConstObjectValueNode", - "ObjectFieldNode", - "ConstObjectFieldNode", - "DirectiveNode", - "ConstDirectiveNode", - "TypeNode", + "Location", + "NameNode", "NamedTypeNode", - "ListTypeNode", + "Node", + "NonNullAssertionNode", "NonNullTypeNode", - "TypeSystemDefinitionNode", - "SchemaDefinitionNode", + "NullValueNode", + "NullabilityAssertionNode", + "ObjectFieldNode", + "ObjectTypeDefinitionNode", + "ObjectTypeExtensionNode", + "ObjectValueNode", + "OperationDefinitionNode", + "OperationType", "OperationTypeDefinitionNode", - "TypeDefinitionNode", + "ParallelVisitor", "ScalarTypeDefinitionNode", - "ObjectTypeDefinitionNode", - "FieldDefinitionNode", - "InputValueDefinitionNode", - "InterfaceTypeDefinitionNode", - "UnionTypeDefinitionNode", - "EnumTypeDefinitionNode", - "EnumValueDefinitionNode", - "InputObjectTypeDefinitionNode", - "DirectiveDefinitionNode", - "TypeSystemExtensionNode", + "ScalarTypeExtensionNode", + "SchemaDefinitionNode", "SchemaExtensionNode", + "SelectionNode", + "SelectionSetNode", + "Source", + "SourceLocation", + "StringValueNode", + "Token", + "TokenKind", + "TypeDefinitionNode", "TypeExtensionNode", - "ScalarTypeExtensionNode", - "ObjectTypeExtensionNode", - "InterfaceTypeExtensionNode", + "TypeNode", + "TypeSystemDefinitionNode", + "TypeSystemExtensionNode", + "UnionTypeDefinitionNode", 
"UnionTypeExtensionNode", - "EnumTypeExtensionNode", - "InputObjectTypeExtensionNode", + "ValueNode", + "VariableDefinitionNode", + "VariableNode", + "Visitor", + "VisitorAction", + "VisitorKeyMap", + "get_location", + "is_const_value_node", "is_definition_node", "is_executable_definition_node", "is_nullability_assertion_node", "is_selection_node", - "is_value_node", - "is_const_value_node", + "is_type_definition_node", + "is_type_extension_node", "is_type_node", "is_type_system_definition_node", - "is_type_definition_node", "is_type_system_extension_node", - "is_type_extension_node", + "is_value_node", + "parse", + "parse_const_value", + "parse_type", + "parse_value", + "print_ast", + "print_location", + "print_source_location", + "visit", ] diff --git a/src/graphql/language/ast.py b/src/graphql/language/ast.py index 5b61767d..a67ee1ea 100644 --- a/src/graphql/language/ast.py +++ b/src/graphql/language/ast.py @@ -19,73 +19,73 @@ __all__ = [ - "Location", - "Token", - "Node", - "NameNode", - "DocumentNode", + "QUERY_DOCUMENT_KEYS", + "ArgumentNode", + "BooleanValueNode", + "ConstArgumentNode", + "ConstDirectiveNode", + "ConstListValueNode", + "ConstObjectFieldNode", + "ConstObjectValueNode", + "ConstValueNode", "DefinitionNode", + "DirectiveDefinitionNode", + "DirectiveNode", + "DocumentNode", + "EnumTypeDefinitionNode", + "EnumTypeExtensionNode", + "EnumValueDefinitionNode", + "EnumValueNode", + "ErrorBoundaryNode", "ExecutableDefinitionNode", - "OperationDefinitionNode", - "VariableDefinitionNode", - "SelectionSetNode", - "SelectionNode", + "FieldDefinitionNode", "FieldNode", - "NullabilityAssertionNode", - "NonNullAssertionNode", - "ErrorBoundaryNode", - "ListNullabilityOperatorNode", - "ArgumentNode", - "ConstArgumentNode", + "FloatValueNode", + "FragmentDefinitionNode", "FragmentSpreadNode", "InlineFragmentNode", - "FragmentDefinitionNode", - "ValueNode", - "ConstValueNode", - "VariableNode", + "InputObjectTypeDefinitionNode", + "InputObjectTypeExtensionNode", + "InputValueDefinitionNode", "IntValueNode", - "FloatValueNode", - "StringValueNode", - "BooleanValueNode", - "NullValueNode", - "EnumValueNode", + "InterfaceTypeDefinitionNode", + "InterfaceTypeExtensionNode", + "ListNullabilityOperatorNode", + "ListTypeNode", "ListValueNode", - "ConstListValueNode", - "ObjectValueNode", - "ConstObjectValueNode", - "ObjectFieldNode", - "ConstObjectFieldNode", - "DirectiveNode", - "ConstDirectiveNode", - "TypeNode", + "Location", + "NameNode", "NamedTypeNode", - "ListTypeNode", + "Node", + "NonNullAssertionNode", "NonNullTypeNode", - "TypeSystemDefinitionNode", - "SchemaDefinitionNode", + "NullValueNode", + "NullabilityAssertionNode", + "ObjectFieldNode", + "ObjectTypeDefinitionNode", + "ObjectTypeExtensionNode", + "ObjectValueNode", + "OperationDefinitionNode", "OperationType", "OperationTypeDefinitionNode", - "TypeDefinitionNode", "ScalarTypeDefinitionNode", - "ObjectTypeDefinitionNode", - "FieldDefinitionNode", - "InputValueDefinitionNode", - "InterfaceTypeDefinitionNode", - "UnionTypeDefinitionNode", - "EnumTypeDefinitionNode", - "EnumValueDefinitionNode", - "InputObjectTypeDefinitionNode", - "DirectiveDefinitionNode", + "ScalarTypeExtensionNode", + "SchemaDefinitionNode", "SchemaExtensionNode", + "SelectionNode", + "SelectionSetNode", + "StringValueNode", + "Token", + "TypeDefinitionNode", "TypeExtensionNode", + "TypeNode", + "TypeSystemDefinitionNode", "TypeSystemExtensionNode", - "ScalarTypeExtensionNode", - "ObjectTypeExtensionNode", - "InterfaceTypeExtensionNode", + 
"UnionTypeDefinitionNode", "UnionTypeExtensionNode", - "EnumTypeExtensionNode", - "InputObjectTypeExtensionNode", - "QUERY_DOCUMENT_KEYS", + "ValueNode", + "VariableDefinitionNode", + "VariableNode", ] @@ -95,7 +95,7 @@ class Token: Represents a range of characters represented by a lexical token within a Source. """ - __slots__ = "kind", "start", "end", "line", "column", "prev", "next", "value" + __slots__ = "column", "end", "kind", "line", "next", "prev", "start", "value" kind: TokenKind # the kind of token start: int # the character offset at which this Node begins @@ -202,11 +202,11 @@ class Location: """ __slots__ = ( - "start", "end", - "start_token", "end_token", "source", + "start", + "start_token", ) start: int # character offset at which this Node begins @@ -345,7 +345,7 @@ class Node: """AST nodes""" # allow custom attributes and weak references (not used internally) - __slots__ = "__dict__", "__weakref__", "loc", "_hash" + __slots__ = "__dict__", "__weakref__", "_hash", "loc" loc: Location | None @@ -457,7 +457,7 @@ class DefinitionNode(Node): class ExecutableDefinitionNode(DefinitionNode): - __slots__ = "name", "directives", "variable_definitions", "selection_set" + __slots__ = "directives", "name", "selection_set", "variable_definitions" name: NameNode | None directives: tuple[DirectiveNode, ...] @@ -472,7 +472,7 @@ class OperationDefinitionNode(ExecutableDefinitionNode): class VariableDefinitionNode(Node): - __slots__ = "variable", "type", "default_value", "directives" + __slots__ = "default_value", "directives", "type", "variable" variable: VariableNode type: TypeNode @@ -493,7 +493,7 @@ class SelectionNode(Node): class FieldNode(SelectionNode): - __slots__ = "alias", "name", "arguments", "nullability_assertion", "selection_set" + __slots__ = "alias", "arguments", "name", "nullability_assertion", "selection_set" alias: NameNode | None name: NameNode @@ -542,7 +542,7 @@ class FragmentSpreadNode(SelectionNode): class InlineFragmentNode(SelectionNode): - __slots__ = "type_condition", "selection_set" + __slots__ = "selection_set", "type_condition" type_condition: NamedTypeNode selection_set: SelectionSetNode @@ -581,7 +581,7 @@ class FloatValueNode(ValueNode): class StringValueNode(ValueNode): - __slots__ = "value", "block" + __slots__ = "block", "value" value: str block: bool | None @@ -650,7 +650,7 @@ class ConstObjectFieldNode(ObjectFieldNode): class DirectiveNode(Node): - __slots__ = "name", "arguments" + __slots__ = "arguments", "name" name: NameNode arguments: tuple[ArgumentNode, ...] @@ -711,7 +711,7 @@ class OperationTypeDefinitionNode(Node): class TypeDefinitionNode(TypeSystemDefinitionNode): - __slots__ = "description", "name", "directives" + __slots__ = "description", "directives", "name" description: StringValueNode | None name: NameNode @@ -725,7 +725,7 @@ class ScalarTypeDefinitionNode(TypeDefinitionNode): class ObjectTypeDefinitionNode(TypeDefinitionNode): - __slots__ = "interfaces", "fields" + __slots__ = "fields", "interfaces" interfaces: tuple[NamedTypeNode, ...] directives: tuple[ConstDirectiveNode, ...] 
@@ -733,7 +733,7 @@ class ObjectTypeDefinitionNode(TypeDefinitionNode): class FieldDefinitionNode(DefinitionNode): - __slots__ = "description", "name", "directives", "arguments", "type" + __slots__ = "arguments", "description", "directives", "name", "type" description: StringValueNode | None name: NameNode @@ -743,7 +743,7 @@ class FieldDefinitionNode(DefinitionNode): class InputValueDefinitionNode(DefinitionNode): - __slots__ = "description", "name", "directives", "type", "default_value" + __slots__ = "default_value", "description", "directives", "name", "type" description: StringValueNode | None name: NameNode @@ -775,7 +775,7 @@ class EnumTypeDefinitionNode(TypeDefinitionNode): class EnumValueDefinitionNode(DefinitionNode): - __slots__ = "description", "name", "directives" + __slots__ = "description", "directives", "name" description: StringValueNode | None name: NameNode @@ -793,7 +793,7 @@ class InputObjectTypeDefinitionNode(TypeDefinitionNode): class DirectiveDefinitionNode(TypeSystemDefinitionNode): - __slots__ = "description", "name", "arguments", "repeatable", "locations" + __slots__ = "arguments", "description", "locations", "name", "repeatable" description: StringValueNode | None name: NameNode @@ -816,7 +816,7 @@ class SchemaExtensionNode(Node): class TypeExtensionNode(TypeSystemDefinitionNode): - __slots__ = "name", "directives" + __slots__ = "directives", "name" name: NameNode directives: tuple[ConstDirectiveNode, ...] @@ -830,14 +830,14 @@ class ScalarTypeExtensionNode(TypeExtensionNode): class ObjectTypeExtensionNode(TypeExtensionNode): - __slots__ = "interfaces", "fields" + __slots__ = "fields", "interfaces" interfaces: tuple[NamedTypeNode, ...] fields: tuple[FieldDefinitionNode, ...] class InterfaceTypeExtensionNode(TypeExtensionNode): - __slots__ = "interfaces", "fields" + __slots__ = "fields", "interfaces" interfaces: tuple[NamedTypeNode, ...] fields: tuple[FieldDefinitionNode, ...] 
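Most of the churn in this file and in the preceding `__init__.py` hunks is mechanical: `__all__` lists and `__slots__` tuples are re-sorted alphabetically, presumably to satisfy the project's new ruff lint rules. The order of either sequence has no runtime effect, as this small sketch with a hypothetical `Pt` class (not taken from the diff) illustrates:

    # Slot order is purely cosmetic: attribute behavior is identical.
    class Pt:
        __slots__ = "y", "x"  # deliberately not in declaration order

    p = Pt()
    p.x, p.y = 1.0, 2.0  # access works the same however the slots are ordered
    assert (p.x, p.y) == (1.0, 2.0)

The same applies to `__all__`: sorting it only normalizes how the export list reads; `from module import *` picks up the same set of names either way.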
diff --git a/src/graphql/language/block_string.py b/src/graphql/language/block_string.py index d784c236..248927b4 100644 --- a/src/graphql/language/block_string.py +++ b/src/graphql/language/block_string.py @@ -149,8 +149,7 @@ def print_block_string(value: str, minimize: bool = False) -> str: skip_leading_new_line = is_single_line and value and value[0] in " \t" before = ( "\n" - if print_as_multiple_lines - and not skip_leading_new_line + if (print_as_multiple_lines and not skip_leading_new_line) or force_leading_new_line else "" ) diff --git a/src/graphql/language/character_classes.py b/src/graphql/language/character_classes.py index 628bd60f..5d870576 100644 --- a/src/graphql/language/character_classes.py +++ b/src/graphql/language/character_classes.py @@ -1,6 +1,6 @@ """Character classes""" -__all__ = ["is_digit", "is_letter", "is_name_start", "is_name_continue"] +__all__ = ["is_digit", "is_letter", "is_name_continue", "is_name_start"] def is_digit(char: str) -> bool: diff --git a/src/graphql/language/lexer.py b/src/graphql/language/lexer.py index f93bd3b7..9ec37427 100644 --- a/src/graphql/language/lexer.py +++ b/src/graphql/language/lexer.py @@ -342,7 +342,7 @@ def read_escaped_unicode_variable_width(self, position: int) -> EscapeSequence: raise GraphQLSyntaxError( self.source, position, - f"Invalid Unicode escape sequence: '{body[position: position + size]}'.", + f"Invalid Unicode escape sequence: '{body[position : position + size]}'.", ) def read_escaped_unicode_fixed_width(self, position: int) -> EscapeSequence: @@ -368,7 +368,7 @@ def read_escaped_unicode_fixed_width(self, position: int) -> EscapeSequence: raise GraphQLSyntaxError( self.source, position, - f"Invalid Unicode escape sequence: '{body[position: position + 6]}'.", + f"Invalid Unicode escape sequence: '{body[position : position + 6]}'.", ) def read_escaped_character(self, position: int) -> EscapeSequence: @@ -380,7 +380,7 @@ def read_escaped_character(self, position: int) -> EscapeSequence: raise GraphQLSyntaxError( self.source, position, - f"Invalid character escape sequence: '{body[position: position + 2]}'.", + f"Invalid character escape sequence: '{body[position : position + 2]}'.", ) def read_block_string(self, start: int) -> Token: diff --git a/src/graphql/language/location.py b/src/graphql/language/location.py index 8b1ee38d..7af55082 100644 --- a/src/graphql/language/location.py +++ b/src/graphql/language/location.py @@ -12,7 +12,7 @@ if TYPE_CHECKING: from .source import Source -__all__ = ["get_location", "SourceLocation", "FormattedSourceLocation"] +__all__ = ["FormattedSourceLocation", "SourceLocation", "get_location"] class FormattedSourceLocation(TypedDict): diff --git a/src/graphql/language/parser.py b/src/graphql/language/parser.py index 78d308d0..59299a1d 100644 --- a/src/graphql/language/parser.py +++ b/src/graphql/language/parser.py @@ -77,7 +77,7 @@ from typing_extensions import TypeAlias -__all__ = ["parse", "parse_type", "parse_value", "parse_const_value"] +__all__ = ["parse", "parse_const_value", "parse_type", "parse_value"] T = TypeVar("T") @@ -255,7 +255,7 @@ def __init__( experimental_client_controlled_nullability: bool = False, ) -> None: if not is_source(source): - source = Source(cast(str, source)) + source = Source(cast("str", source)) self._no_location = no_location self._max_tokens = max_tokens @@ -319,7 +319,7 @@ def parse_definition(self) -> DefinitionNode: ) if keyword_token.kind is TokenKind.NAME: - token_name = cast(str, keyword_token.value) + token_name = cast("str", 
keyword_token.value) method_name = self._parse_type_system_definition_method_names.get( token_name ) @@ -471,8 +471,11 @@ def parse_nullability_assertion(self) -> NullabilityAssertionNode | None: def parse_arguments(self, is_const: bool) -> list[ArgumentNode]: """Arguments[Const]: (Argument[?Const]+)""" item = self.parse_const_argument if is_const else self.parse_argument - item = cast(Callable[[], ArgumentNode], item) - return self.optional_many(TokenKind.PAREN_L, item, TokenKind.PAREN_R) + return self.optional_many( + TokenKind.PAREN_L, + cast("Callable[[], ArgumentNode]", item), + TokenKind.PAREN_R, + ) def parse_argument(self, is_const: bool = False) -> ArgumentNode: """Argument[Const]: Name : Value[?Const]""" @@ -486,7 +489,7 @@ def parse_argument(self, is_const: bool = False) -> ArgumentNode: def parse_const_argument(self) -> ConstArgumentNode: """Argument[Const]: Name : Value[Const]""" - return cast(ConstArgumentNode, self.parse_argument(True)) + return cast("ConstArgumentNode", self.parse_argument(True)) # Implement the parsing rules in the Fragments section. @@ -640,7 +643,7 @@ def parse_variable_value(self, is_const: bool) -> VariableNode: return self.parse_variable() def parse_const_value_literal(self) -> ConstValueNode: - return cast(ConstValueNode, self.parse_value_literal(True)) + return cast("ConstValueNode", self.parse_value_literal(True)) # Implement the parsing rules in the Directives section. @@ -653,7 +656,7 @@ def parse_directives(self, is_const: bool) -> list[DirectiveNode]: return directives def parse_const_directives(self) -> list[ConstDirectiveNode]: - return cast(List[ConstDirectiveNode], self.parse_directives(True)) + return cast("List[ConstDirectiveNode]", self.parse_directives(True)) def parse_directive(self, is_const: bool) -> DirectiveNode: """Directive[Const]: @ Name Arguments[?Const]?""" @@ -703,7 +706,7 @@ def parse_type_system_extension(self) -> TypeSystemExtensionNode: keyword_token = self._lexer.lookahead() if keyword_token.kind == TokenKind.NAME: method_name = self._parse_type_extension_method_names.get( - cast(str, keyword_token.value) + cast("str", keyword_token.value) ) if method_name: # pragma: no cover return getattr(self, f"parse_{method_name}")() diff --git a/src/graphql/language/predicates.py b/src/graphql/language/predicates.py index b65b1982..280662f8 100644 --- a/src/graphql/language/predicates.py +++ b/src/graphql/language/predicates.py @@ -26,17 +26,17 @@ __all__ = [ + "is_const_value_node", "is_definition_node", "is_executable_definition_node", "is_nullability_assertion_node", "is_selection_node", - "is_value_node", - "is_const_value_node", + "is_type_definition_node", + "is_type_extension_node", "is_type_node", "is_type_system_definition_node", - "is_type_definition_node", "is_type_system_extension_node", - "is_type_extension_node", + "is_value_node", ] diff --git a/src/graphql/language/print_location.py b/src/graphql/language/print_location.py index 03509732..21fb1b8a 100644 --- a/src/graphql/language/print_location.py +++ b/src/graphql/language/print_location.py @@ -73,7 +73,7 @@ def print_source_location(source: Source, source_location: SourceLocation) -> st def print_prefixed_lines(*lines: tuple[str, str | None]) -> str: """Print lines specified like this: ("prefix", "string")""" existing_lines = [ - cast(Tuple[str, str], line) for line in lines if line[1] is not None + cast("Tuple[str, str]", line) for line in lines if line[1] is not None ] pad_len = max(len(line[0]) for line in existing_lines) return "\n".join( diff --git 
a/src/graphql/language/source.py b/src/graphql/language/source.py index 01bb013f..d54bf969 100644 --- a/src/graphql/language/source.py +++ b/src/graphql/language/source.py @@ -21,7 +21,7 @@ class Source: """A representation of source input to GraphQL.""" # allow custom attributes and weak references (not used internally) - __slots__ = "__weakref__", "__dict__", "body", "name", "location_offset" + __slots__ = "__dict__", "__weakref__", "body", "location_offset", "name" def __init__( self, diff --git a/src/graphql/language/visitor.py b/src/graphql/language/visitor.py index be410466..c9901230 100644 --- a/src/graphql/language/visitor.py +++ b/src/graphql/language/visitor.py @@ -25,15 +25,15 @@ __all__ = [ - "Visitor", + "BREAK", + "IDLE", + "REMOVE", + "SKIP", "ParallelVisitor", + "Visitor", "VisitorAction", "VisitorKeyMap", "visit", - "BREAK", - "SKIP", - "REMOVE", - "IDLE", ] @@ -289,7 +289,7 @@ def visit( else: stack = Stack(in_array, idx, keys, edits, stack) in_array = isinstance(node, tuple) - keys = node if in_array else visitor_keys.get(node.kind, ()) + keys = node if in_array else visitor_keys.get(node.kind, ()) # type: ignore idx = -1 edits = [] if parent: diff --git a/src/graphql/pyutils/__init__.py b/src/graphql/pyutils/__init__.py index e1aefd6a..28ad1a92 100644 --- a/src/graphql/pyutils/__init__.py +++ b/src/graphql/pyutils/__init__.py @@ -33,34 +33,38 @@ from .print_path_list import print_path_list from .simple_pub_sub import SimplePubSub, SimplePubSubIterator from .undefined import Undefined, UndefinedType +from .ref_map import RefMap +from .ref_set import RefSet __all__ = [ + "AwaitableOrValue", + "Description", + "FrozenError", + "Path", + "RefMap", + "RefSet", + "SimplePubSub", + "SimplePubSubIterator", + "Undefined", + "UndefinedType", + "and_list", "async_reduce", - "camel_to_snake", - "snake_to_camel", "cached_property", + "camel_to_snake", "did_you_mean", - "or_list", - "and_list", - "Description", "group_by", - "is_description", - "register_description", - "unregister_description", "identity_func", "inspect", "is_awaitable", "is_collection", + "is_description", "is_iterable", "merge_kwargs", "natural_comparison_key", - "AwaitableOrValue", - "suggestion_list", - "FrozenError", - "Path", + "or_list", "print_path_list", - "SimplePubSub", - "SimplePubSubIterator", - "Undefined", - "UndefinedType", + "register_description", + "snake_to_camel", + "suggestion_list", + "unregister_description", ] diff --git a/src/graphql/pyutils/async_reduce.py b/src/graphql/pyutils/async_reduce.py index 33d97f9c..4eb79748 100644 --- a/src/graphql/pyutils/async_reduce.py +++ b/src/graphql/pyutils/async_reduce.py @@ -36,10 +36,12 @@ def async_reduce( async def async_callback( current_accumulator: Awaitable[U], current_value: T ) -> U: - result = callback(await current_accumulator, current_value) - return await cast(Awaitable, result) if is_awaitable(result) else result + result: AwaitableOrValue[U] = callback( + await current_accumulator, current_value + ) + return await result if is_awaitable(result) else result # type: ignore - accumulator = async_callback(cast(Awaitable[U], accumulator), value) + accumulator = async_callback(cast("Awaitable[U]", accumulator), value) else: - accumulator = callback(cast(U, accumulator), value) + accumulator = callback(cast("U", accumulator), value) return accumulator diff --git a/src/graphql/pyutils/description.py b/src/graphql/pyutils/description.py index 812d61fe..9d43a86d 100644 --- a/src/graphql/pyutils/description.py +++ 
b/src/graphql/pyutils/description.py
@@ -51,7 +51,7 @@ def unregister(cls, base: type) -> None:
             msg = "Only types can be unregistered."
             raise TypeError(msg)
         if isinstance(cls.bases, tuple):
-            if base in cls.bases:
+            if base in cls.bases:  # pragma: no branch
                 cls.bases = tuple(b for b in cls.bases if b is not base)
                 if not cls.bases:
                     cls.bases = object
diff --git a/src/graphql/pyutils/format_list.py b/src/graphql/pyutils/format_list.py
index 87184728..368e7ae0 100644
--- a/src/graphql/pyutils/format_list.py
+++ b/src/graphql/pyutils/format_list.py
@@ -4,7 +4,7 @@
 
 from typing import Sequence
 
-__all__ = ["or_list", "and_list"]
+__all__ = ["and_list", "or_list"]
 
 
 def or_list(items: Sequence[str]) -> str:
diff --git a/src/graphql/pyutils/identity_func.py b/src/graphql/pyutils/identity_func.py
index 2876c570..1a13936b 100644
--- a/src/graphql/pyutils/identity_func.py
+++ b/src/graphql/pyutils/identity_func.py
@@ -11,7 +11,7 @@
 
 T = TypeVar("T")
 
-DEFAULT_VALUE = cast(Any, Undefined)
+DEFAULT_VALUE = cast("Any", Undefined)
 
 
 def identity_func(x: T = DEFAULT_VALUE, *_args: Any) -> T:
diff --git a/src/graphql/pyutils/is_awaitable.py b/src/graphql/pyutils/is_awaitable.py
index ce8c93c0..158bcd40 100644
--- a/src/graphql/pyutils/is_awaitable.py
+++ b/src/graphql/pyutils/is_awaitable.py
@@ -27,8 +27,10 @@ def is_awaitable(value: Any) -> TypeGuard[Awaitable]:
         # check for coroutine objects
         isinstance(value, CoroutineType)
         # check for old-style generator based coroutine objects
-        or isinstance(value, GeneratorType)  # for Python < 3.11
-        and bool(value.gi_code.co_flags & CO_ITERABLE_COROUTINE)
+        or (
+            isinstance(value, GeneratorType)  # for Python < 3.11
+            and bool(value.gi_code.co_flags & CO_ITERABLE_COROUTINE)
+        )
         # check for other awaitables (e.g. futures)
         or hasattr(value, "__await__")
     )
diff --git a/src/graphql/pyutils/merge_kwargs.py b/src/graphql/pyutils/merge_kwargs.py
index c7cace3e..21144524 100644
--- a/src/graphql/pyutils/merge_kwargs.py
+++ b/src/graphql/pyutils/merge_kwargs.py
@@ -9,4 +9,4 @@
 
 def merge_kwargs(base_dict: T, **kwargs: Any) -> T:
     """Return arbitrary typed dictionary with some keyword args merged in."""
-    return cast(T, {**cast(Dict, base_dict), **kwargs})
+    return cast("T", {**cast("Dict", base_dict), **kwargs})
diff --git a/src/graphql/pyutils/ref_map.py b/src/graphql/pyutils/ref_map.py
new file mode 100644
index 00000000..0cffd533
--- /dev/null
+++ b/src/graphql/pyutils/ref_map.py
@@ -0,0 +1,79 @@
+"""A Map class that works similarly to JavaScript."""
+
+from __future__ import annotations
+
+from collections.abc import MutableMapping
+
+try:
+    MutableMapping[str, int]
+except TypeError:  # Python < 3.9
+    from typing import MutableMapping
+from typing import Any, Iterable, Iterator, TypeVar
+
+__all__ = ["RefMap"]
+
+K = TypeVar("K")
+V = TypeVar("V")
+
+
+class RefMap(MutableMapping[K, V]):
+    """A dictionary-like object that allows mutable objects as keys.
+
+    This class keeps the insertion order like a normal dictionary.
+
+    Note that the implementation is limited to what is needed internally.
+    """
+
+    _map: dict[int, tuple[K, V]]
+
+    def __init__(self, items: Iterable[tuple[K, V]] | None = None) -> None:
+        super().__init__()
+        self._map = {}
+        if items:
+            self.update(items)
+
+    def __setitem__(self, key: K, value: V) -> None:
+        self._map[id(key)] = (key, value)
+
+    def __getitem__(self, key: K) -> Any:
+        return self._map[id(key)][1]
+
+    def __delitem__(self, key: K) -> None:
+        del self._map[id(key)]
+
+    def __contains__(self, key: Any) -> bool:
+        return id(key) in self._map
+
+    def __len__(self) -> int:
+        return len(self._map)
+
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}({list(self.items())!r})"
+
+    def get(self, key: Any, default: Any = None) -> Any:
+        """Get the mapped value for the given key."""
+        try:
+            return self._map[id(key)][1]
+        except KeyError:
+            return default
+
+    def __iter__(self) -> Iterator[K]:
+        return self.keys()
+
+    def keys(self) -> Iterator[K]:  # type: ignore
+        """Return an iterator over the keys of the map."""
+        return (item[0] for item in self._map.values())
+
+    def values(self) -> Iterator[V]:  # type: ignore
+        """Return an iterator over the values of the map."""
+        return (item[1] for item in self._map.values())
+
+    def items(self) -> Iterator[tuple[K, V]]:  # type: ignore
+        """Return an iterator over the key/value-pairs of the map."""
+        return self._map.values()  # type: ignore
+
+    def update(self, items: Iterable[tuple[K, V]] | None = None) -> None:  # type: ignore
+        """Update the map with the given key/value-pairs."""
+        if items:
+            for key, value in items:
+                self[key] = value
diff --git a/src/graphql/pyutils/ref_set.py b/src/graphql/pyutils/ref_set.py
new file mode 100644
index 00000000..731c021d
--- /dev/null
+++ b/src/graphql/pyutils/ref_set.py
@@ -0,0 +1,67 @@
+"""A Set class that works similarly to JavaScript."""
+
+from __future__ import annotations
+
+from collections.abc import MutableSet
+
+try:
+    MutableSet[int]
+except TypeError:  # Python < 3.9
+    from typing import MutableSet
+from contextlib import suppress
+from typing import Any, Iterable, Iterator, TypeVar
+
+from .ref_map import RefMap
+
+__all__ = ["RefSet"]
+
+
+T = TypeVar("T")
+
+
+class RefSet(MutableSet[T]):
+    """A set-like object that allows mutable objects as elements.
+
+    This class keeps the insertion order, unlike a normal set.
+
+    Note that the implementation is limited to what is needed internally.
+ """ + + _map: RefMap[T, None] + + def __init__(self, values: Iterable[T] | None = None) -> None: + super().__init__() + self._map = RefMap() + if values: + self.update(values) + + def __contains__(self, key: Any) -> bool: + return key in self._map + + def __iter__(self) -> Iterator[T]: + return iter(self._map) + + def __len__(self) -> int: + return len(self._map) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({list(self)!r})" + + def add(self, value: T) -> None: + """Add the given item to the set.""" + self._map[value] = None + + def remove(self, value: T) -> None: + """Remove the given item from the set.""" + del self._map[value] + + def discard(self, value: T) -> None: + """Remove the given item from the set if it exists.""" + with suppress(KeyError): + self.remove(value) + + def update(self, values: Iterable[T] | None = None) -> None: + """Update the set with the given items.""" + if values: + for item in values: + self.add(item) diff --git a/src/graphql/pyutils/suggestion_list.py b/src/graphql/pyutils/suggestion_list.py index 6abeefed..35240c77 100644 --- a/src/graphql/pyutils/suggestion_list.py +++ b/src/graphql/pyutils/suggestion_list.py @@ -99,8 +99,7 @@ def measure(self, option: str, threshold: int) -> int | None: double_diagonal_cell = rows[(i - 2) % 3][j - 2] current_cell = min(current_cell, double_diagonal_cell + 1) - if current_cell < smallest_cell: - smallest_cell = current_cell + smallest_cell = min(current_cell, smallest_cell) current_row[j] = current_cell diff --git a/src/graphql/type/__init__.py b/src/graphql/type/__init__.py index 4db6516d..8c41bd28 100644 --- a/src/graphql/type/__init__.py +++ b/src/graphql/type/__init__.py @@ -137,6 +137,7 @@ GraphQLStreamDirective, GraphQLDeprecatedDirective, GraphQLSpecifiedByDirective, + GraphQLOneOfDirective, # Keyword Args GraphQLDirectiveKwargs, # Constant Deprecation Reason @@ -176,133 +177,134 @@ from .validate import validate_schema, assert_valid_schema __all__ = [ - "is_schema", - "assert_schema", - "assert_name", - "assert_enum_value_name", - "GraphQLSchema", - "GraphQLSchemaKwargs", - "is_type", - "is_scalar_type", - "is_object_type", - "is_interface_type", - "is_union_type", - "is_enum_type", - "is_input_object_type", - "is_list_type", - "is_non_null_type", - "is_input_type", - "is_output_type", - "is_leaf_type", - "is_composite_type", - "is_abstract_type", - "is_wrapping_type", - "is_nullable_type", - "is_named_type", - "is_required_argument", - "is_required_input_field", - "assert_type", - "assert_scalar_type", - "assert_object_type", - "assert_interface_type", - "assert_union_type", - "assert_enum_type", - "assert_input_object_type", - "assert_list_type", - "assert_non_null_type", - "assert_input_type", - "assert_output_type", - "assert_leaf_type", - "assert_composite_type", - "assert_abstract_type", - "assert_wrapping_type", - "assert_nullable_type", - "assert_named_type", - "get_nullable_type", - "get_named_type", - "resolve_thunk", - "GraphQLScalarType", - "GraphQLObjectType", - "GraphQLInterfaceType", - "GraphQLUnionType", - "GraphQLEnumType", - "GraphQLInputObjectType", - "GraphQLInputType", - "GraphQLArgument", - "GraphQLList", - "GraphQLNonNull", - "GraphQLType", - "GraphQLInputType", - "GraphQLOutputType", - "GraphQLLeafType", - "GraphQLCompositeType", + "DEFAULT_DEPRECATION_REASON", + "GRAPHQL_MAX_INT", + "GRAPHQL_MIN_INT", "GraphQLAbstractType", - "GraphQLWrappingType", - "GraphQLNullableType", - "GraphQLNullableInputType", - "GraphQLNullableOutputType", - "GraphQLNamedType", - 
"GraphQLNamedInputType", - "GraphQLNamedOutputType", - "Thunk", - "ThunkCollection", - "ThunkMapping", "GraphQLArgument", + "GraphQLArgument", + "GraphQLArgumentKwargs", "GraphQLArgumentMap", + "GraphQLBoolean", + "GraphQLCompositeType", + "GraphQLDeferDirective", + "GraphQLDeprecatedDirective", + "GraphQLDirective", + "GraphQLDirectiveKwargs", + "GraphQLEnumType", + "GraphQLEnumTypeKwargs", "GraphQLEnumValue", + "GraphQLEnumValueKwargs", "GraphQLEnumValueMap", "GraphQLField", + "GraphQLFieldKwargs", "GraphQLFieldMap", + "GraphQLFieldResolver", + "GraphQLFloat", + "GraphQLID", + "GraphQLIncludeDirective", "GraphQLInputField", + "GraphQLInputFieldKwargs", "GraphQLInputFieldMap", "GraphQLInputFieldOutType", - "GraphQLScalarSerializer", - "GraphQLScalarValueParser", - "GraphQLScalarLiteralParser", - "GraphQLArgumentKwargs", - "GraphQLEnumTypeKwargs", - "GraphQLEnumValueKwargs", - "GraphQLFieldKwargs", - "GraphQLInputFieldKwargs", + "GraphQLInputObjectType", "GraphQLInputObjectTypeKwargs", + "GraphQLInputType", + "GraphQLInputType", + "GraphQLInt", + "GraphQLInterfaceType", "GraphQLInterfaceTypeKwargs", + "GraphQLIsTypeOfFn", + "GraphQLLeafType", + "GraphQLList", + "GraphQLNamedInputType", + "GraphQLNamedOutputType", + "GraphQLNamedType", "GraphQLNamedTypeKwargs", + "GraphQLNonNull", + "GraphQLNullableInputType", + "GraphQLNullableOutputType", + "GraphQLNullableType", + "GraphQLObjectType", "GraphQLObjectTypeKwargs", - "GraphQLScalarTypeKwargs", - "GraphQLUnionTypeKwargs", - "GraphQLFieldResolver", - "GraphQLTypeResolver", - "GraphQLIsTypeOfFn", + "GraphQLOneOfDirective", + "GraphQLOutputType", "GraphQLResolveInfo", - "ResponsePath", - "is_directive", - "assert_directive", - "is_specified_directive", - "specified_directives", - "GraphQLDirective", - "GraphQLIncludeDirective", + "GraphQLScalarLiteralParser", + "GraphQLScalarSerializer", + "GraphQLScalarType", + "GraphQLScalarTypeKwargs", + "GraphQLScalarValueParser", + "GraphQLSchema", + "GraphQLSchemaKwargs", "GraphQLSkipDirective", - "GraphQLDeferDirective", - "GraphQLStreamDirective", - "GraphQLDeprecatedDirective", "GraphQLSpecifiedByDirective", - "GraphQLDirectiveKwargs", - "DEFAULT_DEPRECATION_REASON", - "is_specified_scalar_type", - "specified_scalar_types", - "GraphQLInt", - "GraphQLFloat", + "GraphQLStreamDirective", "GraphQLString", - "GraphQLBoolean", - "GraphQLID", - "GRAPHQL_MAX_INT", - "GRAPHQL_MIN_INT", - "is_introspection_type", - "introspection_types", - "TypeKind", + "GraphQLType", + "GraphQLTypeResolver", + "GraphQLUnionType", + "GraphQLUnionTypeKwargs", + "GraphQLWrappingType", + "ResponsePath", "SchemaMetaFieldDef", + "Thunk", + "ThunkCollection", + "ThunkMapping", + "TypeKind", "TypeMetaFieldDef", "TypeNameMetaFieldDef", - "validate_schema", + "assert_abstract_type", + "assert_composite_type", + "assert_directive", + "assert_enum_type", + "assert_enum_value_name", + "assert_input_object_type", + "assert_input_type", + "assert_interface_type", + "assert_leaf_type", + "assert_list_type", + "assert_name", + "assert_named_type", + "assert_non_null_type", + "assert_nullable_type", + "assert_object_type", + "assert_output_type", + "assert_scalar_type", + "assert_schema", + "assert_type", + "assert_union_type", "assert_valid_schema", + "assert_wrapping_type", + "get_named_type", + "get_nullable_type", + "introspection_types", + "is_abstract_type", + "is_composite_type", + "is_directive", + "is_enum_type", + "is_input_object_type", + "is_input_type", + "is_interface_type", + "is_introspection_type", + "is_leaf_type", + 
"is_list_type", + "is_named_type", + "is_non_null_type", + "is_nullable_type", + "is_object_type", + "is_output_type", + "is_required_argument", + "is_required_input_field", + "is_scalar_type", + "is_schema", + "is_specified_directive", + "is_specified_scalar_type", + "is_type", + "is_union_type", + "is_wrapping_type", + "resolve_thunk", + "specified_directives", + "specified_scalar_types", + "validate_schema", ] diff --git a/src/graphql/type/assert_name.py b/src/graphql/type/assert_name.py index b7e94e2d..1a8f7689 100644 --- a/src/graphql/type/assert_name.py +++ b/src/graphql/type/assert_name.py @@ -3,7 +3,7 @@ from ..error import GraphQLError from ..language.character_classes import is_name_continue, is_name_start -__all__ = ["assert_name", "assert_enum_value_name"] +__all__ = ["assert_enum_value_name", "assert_name"] def assert_name(name: str) -> str: diff --git a/src/graphql/type/definition.py b/src/graphql/type/definition.py index dbca4e66..c334488d 100644 --- a/src/graphql/type/definition.py +++ b/src/graphql/type/definition.py @@ -2,7 +2,6 @@ from __future__ import annotations -from enum import Enum from typing import ( TYPE_CHECKING, Any, @@ -19,6 +18,18 @@ overload, ) +try: + from typing import TypedDict +except ImportError: # Python < 3.8 + from typing_extensions import TypedDict +try: + from typing import TypeAlias, TypeGuard +except ImportError: # Python < 3.10 + from typing_extensions import TypeAlias, TypeGuard + +if TYPE_CHECKING: + from enum import Enum + from ..error import GraphQLError from ..language import ( EnumTypeDefinitionNode, @@ -57,58 +68,11 @@ from ..utilities.value_from_ast_untyped import value_from_ast_untyped from .assert_name import assert_enum_value_name, assert_name -try: - from typing import TypedDict -except ImportError: # Python < 3.8 - from typing_extensions import TypedDict -try: - from typing import TypeAlias, TypeGuard -except ImportError: # Python < 3.10 - from typing_extensions import TypeAlias, TypeGuard - if TYPE_CHECKING: from .schema import GraphQLSchema + __all__ = [ - "is_type", - "is_scalar_type", - "is_object_type", - "is_interface_type", - "is_union_type", - "is_enum_type", - "is_input_object_type", - "is_list_type", - "is_non_null_type", - "is_input_type", - "is_output_type", - "is_leaf_type", - "is_composite_type", - "is_abstract_type", - "is_wrapping_type", - "is_nullable_type", - "is_named_type", - "is_required_argument", - "is_required_input_field", - "assert_type", - "assert_scalar_type", - "assert_object_type", - "assert_interface_type", - "assert_union_type", - "assert_enum_type", - "assert_input_object_type", - "assert_list_type", - "assert_non_null_type", - "assert_input_type", - "assert_output_type", - "assert_leaf_type", - "assert_composite_type", - "assert_abstract_type", - "assert_wrapping_type", - "assert_nullable_type", - "assert_named_type", - "get_nullable_type", - "get_named_type", - "resolve_thunk", "GraphQLAbstractType", "GraphQLArgument", "GraphQLArgumentKwargs", @@ -135,23 +99,23 @@ "GraphQLIsTypeOfFn", "GraphQLLeafType", "GraphQLList", - "GraphQLNamedType", - "GraphQLNamedTypeKwargs", "GraphQLNamedInputType", "GraphQLNamedOutputType", - "GraphQLNullableType", + "GraphQLNamedType", + "GraphQLNamedTypeKwargs", + "GraphQLNonNull", "GraphQLNullableInputType", "GraphQLNullableOutputType", - "GraphQLNonNull", + "GraphQLNullableType", + "GraphQLObjectType", + "GraphQLObjectTypeKwargs", + "GraphQLOutputType", "GraphQLResolveInfo", + "GraphQLScalarLiteralParser", + "GraphQLScalarSerializer", "GraphQLScalarType", 
"GraphQLScalarTypeKwargs", - "GraphQLScalarSerializer", "GraphQLScalarValueParser", - "GraphQLScalarLiteralParser", - "GraphQLObjectType", - "GraphQLObjectTypeKwargs", - "GraphQLOutputType", "GraphQLType", "GraphQLTypeResolver", "GraphQLUnionType", @@ -160,6 +124,45 @@ "Thunk", "ThunkCollection", "ThunkMapping", + "assert_abstract_type", + "assert_composite_type", + "assert_enum_type", + "assert_input_object_type", + "assert_input_type", + "assert_interface_type", + "assert_leaf_type", + "assert_list_type", + "assert_named_type", + "assert_non_null_type", + "assert_nullable_type", + "assert_object_type", + "assert_output_type", + "assert_scalar_type", + "assert_type", + "assert_union_type", + "assert_wrapping_type", + "get_named_type", + "get_nullable_type", + "is_abstract_type", + "is_composite_type", + "is_enum_type", + "is_input_object_type", + "is_input_type", + "is_interface_type", + "is_leaf_type", + "is_list_type", + "is_named_type", + "is_non_null_type", + "is_nullable_type", + "is_object_type", + "is_output_type", + "is_required_argument", + "is_required_input_field", + "is_scalar_type", + "is_type", + "is_union_type", + "is_wrapping_type", + "resolve_thunk", ] @@ -386,8 +389,7 @@ def __init__( self.parse_literal = parse_literal # type: ignore if parse_literal is not None and parse_value is None: msg = ( - f"{name} must provide" - " both 'parse_value' and 'parse_literal' functions." + f"{name} must provide both 'parse_value' and 'parse_literal' functions." ) raise TypeError(msg) self.specified_by_url = specified_by_url @@ -504,7 +506,7 @@ def __init__( args = { assert_name(name): value if isinstance(value, GraphQLArgument) - else GraphQLArgument(cast(GraphQLInputType, value)) + else GraphQLArgument(cast("GraphQLInputType", value)) for name, value in args.items() } else: @@ -790,7 +792,7 @@ def fields(self) -> GraphQLFieldMap: return { assert_name(name): value if isinstance(value, GraphQLField) - else GraphQLField(value) # type: ignore + else GraphQLField(value) for name, value in fields.items() } @@ -895,7 +897,7 @@ def fields(self) -> GraphQLFieldMap: return { assert_name(name): value if isinstance(value, GraphQLField) - else GraphQLField(value) # type: ignore + else GraphQLField(value) for name, value in fields.items() } @@ -1078,7 +1080,7 @@ def __init__( extension_ast_nodes=extension_ast_nodes, ) try: # check for enum - values = cast(Enum, values).__members__ # type: ignore + values = cast("Enum", values).__members__ # type: ignore except AttributeError: if not isinstance(values, Mapping) or not all( isinstance(name, str) for name in values @@ -1091,9 +1093,9 @@ def __init__( " with value names as keys." 
) raise TypeError(msg) from error - values = cast(Dict[str, Any], values) + values = cast("Dict[str, Any]", values) else: - values = cast(Dict[str, Enum], values) + values = cast("Dict[str, Enum]", values) if names_as_values is False: values = {key: value.value for key, value in values.items()} elif names_as_values is True: @@ -1272,6 +1274,7 @@ class GraphQLInputObjectTypeKwargs(GraphQLNamedTypeKwargs, total=False): fields: GraphQLInputFieldMap out_type: GraphQLInputFieldOutType | None + is_one_of: bool class GraphQLInputObjectType(GraphQLNamedType): @@ -1284,7 +1287,7 @@ class GraphQLInputObjectType(GraphQLNamedType): Example:: - NonNullFloat = GraphQLNonNull(GraphQLFloat()) + NonNullFloat = GraphQLNonNull(GraphQLFloat) class GeoPoint(GraphQLInputObjectType): name = 'GeoPoint' @@ -1292,7 +1295,7 @@ class GeoPoint(GraphQLInputObjectType): 'lat': GraphQLInputField(NonNullFloat), 'lon': GraphQLInputField(NonNullFloat), 'alt': GraphQLInputField( - GraphQLFloat(), default_value=0) + GraphQLFloat, default_value=0) } The outbound values will be Python dictionaries by default, but you can have them @@ -1301,6 +1304,7 @@ class GeoPoint(GraphQLInputObjectType): ast_node: InputObjectTypeDefinitionNode | None extension_ast_nodes: tuple[InputObjectTypeExtensionNode, ...] + is_one_of: bool def __init__( self, @@ -1311,6 +1315,7 @@ def __init__( extensions: dict[str, Any] | None = None, ast_node: InputObjectTypeDefinitionNode | None = None, extension_ast_nodes: Collection[InputObjectTypeExtensionNode] | None = None, + is_one_of: bool = False, ) -> None: super().__init__( name=name, @@ -1322,6 +1327,7 @@ def __init__( self._fields = fields if out_type is not None: self.out_type = out_type # type: ignore + self.is_one_of = is_one_of @staticmethod def out_type(value: dict[str, Any]) -> Any: @@ -1340,6 +1346,7 @@ def to_kwargs(self) -> GraphQLInputObjectTypeKwargs: out_type=None if self.out_type is GraphQLInputObjectType.out_type else self.out_type, + is_one_of=self.is_one_of, ) def __copy__(self) -> GraphQLInputObjectType: # pragma: no cover @@ -1357,7 +1364,7 @@ def fields(self) -> GraphQLInputFieldMap: return { assert_name(name): value if isinstance(value, GraphQLInputField) - else GraphQLInputField(value) # type: ignore + else GraphQLInputField(value) for name, value in fields.items() } @@ -1507,7 +1514,7 @@ class GraphQLNonNull(GraphQLWrappingType[GNT_co]): class RowType(GraphQLObjectType): name = 'Row' fields = { - 'id': GraphQLField(GraphQLNonNull(GraphQLString())) + 'id': GraphQLField(GraphQLNonNull(GraphQLString)) } Note: the enforcement of non-nullability occurs within the executor. @@ -1658,7 +1665,7 @@ def get_nullable_type( """Unwrap possible non-null type""" if is_non_null_type(type_): type_ = type_.of_type - return cast(Optional[GraphQLNullableType], type_) + return cast("Optional[GraphQLNullableType]", type_) # These named types do not include modifiers like List or NonNull. 
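The `is_one_of` flag threaded through `GraphQLInputObjectTypeKwargs`, the constructor, and `to_kwargs()` above marks an input object as a OneOf input object (the `@oneOf` directive and its validation rules appear further down in this diff). A minimal sketch of the new keyword argument in use; `PetLookup` is a made-up example type, not taken from the diff:

    from graphql import (
        GraphQLID,
        GraphQLInputField,
        GraphQLInputObjectType,
        GraphQLString,
    )

    # A OneOf input object: clients must supply exactly one of these
    # nullable, default-free fields, and its value must not be null
    # (enforced by the validation and coercion changes in this diff).
    PetLookup = GraphQLInputObjectType(
        name="PetLookup",
        fields={
            "id": GraphQLInputField(GraphQLID),
            "name": GraphQLInputField(GraphQLString),
        },
        is_one_of=True,
    )

    assert PetLookup.to_kwargs()["is_one_of"] is True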
@@ -1703,7 +1710,7 @@ def get_named_type(type_: GraphQLType | None) -> GraphQLNamedType | None: unwrapped_type = type_ while is_wrapping_type(unwrapped_type): unwrapped_type = unwrapped_type.of_type - return cast(GraphQLNamedType, unwrapped_type) + return cast("GraphQLNamedType", unwrapped_type) return None diff --git a/src/graphql/type/directives.py b/src/graphql/type/directives.py index 17e8083c..ecd201c2 100644 --- a/src/graphql/type/directives.py +++ b/src/graphql/type/directives.py @@ -20,20 +20,20 @@ from typing_extensions import TypeGuard __all__ = [ - "is_directive", - "assert_directive", - "is_specified_directive", - "specified_directives", + "DEFAULT_DEPRECATION_REASON", + "DirectiveLocation", "GraphQLDeferDirective", + "GraphQLDeprecatedDirective", "GraphQLDirective", "GraphQLDirectiveKwargs", "GraphQLIncludeDirective", "GraphQLSkipDirective", - "GraphQLStreamDirective", - "GraphQLDeprecatedDirective", "GraphQLSpecifiedByDirective", - "DirectiveLocation", - "DEFAULT_DEPRECATION_REASON", + "GraphQLStreamDirective", + "assert_directive", + "is_directive", + "is_specified_directive", + "specified_directives", ] @@ -79,7 +79,7 @@ def __init__( locations = tuple( value if isinstance(value, DirectiveLocation) - else DirectiveLocation[cast(str, value)] + else DirectiveLocation[cast("str", value)] for value in locations ) except (KeyError, TypeError) as error: @@ -92,7 +92,7 @@ def __init__( args = { assert_name(name): value if isinstance(value, GraphQLArgument) - else GraphQLArgument(cast(GraphQLInputType, value)) + else GraphQLArgument(cast("GraphQLInputType", value)) for name, value in args.items() } else: @@ -248,17 +248,26 @@ def assert_directive(directive: Any) -> GraphQLDirective: description="Marks an element of a GraphQL schema as no longer supported.", ) -# Used to provide a URL for specifying the behaviour of custom scalar definitions: +# Used to provide a URL for specifying the behavior of custom scalar definitions: GraphQLSpecifiedByDirective = GraphQLDirective( name="specifiedBy", locations=[DirectiveLocation.SCALAR], args={ "url": GraphQLArgument( GraphQLNonNull(GraphQLString), - description="The URL that specifies the behaviour of this scalar.", + description="The URL that specifies the behavior of this scalar.", ) }, - description="Exposes a URL that specifies the behaviour of this scalar.", + description="Exposes a URL that specifies the behavior of this scalar.", +) + +# Used to indicate an Input Object is a OneOf Input Object. +GraphQLOneOfDirective = GraphQLDirective( + name="oneOf", + locations=[DirectiveLocation.INPUT_OBJECT], + args={}, + description="Indicates exactly one field must be supplied" + " and this field must not be `null`.", ) specified_directives: tuple[GraphQLDirective, ...] 
= ( @@ -266,6 +275,7 @@ def assert_directive(directive: Any) -> GraphQLDirective: GraphQLSkipDirective, GraphQLDeprecatedDirective, GraphQLSpecifiedByDirective, + GraphQLOneOfDirective, ) """A tuple with all directives from the GraphQL specification""" diff --git a/src/graphql/type/introspection.py b/src/graphql/type/introspection.py index 866a0499..313c3679 100644 --- a/src/graphql/type/introspection.py +++ b/src/graphql/type/introspection.py @@ -305,6 +305,7 @@ def __new__(cls): resolve=cls.input_fields, ), "ofType": GraphQLField(_Type, resolve=cls.of_type), + "isOneOf": GraphQLField(GraphQLBoolean, resolve=cls.is_one_of), } @staticmethod @@ -396,6 +397,10 @@ def input_fields(type_, _info, includeDeprecated=False): def of_type(type_, _info): return getattr(type_, "of_type", None) + @staticmethod + def is_one_of(type_, _info): + return type_.is_one_of if is_input_object_type(type_) else None + _Type: GraphQLObjectType = GraphQLObjectType( name="__Type", @@ -634,8 +639,7 @@ class TypeKind(Enum): ), "NON_NULL": GraphQLEnumValue( TypeKind.NON_NULL, - description="Indicates this type is a non-null." - " `ofType` is a valid field.", + description="Indicates this type is a non-null. `ofType` is a valid field.", ), }, ) diff --git a/src/graphql/type/scalars.py b/src/graphql/type/scalars.py index 22669c80..d35e6e26 100644 --- a/src/graphql/type/scalars.py +++ b/src/graphql/type/scalars.py @@ -23,15 +23,15 @@ from typing_extensions import TypeGuard __all__ = [ - "is_specified_scalar_type", - "specified_scalar_types", - "GraphQLInt", - "GraphQLFloat", - "GraphQLString", - "GraphQLBoolean", - "GraphQLID", "GRAPHQL_MAX_INT", "GRAPHQL_MIN_INT", + "GraphQLBoolean", + "GraphQLFloat", + "GraphQLID", + "GraphQLInt", + "GraphQLString", + "is_specified_scalar_type", + "specified_scalar_types", ] # As per the GraphQL Spec, Integers are only treated as valid @@ -315,7 +315,7 @@ def parse_id_literal(value_node: ValueNode, _variables: Any = None) -> str: GraphQLBoolean, GraphQLID, ) -} +} # pyright: ignore def is_specified_scalar_type(type_: GraphQLNamedType) -> TypeGuard[GraphQLScalarType]: diff --git a/src/graphql/type/schema.py b/src/graphql/type/schema.py index 5e546298..f8ab756b 100644 --- a/src/graphql/type/schema.py +++ b/src/graphql/type/schema.py @@ -21,6 +21,7 @@ GraphQLAbstractType, GraphQLCompositeType, GraphQLField, + GraphQLInputType, GraphQLInterfaceType, GraphQLNamedType, GraphQLObjectType, @@ -49,7 +50,7 @@ except ImportError: # Python < 3.10 from typing_extensions import TypeAlias, TypeGuard -__all__ = ["GraphQLSchema", "GraphQLSchemaKwargs", "is_schema", "assert_schema"] +__all__ = ["GraphQLSchema", "GraphQLSchemaKwargs", "assert_schema", "is_schema"] TypeMap: TypeAlias = Dict[str, GraphQLNamedType] @@ -293,12 +294,15 @@ def __deepcopy__(self, memo_: dict) -> GraphQLSchema: directive if is_specified_directive(directive) else copy(directive) for directive in self.directives ] + for directive in directives: + remap_directive(directive, type_map) return self.__class__( - self.query_type and cast(GraphQLObjectType, type_map[self.query_type.name]), + self.query_type + and cast("GraphQLObjectType", type_map[self.query_type.name]), self.mutation_type - and cast(GraphQLObjectType, type_map[self.mutation_type.name]), + and cast("GraphQLObjectType", type_map[self.mutation_type.name]), self.subscription_type - and cast(GraphQLObjectType, type_map[self.subscription_type.name]), + and cast("GraphQLObjectType", type_map[self.subscription_type.name]), types, directives, self.description, @@ -324,7 +328,7 @@ 
def get_possible_types( abstract_type.types if is_union_type(abstract_type) else self.get_implementations( - cast(GraphQLInterfaceType, abstract_type) + cast("GraphQLInterfaceType", abstract_type) ).objects ) @@ -351,7 +355,7 @@ def is_sub_type( add(type_.name) else: implementations = self.get_implementations( - cast(GraphQLInterfaceType, abstract_type) + cast("GraphQLInterfaceType", abstract_type) ) for type_ in implementations.objects: add(type_.name) @@ -407,7 +411,7 @@ class TypeSet(Dict[GraphQLNamedType, None]): @classmethod def with_initial_types(cls, types: Collection[GraphQLType]) -> TypeSet: - return cast(TypeSet, super().fromkeys(types)) + return cast("TypeSet", super().fromkeys(types)) def collect_referenced_types(self, type_: GraphQLType) -> None: """Recursive function supplementing the type starting from an initial type.""" @@ -452,17 +456,13 @@ def remapped_type(type_: GraphQLType, type_map: TypeMap) -> GraphQLType: """Get a copy of the given type that uses this type map.""" if is_wrapping_type(type_): return type_.__class__(remapped_type(type_.of_type, type_map)) - type_ = cast(GraphQLNamedType, type_) + type_ = cast("GraphQLNamedType", type_) return type_map.get(type_.name, type_) def remap_named_type(type_: GraphQLNamedType, type_map: TypeMap) -> None: """Change all references in the given named type to use this type map.""" - if is_union_type(type_): - type_.types = [ - type_map.get(member_type.name, member_type) for member_type in type_.types - ] - elif is_object_type(type_) or is_interface_type(type_): + if is_object_type(type_) or is_interface_type(type_): type_.interfaces = [ type_map.get(interface_type.name, interface_type) for interface_type in type_.interfaces @@ -477,9 +477,22 @@ def remap_named_type(type_: GraphQLNamedType, type_map: TypeMap) -> None: arg.type = remapped_type(arg.type, type_map) args[arg_name] = arg fields[field_name] = field + elif is_union_type(type_): + type_.types = [ + type_map.get(member_type.name, member_type) for member_type in type_.types + ] elif is_input_object_type(type_): fields = type_.fields for field_name, field in fields.items(): field = copy(field) # noqa: PLW2901 field.type = remapped_type(field.type, type_map) fields[field_name] = field + + +def remap_directive(directive: GraphQLDirective, type_map: TypeMap) -> None: + """Change all references in the given directive to use this type map.""" + args = directive.args + for arg_name, arg in args.items(): + arg = copy(arg) # noqa: PLW2901 + arg.type = cast("GraphQLInputType", remapped_type(arg.type, type_map)) + args[arg_name] = arg diff --git a/src/graphql/type/validate.py b/src/graphql/type/validate.py index 8a6b7257..9b22f44e 100644 --- a/src/graphql/type/validate.py +++ b/src/graphql/type/validate.py @@ -16,7 +16,7 @@ SchemaDefinitionNode, SchemaExtensionNode, ) -from ..pyutils import and_list, inspect +from ..pyutils import Undefined, and_list, inspect from ..utilities.type_comparators import is_equal_type, is_type_sub_type_of from .definition import ( GraphQLEnumType, @@ -41,7 +41,7 @@ from .introspection import is_introspection_type from .schema import GraphQLSchema, assert_schema -__all__ = ["validate_schema", "assert_valid_schema"] +__all__ = ["assert_valid_schema", "validate_schema"] def validate_schema(schema: GraphQLSchema) -> list[GraphQLError]: @@ -101,7 +101,7 @@ def report_error( ) -> None: if nodes and not isinstance(nodes, Node): nodes = [node for node in nodes if node] - nodes = cast(Optional[Collection[Node]], nodes) + nodes = cast("Optional[Collection[Node]]", 
nodes) self.errors.append(GraphQLError(message, nodes)) def validate_root_types(self) -> None: @@ -183,7 +183,7 @@ def validate_name(self, node: Any, name: str | None = None) -> None: try: if not name: name = node.name - name = cast(str, name) + name = cast("str", name) ast_node = node.ast_node except AttributeError: # pragma: no cover pass @@ -454,8 +454,7 @@ def validate_input_fields(self, input_obj: GraphQLInputObjectType) -> None: if not fields: self.report_error( - f"Input Object type {input_obj.name}" - " must define one or more fields.", + f"Input Object type {input_obj.name} must define one or more fields.", [input_obj.ast_node, *input_obj.extension_ast_nodes], ) @@ -482,6 +481,28 @@ def validate_input_fields(self, input_obj: GraphQLInputObjectType) -> None: ], ) + if input_obj.is_one_of: + self.validate_one_of_input_object_field(input_obj, field_name, field) + + def validate_one_of_input_object_field( + self, + type_: GraphQLInputObjectType, + field_name: str, + field: GraphQLInputField, + ) -> None: + if is_non_null_type(field.type): + self.report_error( + f"OneOf input field {type_.name}.{field_name} must be nullable.", + field.ast_node and field.ast_node.type, + ) + + if field.default_value is not Undefined: + self.report_error( + f"OneOf input field {type_.name}.{field_name}" + " cannot have a default value.", + field.ast_node, + ) + def get_operation_type_node( schema: GraphQLSchema, operation: OperationType @@ -540,7 +561,7 @@ def __call__(self, input_obj: GraphQLInputObjectType) -> None: " within itself through a series of non-null fields:" f" '{'.'.join(field_names)}'.", cast( - Collection[Node], + "Collection[Node]", map(attrgetter("ast_node"), map(itemgetter(1), cycle_path)), ), ) diff --git a/src/graphql/utilities/__init__.py b/src/graphql/utilities/__init__.py index f528bdcc..5aadcc31 100644 --- a/src/graphql/utilities/__init__.py +++ b/src/graphql/utilities/__init__.py @@ -100,14 +100,14 @@ "find_dangerous_changes", "get_introspection_query", "get_operation_ast", + "introspection_from_schema", "is_equal_type", "is_type_sub_type_of", - "introspection_from_schema", "lexicographic_sort_schema", - "print_schema", - "print_type", "print_directive", "print_introspection_schema", + "print_schema", + "print_type", "print_value", "separate_operations", "strip_ignored_characters", diff --git a/src/graphql/utilities/ast_to_dict.py b/src/graphql/utilities/ast_to_dict.py index fea70b32..3a2b3504 100644 --- a/src/graphql/utilities/ast_to_dict.py +++ b/src/graphql/utilities/ast_to_dict.py @@ -37,9 +37,8 @@ def ast_to_dict( ) -> Any: """Convert a language AST to a nested Python dictionary. - Set `location` to True in order to get the locations as well. + Set `locations` to True in order to get the locations as well. """ - """Convert a node to a nested Python dictionary.""" if isinstance(node, Node): if cache is None: cache = {} diff --git a/src/graphql/utilities/build_ast_schema.py b/src/graphql/utilities/build_ast_schema.py index 8736e979..26ccfea2 100644 --- a/src/graphql/utilities/build_ast_schema.py +++ b/src/graphql/utilities/build_ast_schema.py @@ -68,11 +68,11 @@ def build_ast_schema( # validation with validate_schema() will produce more actionable results. 
type_name = type_.name if type_name == "Query": - schema_kwargs["query"] = cast(GraphQLObjectType, type_) + schema_kwargs["query"] = cast("GraphQLObjectType", type_) elif type_name == "Mutation": - schema_kwargs["mutation"] = cast(GraphQLObjectType, type_) + schema_kwargs["mutation"] = cast("GraphQLObjectType", type_) elif type_name == "Subscription": - schema_kwargs["subscription"] = cast(GraphQLObjectType, type_) + schema_kwargs["subscription"] = cast("GraphQLObjectType", type_) # If specified directives were not explicitly declared, add them. directives = schema_kwargs["directives"] diff --git a/src/graphql/utilities/build_client_schema.py b/src/graphql/utilities/build_client_schema.py index c4d05ccc..0e2cbd0e 100644 --- a/src/graphql/utilities/build_client_schema.py +++ b/src/graphql/utilities/build_client_schema.py @@ -3,7 +3,7 @@ from __future__ import annotations from itertools import chain -from typing import Callable, Collection, cast +from typing import TYPE_CHECKING, Callable, Collection, cast from ..language import DirectiveLocation, parse_value from ..pyutils import Undefined, inspect @@ -33,22 +33,25 @@ is_output_type, specified_scalar_types, ) -from .get_introspection_query import ( - IntrospectionDirective, - IntrospectionEnumType, - IntrospectionField, - IntrospectionInputObjectType, - IntrospectionInputValue, - IntrospectionInterfaceType, - IntrospectionObjectType, - IntrospectionQuery, - IntrospectionScalarType, - IntrospectionType, - IntrospectionTypeRef, - IntrospectionUnionType, -) from .value_from_ast import value_from_ast +if TYPE_CHECKING: + from .get_introspection_query import ( + IntrospectionDirective, + IntrospectionEnumType, + IntrospectionField, + IntrospectionInputObjectType, + IntrospectionInputValue, + IntrospectionInterfaceType, + IntrospectionObjectType, + IntrospectionQuery, + IntrospectionScalarType, + IntrospectionType, + IntrospectionTypeRef, + IntrospectionUnionType, + ) + + __all__ = ["build_client_schema"] @@ -90,17 +93,17 @@ def get_type(type_ref: IntrospectionTypeRef) -> GraphQLType: if not item_ref: msg = "Decorated type deeper than introspection query." raise TypeError(msg) - item_ref = cast(IntrospectionTypeRef, item_ref) + item_ref = cast("IntrospectionTypeRef", item_ref) return GraphQLList(get_type(item_ref)) if kind == TypeKind.NON_NULL.name: nullable_ref = type_ref.get("ofType") if not nullable_ref: msg = "Decorated type deeper than introspection query." raise TypeError(msg) - nullable_ref = cast(IntrospectionTypeRef, nullable_ref) + nullable_ref = cast("IntrospectionTypeRef", nullable_ref) nullable_type = get_type(nullable_ref) return GraphQLNonNull(assert_nullable_type(nullable_type)) - type_ref = cast(IntrospectionType, type_ref) + type_ref = cast("IntrospectionType", type_ref) return get_named_type(type_ref) def get_named_type(type_ref: IntrospectionType) -> GraphQLNamedType: @@ -145,7 +148,7 @@ def build_scalar_def( ) -> GraphQLScalarType: name = scalar_introspection["name"] try: - return cast(GraphQLScalarType, GraphQLScalarType.reserved_types[name]) + return cast("GraphQLScalarType", GraphQLScalarType.reserved_types[name]) except KeyError: return GraphQLScalarType( name=name, @@ -168,7 +171,7 @@ def build_implementations_list( f" {inspect(implementing_introspection)}." 
) raise TypeError(msg) - interfaces = cast(Collection[IntrospectionInterfaceType], maybe_interfaces) + interfaces = cast("Collection[IntrospectionInterfaceType]", maybe_interfaces) return [get_interface_type(interface) for interface in interfaces] def build_object_def( @@ -176,7 +179,7 @@ def build_object_def( ) -> GraphQLObjectType: name = object_introspection["name"] try: - return cast(GraphQLObjectType, GraphQLObjectType.reserved_types[name]) + return cast("GraphQLObjectType", GraphQLObjectType.reserved_types[name]) except KeyError: return GraphQLObjectType( name=name, @@ -205,7 +208,9 @@ def build_union_def( f" {inspect(union_introspection)}." ) raise TypeError(msg) - possible_types = cast(Collection[IntrospectionObjectType], maybe_possible_types) + possible_types = cast( + "Collection[IntrospectionObjectType]", maybe_possible_types + ) return GraphQLUnionType( name=union_introspection["name"], description=union_introspection.get("description"), @@ -221,7 +226,7 @@ def build_enum_def(enum_introspection: IntrospectionEnumType) -> GraphQLEnumType raise TypeError(msg) name = enum_introspection["name"] try: - return cast(GraphQLEnumType, GraphQLEnumType.reserved_types[name]) + return cast("GraphQLEnumType", GraphQLEnumType.reserved_types[name]) except KeyError: return GraphQLEnumType( name=name, @@ -275,7 +280,7 @@ def build_field_def_map( } def build_field(field_introspection: IntrospectionField) -> GraphQLField: - type_introspection = cast(IntrospectionType, field_introspection["type"]) + type_introspection = cast("IntrospectionType", field_introspection["type"]) type_ = get_type(type_introspection) if not is_output_type(type_): msg = ( @@ -310,7 +315,7 @@ def build_argument_def_map( def build_argument( argument_introspection: IntrospectionInputValue, ) -> GraphQLArgument: - type_introspection = cast(IntrospectionType, argument_introspection["type"]) + type_introspection = cast("IntrospectionType", argument_introspection["type"]) type_ = get_type(type_introspection) if not is_input_type(type_): msg = ( @@ -345,7 +350,9 @@ def build_input_value_def_map( def build_input_value( input_value_introspection: IntrospectionInputValue, ) -> GraphQLInputField: - type_introspection = cast(IntrospectionType, input_value_introspection["type"]) + type_introspection = cast( + "IntrospectionType", input_value_introspection["type"] + ) type_ = get_type(type_introspection) if not is_input_type(type_): msg = ( @@ -388,7 +395,7 @@ def build_directive( is_repeatable=directive_introspection.get("isRepeatable", False), locations=list( cast( - Collection[DirectiveLocation], + "Collection[DirectiveLocation]", directive_introspection.get("locations"), ) ), diff --git a/src/graphql/utilities/coerce_input_value.py b/src/graphql/utilities/coerce_input_value.py index db74d272..b7452ec3 100644 --- a/src/graphql/utilities/coerce_input_value.py +++ b/src/graphql/utilities/coerce_input_value.py @@ -130,13 +130,37 @@ def coerce_input_value( + did_you_mean(suggestions) ), ) + + if type_.is_one_of: + keys = list(coerced_dict) + if len(keys) != 1: + on_error( + path.as_list() if path else [], + input_value, + GraphQLError( + "Exactly one key must be specified" + f" for OneOf type '{type_.name}'.", + ), + ) + else: + key = keys[0] + value = coerced_dict[key] + if value is None: + on_error( + (path.as_list() if path else []) + [key], + value, + GraphQLError( + f"Field '{key}' must be non-null.", + ), + ) + return type_.out_type(coerced_dict) if is_leaf_type(type_): # Scalars and Enums determine if an input value is valid via 
`parse_value()`, # which can throw to indicate failure. If it throws, maintain a reference # to the original error. - type_ = cast(GraphQLScalarType, type_) + type_ = cast("GraphQLScalarType", type_) try: parse_result = type_.parse_value(input_value) except GraphQLError as error: diff --git a/src/graphql/utilities/extend_schema.py b/src/graphql/utilities/extend_schema.py index c5af8669..aebdd2b3 100644 --- a/src/graphql/utilities/extend_schema.py +++ b/src/graphql/utilities/extend_schema.py @@ -65,6 +65,7 @@ GraphQLNullableType, GraphQLObjectType, GraphQLObjectTypeKwargs, + GraphQLOneOfDirective, GraphQLOutputType, GraphQLScalarType, GraphQLSchema, @@ -91,8 +92,8 @@ from .value_from_ast import value_from_ast __all__ = [ - "extend_schema", "ExtendSchemaImpl", + "extend_schema", ] @@ -229,8 +230,12 @@ def extend_schema_args( return schema_kwargs self = cls(type_extensions) - for existing_type in schema_kwargs["types"] or (): - self.type_map[existing_type.name] = self.extend_named_type(existing_type) + + self.type_map = { + type_.name: self.extend_named_type(type_) + for type_ in schema_kwargs["types"] or () + } + for type_node in type_defs: name = type_node.name.value self.type_map[name] = std_type_map.get(name) or self.build_type(type_node) @@ -400,7 +405,7 @@ def extend_object_type_interfaces( ) -> list[GraphQLInterfaceType]: """Extend a GraphQL object type interface.""" return [ - cast(GraphQLInterfaceType, self.replace_named_type(interface)) + cast("GraphQLInterfaceType", self.replace_named_type(interface)) for interface in kwargs["interfaces"] ] + self.build_interfaces(extensions) @@ -438,7 +443,7 @@ def extend_interface_type_interfaces( ) -> list[GraphQLInterfaceType]: """Extend GraphQL interface type interfaces.""" return [ - cast(GraphQLInterfaceType, self.replace_named_type(interface)) + cast("GraphQLInterfaceType", self.replace_named_type(interface)) for interface in kwargs["interfaces"] ] + self.build_interfaces(extensions) @@ -478,7 +483,7 @@ def extend_union_type_types( ) -> list[GraphQLObjectType]: """Extend types of a GraphQL union type.""" return [ - cast(GraphQLObjectType, self.replace_named_type(member_type)) + cast("GraphQLObjectType", self.replace_named_type(member_type)) for member_type in kwargs["types"] ] + self.build_union_types(extensions) @@ -546,9 +551,9 @@ def get_wrapped_type(self, node: TypeNode) -> GraphQLType: return GraphQLList(self.get_wrapped_type(node.type)) if isinstance(node, NonNullTypeNode): return GraphQLNonNull( - cast(GraphQLNullableType, self.get_wrapped_type(node.type)) + cast("GraphQLNullableType", self.get_wrapped_type(node.type)) ) - return self.get_named_type(cast(NamedTypeNode, node)) + return self.get_named_type(cast("NamedTypeNode", node)) def build_directive(self, node: DirectiveDefinitionNode) -> GraphQLDirective: """Build a GraphQL directive for a given directive definition node.""" @@ -580,7 +585,7 @@ def build_field_map( # value, that would throw immediately while type system validation # with validate_schema() will produce more actionable results. 
field_map[field.name.value] = GraphQLField( - type_=cast(GraphQLOutputType, self.get_wrapped_type(field.type)), + type_=cast("GraphQLOutputType", self.get_wrapped_type(field.type)), description=field.description.value if field.description else None, args=self.build_argument_map(field.arguments), deprecation_reason=get_deprecation_reason(field), @@ -598,7 +603,7 @@ def build_argument_map( # Note: While this could make assertions to get the correctly typed # value, that would throw immediately while type system validation # with validate_schema() will produce more actionable results. - type_ = cast(GraphQLInputType, self.get_wrapped_type(arg.type)) + type_ = cast("GraphQLInputType", self.get_wrapped_type(arg.type)) arg_map[arg.name.value] = GraphQLArgument( type_=type_, description=arg.description.value if arg.description else None, @@ -619,7 +624,7 @@ def build_input_field_map( # Note: While this could make assertions to get the correctly typed # value, that would throw immediately while type system validation # with validate_schema() will produce more actionable results. - type_ = cast(GraphQLInputType, self.get_wrapped_type(field.type)) + type_ = cast("GraphQLInputType", self.get_wrapped_type(field.type)) input_field_map[field.name.value] = GraphQLInputField( type_=type_, description=field.description.value if field.description else None, @@ -663,7 +668,7 @@ def build_interfaces( # value, that would throw immediately while type system validation # with validate_schema() will produce more actionable results. return [ - cast(GraphQLInterfaceType, self.get_named_type(type_)) + cast("GraphQLInterfaceType", self.get_named_type(type_)) for node in nodes for type_ in node.interfaces or [] ] @@ -677,7 +682,7 @@ def build_union_types( # value, that would throw immediately while type system validation # with validate_schema() will produce more actionable results. 
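For context, extend_schema_args() now rebuilds the type map with a dict comprehension instead of mutating it entry by entry; the public extend_schema() wrapper behaves as before. A quick usage sketch (the schema and extension are invented):

```python
from graphql import build_schema, extend_schema, parse, print_schema

schema = build_schema("type Query { hello: String }")
extended = extend_schema(schema, parse("extend type Query { world: String }"))

# The extended schema now exposes both fields on Query:
print(print_schema(extended))
```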
return [ - cast(GraphQLObjectType, self.get_named_type(type_)) + cast("GraphQLObjectType", self.get_named_type(type_)) for node in nodes for type_ in node.types or [] ] @@ -777,6 +782,7 @@ def build_input_object_type( fields=partial(self.build_input_field_map, all_nodes), ast_node=ast_node, extension_ast_nodes=extension_nodes, + is_one_of=is_one_of(ast_node), ) def build_type(self, ast_node: TypeDefinitionNode) -> GraphQLNamedType: @@ -822,3 +828,10 @@ def get_specified_by_url( specified_by_url = get_directive_values(GraphQLSpecifiedByDirective, node) return specified_by_url["url"] if specified_by_url else None + + +def is_one_of(node: InputObjectTypeDefinitionNode) -> bool: + """Given an input object node, returns if the node should be OneOf.""" + from ..execution import get_directive_values + + return get_directive_values(GraphQLOneOfDirective, node) is not None diff --git a/src/graphql/utilities/find_breaking_changes.py b/src/graphql/utilities/find_breaking_changes.py index c88c1265..d2a03ad2 100644 --- a/src/graphql/utilities/find_breaking_changes.py +++ b/src/graphql/utilities/find_breaking_changes.py @@ -216,11 +216,8 @@ def find_type_changes( schema_changes.extend(find_union_type_changes(old_type, new_type)) elif is_input_object_type(old_type) and is_input_object_type(new_type): schema_changes.extend(find_input_object_type_changes(old_type, new_type)) - elif ( - is_object_type(old_type) - and is_object_type(new_type) - or is_interface_type(old_type) - and is_interface_type(new_type) + elif (is_object_type(old_type) and is_object_type(new_type)) or ( + is_interface_type(old_type) and is_interface_type(new_type) ): schema_changes.extend(find_field_changes(old_type, new_type)) schema_changes.extend( @@ -297,7 +294,7 @@ def find_union_type_changes( schema_changes.append( DangerousChange( DangerousChangeType.TYPE_ADDED_TO_UNION, - f"{possible_type.name} was added" f" to union type {old_type.name}.", + f"{possible_type.name} was added to union type {old_type.name}.", ) ) @@ -410,7 +407,7 @@ def find_arg_changes( schema_changes.append( BreakingChange( BreakingChangeType.ARG_REMOVED, - f"{old_type.name}.{field_name} arg" f" {arg_name} was removed.", + f"{old_type.name}.{field_name} arg {arg_name} was removed.", ) ) diff --git a/src/graphql/utilities/get_introspection_query.py b/src/graphql/utilities/get_introspection_query.py index cffaa12d..adf038ac 100644 --- a/src/graphql/utilities/get_introspection_query.py +++ b/src/graphql/utilities/get_introspection_query.py @@ -19,7 +19,6 @@ __all__ = [ - "get_introspection_query", "IntrospectionDirective", "IntrospectionEnumType", "IntrospectionField", @@ -35,6 +34,7 @@ "IntrospectionType", "IntrospectionTypeRef", "IntrospectionUnionType", + "get_introspection_query", ] @@ -149,6 +149,14 @@ def input_deprecation(string: str) -> str | None: ofType {{ kind name + ofType {{ + kind + name + ofType {{ + kind + name + }} + }} }} }} }} @@ -294,7 +302,9 @@ class IntrospectionSchema(MaybeWithDescription): directives: list[IntrospectionDirective] -class IntrospectionQuery(TypedDict): - """The root typed dictionary for schema introspections.""" - - __schema: IntrospectionSchema +# The root typed dictionary for schema introspections. +# Note: We don't use class syntax here since the key looks like a private attribute. 
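The is_one_of() helper above reads the @oneOf directive off an input object definition node, so schemas built from SDL pick up the flag automatically. A sketch, assuming @oneOf ships as a specified directive in this alpha (the schema is invented):

```python
from graphql import build_schema

schema = build_schema("""
    type Query { pet(input: PetInput): String }

    input PetInput @oneOf {
      cat: String
      dog: String
    }
""")

pet_input = schema.get_type("PetInput")
print(pet_input.is_one_of)  # True
```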
+IntrospectionQuery = TypedDict( + "IntrospectionQuery", + {"__schema": IntrospectionSchema}, +) diff --git a/src/graphql/utilities/introspection_from_schema.py b/src/graphql/utilities/introspection_from_schema.py index cc1e60ce..a0440a32 100644 --- a/src/graphql/utilities/introspection_from_schema.py +++ b/src/graphql/utilities/introspection_from_schema.py @@ -51,4 +51,4 @@ def introspection_from_schema( if not result.data: # pragma: no cover msg = "Introspection did not return a result" raise GraphQLError(msg) - return cast(IntrospectionQuery, result.data) + return cast("IntrospectionQuery", result.data) diff --git a/src/graphql/utilities/lexicographic_sort_schema.py b/src/graphql/utilities/lexicographic_sort_schema.py index cf0c4959..de675a94 100644 --- a/src/graphql/utilities/lexicographic_sort_schema.py +++ b/src/graphql/utilities/lexicographic_sort_schema.py @@ -51,7 +51,7 @@ def replace_type( return GraphQLList(replace_type(type_.of_type)) if is_non_null_type(type_): return GraphQLNonNull(replace_type(type_.of_type)) - return replace_named_type(cast(GraphQLNamedType, type_)) + return replace_named_type(cast("GraphQLNamedType", type_)) def replace_named_type(type_: GraphQLNamedType) -> GraphQLNamedType: return type_map[type_.name] @@ -76,7 +76,7 @@ def sort_args(args_map: dict[str, GraphQLArgument]) -> dict[str, GraphQLArgument args[name] = GraphQLArgument( **merge_kwargs( arg.to_kwargs(), - type_=replace_type(cast(GraphQLNamedType, arg.type)), + type_=replace_type(cast("GraphQLNamedType", arg.type)), ) ) return args @@ -87,7 +87,7 @@ def sort_fields(fields_map: dict[str, GraphQLField]) -> dict[str, GraphQLField]: fields[name] = GraphQLField( **merge_kwargs( field.to_kwargs(), - type_=replace_type(cast(GraphQLNamedType, field.type)), + type_=replace_type(cast("GraphQLNamedType", field.type)), args=sort_args(field.args), ) ) @@ -99,7 +99,8 @@ def sort_input_fields( return { name: GraphQLInputField( cast( - GraphQLInputType, replace_type(cast(GraphQLNamedType, field.type)) + "GraphQLInputType", + replace_type(cast("GraphQLNamedType", field.type)), ), description=field.description, default_value=field.default_value, @@ -174,12 +175,14 @@ def sort_named_type(type_: GraphQLNamedType) -> GraphQLNamedType: sort_directive(directive) for directive in sorted(schema.directives, key=sort_by_name_key) ], - query=cast(Optional[GraphQLObjectType], replace_maybe_type(schema.query_type)), + query=cast( + "Optional[GraphQLObjectType]", replace_maybe_type(schema.query_type) + ), mutation=cast( - Optional[GraphQLObjectType], replace_maybe_type(schema.mutation_type) + "Optional[GraphQLObjectType]", replace_maybe_type(schema.mutation_type) ), subscription=cast( - Optional[GraphQLObjectType], replace_maybe_type(schema.subscription_type) + "Optional[GraphQLObjectType]", replace_maybe_type(schema.subscription_type) ), ast_node=schema.ast_node, ) diff --git a/src/graphql/utilities/print_schema.py b/src/graphql/utilities/print_schema.py index 44c876dc..dd68e54e 100644 --- a/src/graphql/utilities/print_schema.py +++ b/src/graphql/utilities/print_schema.py @@ -33,10 +33,10 @@ from .ast_from_value import ast_from_value __all__ = [ - "print_schema", - "print_type", "print_directive", "print_introspection_schema", + "print_schema", + "print_type", "print_value", ] diff --git a/src/graphql/utilities/strip_ignored_characters.py b/src/graphql/utilities/strip_ignored_characters.py index 6521d10b..9ffe1e26 100644 --- a/src/graphql/utilities/strip_ignored_characters.py +++ 
b/src/graphql/utilities/strip_ignored_characters.py @@ -68,7 +68,7 @@ def strip_ignored_characters(source: str | Source) -> str: """Type description""" type Foo{"""Field description""" bar:String} ''' if not is_source(source): - source = Source(cast(str, source)) + source = Source(cast("str", source)) body = source.body lexer = Lexer(source) diff --git a/src/graphql/utilities/type_comparators.py b/src/graphql/utilities/type_comparators.py index 3ab50dc5..609c19b6 100644 --- a/src/graphql/utilities/type_comparators.py +++ b/src/graphql/utilities/type_comparators.py @@ -11,7 +11,7 @@ is_object_type, ) -__all__ = ["is_equal_type", "is_type_sub_type_of", "do_types_overlap"] +__all__ = ["do_types_overlap", "is_equal_type", "is_type_sub_type_of"] def is_equal_type(type_a: GraphQLType, type_b: GraphQLType) -> bool: diff --git a/src/graphql/utilities/type_from_ast.py b/src/graphql/utilities/type_from_ast.py index c082ebc1..10acd68f 100644 --- a/src/graphql/utilities/type_from_ast.py +++ b/src/graphql/utilities/type_from_ast.py @@ -58,7 +58,7 @@ def type_from_ast( return GraphQLList(inner_type) if inner_type else None if isinstance(type_node, NonNullTypeNode): inner_type = type_from_ast(schema, type_node.type) - inner_type = cast(GraphQLNullableType, inner_type) + inner_type = cast("GraphQLNullableType", inner_type) return GraphQLNonNull(inner_type) if inner_type else None if isinstance(type_node, NamedTypeNode): return schema.get_type(type_node.name.value) diff --git a/src/graphql/utilities/value_from_ast.py b/src/graphql/utilities/value_from_ast.py index 67ed11dc..399cdcb4 100644 --- a/src/graphql/utilities/value_from_ast.py +++ b/src/graphql/utilities/value_from_ast.py @@ -118,12 +118,20 @@ def value_from_ast( return Undefined coerced_obj[field.out_name or field_name] = field_value + if type_.is_one_of: + keys = list(coerced_obj) + if len(keys) != 1: + return Undefined + + if coerced_obj[keys[0]] is None: + return Undefined + return type_.out_type(coerced_obj) if is_leaf_type(type_): # Scalars fulfill parsing a literal value via `parse_literal()`. Invalid values # represent a failure to parse correctly, in which case Undefined is returned. 
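value_from_ast() above applies the same OneOf rules when coercing literals: anything other than exactly one non-null key yields Undefined. A sketch with an invented PetInput type, again assuming the is_one_of keyword:

```python
from graphql import (
    GraphQLInputField,
    GraphQLInputObjectType,
    GraphQLString,
    Undefined,
    parse_value,
)
from graphql.utilities import value_from_ast

pet_input = GraphQLInputObjectType(
    "PetInput",
    {"cat": GraphQLInputField(GraphQLString), "dog": GraphQLInputField(GraphQLString)},
    is_one_of=True,
)

# Exactly one non-null key is accepted:
assert value_from_ast(parse_value('{cat: "Tom"}'), pet_input) == {"cat": "Tom"}
# Two keys, or an explicit null, coerce to Undefined:
assert value_from_ast(parse_value('{cat: "Tom", dog: "Rex"}'), pet_input) is Undefined
assert value_from_ast(parse_value("{cat: null}"), pet_input) is Undefined
```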
- type_ = cast(GraphQLScalarType, type_) + type_ = cast("GraphQLScalarType", type_) # noinspection PyBroadException try: if variables: diff --git a/src/graphql/validation/__init__.py b/src/graphql/validation/__init__.py index 8f67f9b7..ed6ca6c8 100644 --- a/src/graphql/validation/__init__.py +++ b/src/graphql/validation/__init__.py @@ -124,14 +124,8 @@ from .rules.custom.no_schema_introspection import NoSchemaIntrospectionCustomRule __all__ = [ - "validate", "ASTValidationContext", "ASTValidationRule", - "SDLValidationContext", - "SDLValidationRule", - "ValidationContext", - "ValidationRule", - "specified_rules", "DeferStreamDirectiveLabel", "DeferStreamDirectiveOnRootField", "DeferStreamDirectiveOnValidOperationsRule", @@ -143,33 +137,39 @@ "KnownFragmentNamesRule", "KnownTypeNamesRule", "LoneAnonymousOperationRule", + "LoneSchemaDefinitionRule", + "NoDeprecatedCustomRule", "NoFragmentCyclesRule", + "NoSchemaIntrospectionCustomRule", "NoUndefinedVariablesRule", "NoUnusedFragmentsRule", "NoUnusedVariablesRule", "OverlappingFieldsCanBeMergedRule", "PossibleFragmentSpreadsRule", + "PossibleTypeExtensionsRule", "ProvidedRequiredArgumentsRule", + "SDLValidationContext", + "SDLValidationRule", "ScalarLeafsRule", "SingleFieldSubscriptionsRule", "StreamDirectiveOnListField", + "UniqueArgumentDefinitionNamesRule", "UniqueArgumentNamesRule", + "UniqueDirectiveNamesRule", "UniqueDirectivesPerLocationRule", + "UniqueEnumValueNamesRule", + "UniqueFieldDefinitionNamesRule", "UniqueFragmentNamesRule", "UniqueInputFieldNamesRule", "UniqueOperationNamesRule", + "UniqueOperationTypesRule", + "UniqueTypeNamesRule", "UniqueVariableNamesRule", + "ValidationContext", + "ValidationRule", "ValuesOfCorrectTypeRule", "VariablesAreInputTypesRule", "VariablesInAllowedPositionRule", - "LoneSchemaDefinitionRule", - "UniqueOperationTypesRule", - "UniqueTypeNamesRule", - "UniqueEnumValueNamesRule", - "UniqueFieldDefinitionNamesRule", - "UniqueArgumentDefinitionNamesRule", - "UniqueDirectiveNamesRule", - "PossibleTypeExtensionsRule", - "NoDeprecatedCustomRule", - "NoSchemaIntrospectionCustomRule", + "specified_rules", + "validate", ] diff --git a/src/graphql/validation/rules/defer_stream_directive_on_root_field.py b/src/graphql/validation/rules/defer_stream_directive_on_root_field.py index 7a73a990..023fc2b2 100644 --- a/src/graphql/validation/rules/defer_stream_directive_on_root_field.py +++ b/src/graphql/validation/rules/defer_stream_directive_on_root_field.py @@ -29,7 +29,7 @@ def enter_directive( _path: Any, _ancestors: list[Node], ) -> None: - context = cast(ValidationContext, self.context) + context = cast("ValidationContext", self.context) parent_type = context.get_parent_type() if not parent_type: return diff --git a/src/graphql/validation/rules/defer_stream_directive_on_valid_operations_rule.py b/src/graphql/validation/rules/defer_stream_directive_on_valid_operations_rule.py index c412b89e..0159715d 100644 --- a/src/graphql/validation/rules/defer_stream_directive_on_valid_operations_rule.py +++ b/src/graphql/validation/rules/defer_stream_directive_on_valid_operations_rule.py @@ -66,7 +66,8 @@ def enter_directive( if ( isinstance(definition_node, FragmentDefinitionNode) and definition_node.name.value in self.fragments_used_on_subscriptions - or isinstance(definition_node, OperationDefinitionNode) + ) or ( + isinstance(definition_node, OperationDefinitionNode) and definition_node.operation == OperationType.SUBSCRIPTION ): if node.name.value == GraphQLDeferDirective.name: diff --git 
a/src/graphql/validation/rules/executable_definitions.py b/src/graphql/validation/rules/executable_definitions.py index 1f702210..6ca01a9d 100644 --- a/src/graphql/validation/rules/executable_definitions.py +++ b/src/graphql/validation/rules/executable_definitions.py @@ -39,7 +39,7 @@ def enter_document(self, node: DocumentNode, *_args: Any) -> VisitorAction: ) else "'{}'".format( cast( - Union[DirectiveDefinitionNode, TypeDefinitionNode], + "Union[DirectiveDefinitionNode, TypeDefinitionNode]", definition, ).name.value ) diff --git a/src/graphql/validation/rules/known_argument_names.py b/src/graphql/validation/rules/known_argument_names.py index dadfd34a..643300d0 100644 --- a/src/graphql/validation/rules/known_argument_names.py +++ b/src/graphql/validation/rules/known_argument_names.py @@ -16,7 +16,7 @@ from ...type import specified_directives from . import ASTValidationRule, SDLValidationContext, ValidationContext -__all__ = ["KnownArgumentNamesRule", "KnownArgumentNamesOnDirectivesRule"] +__all__ = ["KnownArgumentNamesOnDirectivesRule", "KnownArgumentNamesRule"] class KnownArgumentNamesOnDirectivesRule(ASTValidationRule): @@ -35,7 +35,7 @@ def __init__(self, context: ValidationContext | SDLValidationContext) -> None: schema = context.schema defined_directives = schema.directives if schema else specified_directives - for directive in cast(List, defined_directives): + for directive in cast("List", defined_directives): directive_args[directive.name] = list(directive.args) ast_definitions = context.document.definitions diff --git a/src/graphql/validation/rules/known_directives.py b/src/graphql/validation/rules/known_directives.py index 8a0c76c4..da31730b 100644 --- a/src/graphql/validation/rules/known_directives.py +++ b/src/graphql/validation/rules/known_directives.py @@ -35,7 +35,7 @@ def __init__(self, context: ValidationContext | SDLValidationContext) -> None: schema = context.schema defined_directives = ( - schema.directives if schema else cast(List, specified_directives) + schema.directives if schema else cast("List", specified_directives) ) for directive in defined_directives: locations_map[directive.name] = directive.locations @@ -111,7 +111,7 @@ def get_directive_location_for_ast_path( raise TypeError(msg) kind = applied_to.kind if kind == "operation_definition": - applied_to = cast(OperationDefinitionNode, applied_to) + applied_to = cast("OperationDefinitionNode", applied_to) return _operation_location[applied_to.operation.value] if kind == "input_value_definition": parent_node = ancestors[-3] diff --git a/src/graphql/validation/rules/known_type_names.py b/src/graphql/validation/rules/known_type_names.py index 118d7c0e..5dbac00b 100644 --- a/src/graphql/validation/rules/known_type_names.py +++ b/src/graphql/validation/rules/known_type_names.py @@ -94,7 +94,7 @@ def is_sdl_node( value is not None and not isinstance(value, list) and ( - is_type_system_definition_node(cast(Node, value)) - or is_type_system_extension_node(cast(Node, value)) + is_type_system_definition_node(cast("Node", value)) + or is_type_system_extension_node(cast("Node", value)) ) ) diff --git a/src/graphql/validation/rules/overlapping_fields_can_be_merged.py b/src/graphql/validation/rules/overlapping_fields_can_be_merged.py index b077958b..97939e56 100644 --- a/src/graphql/validation/rules/overlapping_fields_can_be_merged.py +++ b/src/graphql/validation/rules/overlapping_fields_can_be_merged.py @@ -44,8 +44,7 @@ def reason_message(reason: ConflictReasonMessage) -> str: if isinstance(reason, list): return " and 
".join( - f"subfields '{response_name}' conflict" - f" because {reason_message(sub_reason)}" + f"subfields '{response_name}' conflict because {reason_message(sub_reason)}" for response_name, sub_reason in reason ) return reason @@ -539,8 +538,8 @@ def find_conflict( ) # The return type for each field. - type1 = cast(Optional[GraphQLOutputType], def1 and def1.type) - type2 = cast(Optional[GraphQLOutputType], def2 and def2.type) + type1 = cast("Optional[GraphQLOutputType]", def1 and def1.type) + type2 = cast("Optional[GraphQLOutputType]", def2 and def2.type) if not are_mutually_exclusive: # Two aliases must refer to the same field. @@ -740,7 +739,7 @@ def collect_fields_and_fragment_names( if not node_and_defs.get(response_name): node_and_defs[response_name] = [] node_and_defs[response_name].append( - cast(NodeAndDef, (parent_type, selection, field_def)) + cast("NodeAndDef", (parent_type, selection, field_def)) ) elif isinstance(selection, FragmentSpreadNode): fragment_names[selection.name.value] = True diff --git a/src/graphql/validation/rules/provided_required_arguments.py b/src/graphql/validation/rules/provided_required_arguments.py index a9313273..9c98065e 100644 --- a/src/graphql/validation/rules/provided_required_arguments.py +++ b/src/graphql/validation/rules/provided_required_arguments.py @@ -19,7 +19,7 @@ from ...type import GraphQLArgument, is_required_argument, is_type, specified_directives from . import ASTValidationRule, SDLValidationContext, ValidationContext -__all__ = ["ProvidedRequiredArgumentsRule", "ProvidedRequiredArgumentsOnDirectivesRule"] +__all__ = ["ProvidedRequiredArgumentsOnDirectivesRule", "ProvidedRequiredArgumentsRule"] class ProvidedRequiredArgumentsOnDirectivesRule(ASTValidationRule): @@ -41,7 +41,7 @@ def __init__(self, context: ValidationContext | SDLValidationContext) -> None: schema = context.schema defined_directives = schema.directives if schema else specified_directives - for directive in cast(List, defined_directives): + for directive in cast("List", defined_directives): required_args_map[directive.name] = { name: arg for name, arg in directive.args.items() @@ -71,7 +71,7 @@ def leave_directive(self, directive_node: DirectiveNode, *_args: Any) -> None: arg_type_str = ( str(arg_type) if is_type(arg_type) - else print_ast(cast(TypeNode, arg_type)) + else print_ast(cast("TypeNode", arg_type)) ) self.report_error( GraphQLError( diff --git a/src/graphql/validation/rules/single_field_subscriptions.py b/src/graphql/validation/rules/single_field_subscriptions.py index 9a689809..89235856 100644 --- a/src/graphql/validation/rules/single_field_subscriptions.py +++ b/src/graphql/validation/rules/single_field_subscriptions.py @@ -2,10 +2,10 @@ from __future__ import annotations -from typing import Any, cast +from typing import Any from ...error import GraphQLError -from ...execution.collect_fields import collect_fields +from ...execution.collect_fields import FieldGroup, collect_fields from ...language import ( FieldNode, FragmentDefinitionNode, @@ -17,6 +17,10 @@ __all__ = ["SingleFieldSubscriptionsRule"] +def to_nodes(field_group: FieldGroup) -> list[FieldNode]: + return [field_details.node for field_details in field_group.fields] + + class SingleFieldSubscriptionsRule(ValidationRule): """Subscriptions must only include a single non-introspection field. 
@@ -50,16 +54,12 @@ def enter_operation_definition( node, ).grouped_field_set if len(grouped_field_set) > 1: - field_selection_lists = list(grouped_field_set.values()) - extra_field_selection_lists = field_selection_lists[1:] + field_groups = list(grouped_field_set.values()) + extra_field_groups = field_groups[1:] extra_field_selection = [ - field - for fields in extra_field_selection_lists - for field in ( - fields - if isinstance(fields, list) - else [cast(FieldNode, fields)] - ) + node + for field_group in extra_field_groups + for node in to_nodes(field_group) ] self.report_error( GraphQLError( @@ -73,7 +73,7 @@ def enter_operation_definition( ) ) for field_group in grouped_field_set.values(): - field_name = field_group[0].name.value + field_name = to_nodes(field_group)[0].name.value if field_name.startswith("__"): self.report_error( GraphQLError( @@ -83,6 +83,6 @@ def enter_operation_definition( else f"Subscription '{operation_name}'" ) + " must not select an introspection top level field.", - field_group, + to_nodes(field_group), ) ) diff --git a/src/graphql/validation/rules/stream_directive_on_list_field.py b/src/graphql/validation/rules/stream_directive_on_list_field.py index 141984c2..03015cd0 100644 --- a/src/graphql/validation/rules/stream_directive_on_list_field.py +++ b/src/graphql/validation/rules/stream_directive_on_list_field.py @@ -28,7 +28,7 @@ def enter_directive( _path: Any, _ancestors: list[Node], ) -> None: - context = cast(ValidationContext, self.context) + context = cast("ValidationContext", self.context) field_def = context.get_field_def() parent_type = context.get_parent_type() if ( diff --git a/src/graphql/validation/rules/unique_directives_per_location.py b/src/graphql/validation/rules/unique_directives_per_location.py index de9a05d0..daab2935 100644 --- a/src/graphql/validation/rules/unique_directives_per_location.py +++ b/src/graphql/validation/rules/unique_directives_per_location.py @@ -38,7 +38,7 @@ def __init__(self, context: ValidationContext | SDLValidationContext) -> None: schema = context.schema defined_directives = ( - schema.directives if schema else cast(List, specified_directives) + schema.directives if schema else cast("List", specified_directives) ) for directive in defined_directives: unique_directive_map[directive.name] = not directive.is_repeatable @@ -60,7 +60,7 @@ def enter(self, node: Node, *_args: Any) -> None: directives = getattr(node, "directives", None) if not directives: return - directives = cast(List[DirectiveNode], directives) + directives = cast("List[DirectiveNode]", directives) if isinstance(node, (SchemaDefinitionNode, SchemaExtensionNode)): seen_directives = self.schema_directives diff --git a/src/graphql/validation/rules/unique_field_definition_names.py b/src/graphql/validation/rules/unique_field_definition_names.py index 8451bc27..39df7203 100644 --- a/src/graphql/validation/rules/unique_field_definition_names.py +++ b/src/graphql/validation/rules/unique_field_definition_names.py @@ -47,8 +47,7 @@ def check_field_uniqueness( elif field_name in field_names: self.report_error( GraphQLError( - f"Field '{type_name}.{field_name}'" - " can only be defined once.", + f"Field '{type_name}.{field_name}' can only be defined once.", [field_names[field_name], field_def.name], ) ) diff --git a/src/graphql/validation/rules/values_of_correct_type.py b/src/graphql/validation/rules/values_of_correct_type.py index 8951a2d9..ea4c4a3c 100644 --- a/src/graphql/validation/rules/values_of_correct_type.py +++ 
b/src/graphql/validation/rules/values_of_correct_type.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import Any, cast +from typing import Any, Mapping, cast from ...error import GraphQLError from ...language import ( @@ -12,16 +12,20 @@ FloatValueNode, IntValueNode, ListValueNode, + NonNullTypeNode, NullValueNode, ObjectFieldNode, ObjectValueNode, StringValueNode, ValueNode, + VariableDefinitionNode, + VariableNode, VisitorAction, print_ast, ) from ...pyutils import Undefined, did_you_mean, suggestion_list from ...type import ( + GraphQLInputObjectType, GraphQLScalarType, get_named_type, get_nullable_type, @@ -31,7 +35,7 @@ is_non_null_type, is_required_input_field, ) -from . import ValidationRule +from . import ValidationContext, ValidationRule __all__ = ["ValuesOfCorrectTypeRule"] @@ -45,6 +49,18 @@ class ValuesOfCorrectTypeRule(ValidationRule): See https://spec.graphql.org/draft/#sec-Values-of-Correct-Type """ + def __init__(self, context: ValidationContext) -> None: + super().__init__(context) + self.variable_definitions: dict[str, VariableDefinitionNode] = {} + + def enter_operation_definition(self, *_args: Any) -> None: + self.variable_definitions.clear() + + def enter_variable_definition( + self, definition: VariableDefinitionNode, *_args: Any + ) -> None: + self.variable_definitions[definition.variable.name.value] = definition + def enter_list_value(self, node: ListValueNode, *_args: Any) -> VisitorAction: # Note: TypeInfo will traverse into a list's item type, so look to the parent # input type to check if it is a list. @@ -72,6 +88,10 @@ def enter_object_value(self, node: ObjectValueNode, *_args: Any) -> VisitorActio node, ) ) + if type_.is_one_of: + validate_one_of_input_object( + self.context, node, type_, field_node_map, self.variable_definitions + ) return None def enter_object_field(self, node: ObjectFieldNode, *_args: Any) -> None: @@ -137,7 +157,7 @@ def is_valid_value_node(self, node: ValueNode) -> None: # Scalars determine if a literal value is valid via `parse_literal()` which may # throw or return an invalid value to indicate failure. 
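ValuesOfCorrectTypeRule now tracks variable definitions and, through the validate_one_of_input_object() helper below, rejects OneOf literals that do not specify exactly one non-null key; nullable variables are rejected as well. A sketch, assuming @oneOf is available in this alpha (schema and query invented):

```python
from graphql import build_schema, parse, validate

schema = build_schema("""
    type Query { pet(input: PetInput): String }

    input PetInput @oneOf {
      cat: String
      dog: String
    }
""")

# Two keys in a OneOf literal is a validation error:
errors = validate(schema, parse('{ pet(input: {cat: "Tom", dog: "Rex"}) }'))
print(errors[0].message)
# "OneOf Input Object 'PetInput' must specify exactly one key."
```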
-        type_ = cast(GraphQLScalarType, type_)
+        type_ = cast("GraphQLScalarType", type_)
         try:
             parse_result = type_.parse_literal(node)
             if parse_result is Undefined:
@@ -162,3 +182,51 @@ def is_valid_value_node(self, node: ValueNode) -> None:
             )
 
         return
+
+
+def validate_one_of_input_object(
+    context: ValidationContext,
+    node: ObjectValueNode,
+    type_: GraphQLInputObjectType,
+    field_node_map: Mapping[str, ObjectFieldNode],
+    variable_definitions: dict[str, VariableDefinitionNode],
+) -> None:
+    keys = list(field_node_map)
+    is_not_exactly_one_field = len(keys) != 1
+
+    if is_not_exactly_one_field:
+        context.report_error(
+            GraphQLError(
+                f"OneOf Input Object '{type_.name}' must specify exactly one key.",
+                node,
+            )
+        )
+        return
+
+    object_field_node = field_node_map.get(keys[0])
+    value = object_field_node.value if object_field_node else None
+    is_null_literal = not value or isinstance(value, NullValueNode)
+
+    if is_null_literal:
+        context.report_error(
+            GraphQLError(
+                f"Field '{type_.name}.{keys[0]}' must be non-null.",
+                node,
+            )
+        )
+        return
+
+    is_variable = value and isinstance(value, VariableNode)
+    if is_variable:
+        variable_name = cast("VariableNode", value).name.value
+        definition = variable_definitions[variable_name]
+        is_nullable_variable = not isinstance(definition.type, NonNullTypeNode)
+
+        if is_nullable_variable:
+            context.report_error(
+                GraphQLError(
+                    f"Variable '{variable_name}' must be non-nullable"
+                    f" to be used for OneOf Input Object '{type_.name}'.",
+                    node,
+                )
+            )
diff --git a/src/graphql/validation/validate.py b/src/graphql/validation/validate.py
index 1439f7e4..8e59821c 100644
--- a/src/graphql/validation/validate.py
+++ b/src/graphql/validation/validate.py
@@ -14,7 +14,13 @@
 if TYPE_CHECKING:
     from .rules import ASTValidationRule
 
-__all__ = ["assert_valid_sdl", "assert_valid_sdl_extension", "validate", "validate_sdl"]
+__all__ = [
+    "ValidationAbortedError",
+    "assert_valid_sdl",
+    "assert_valid_sdl_extension",
+    "validate",
+    "validate_sdl",
+]
 
 
 class ValidationAbortedError(GraphQLError):
diff --git a/src/graphql/validation/validation_context.py b/src/graphql/validation/validation_context.py
index dec21042..055b4231 100644
--- a/src/graphql/validation/validation_context.py
+++ b/src/graphql/validation/validation_context.py
@@ -143,7 +143,7 @@ def get_fragment_spreads(self, node: SelectionSetNode) -> list[FragmentSpreadNod
                     append_spread(selection)
                 else:
                     set_to_visit = cast(
-                        NodeWithSelectionSet, selection
+                        "NodeWithSelectionSet", selection
                     ).selection_set
                     if set_to_visit:
                         append_set(set_to_visit)
diff --git a/src/graphql/version.py b/src/graphql/version.py
index 29166e49..311c74a0 100644
--- a/src/graphql/version.py
+++ b/src/graphql/version.py
@@ -5,12 +5,12 @@
 import re
 from typing import NamedTuple
 
-__all__ = ["version", "version_info", "version_js", "version_info_js"]
+__all__ = ["version", "version_info", "version_info_js", "version_js"]
 
-version = "3.3.0a6"
+version = "3.3.0a7"
 
-version_js = "17.0.0a2"
+version_js = "17.0.0a3"
 
 _re_version = re.compile(r"(\d+)\.(\d+)\.(\d+)(\D*)(\d*)")
diff --git a/tests/benchmarks/test_visit.py b/tests/benchmarks/test_visit.py
index 53bfc98e..4e7a85a2 100644
--- a/tests/benchmarks/test_visit.py
+++ b/tests/benchmarks/test_visit.py
@@ -23,5 +23,5 @@ def test_visit_all_ast_nodes(benchmark, big_schema_sdl):  # noqa: F811
 def test_visit_all_ast_nodes_in_parallel(benchmark, big_schema_sdl):  # noqa: F811
     document_ast = parse(big_schema_sdl)
     visitor = DummyVisitor()
-    parallel_visitor = ParallelVisitor([visitor] * 50)
+
parallel_visitor = ParallelVisitor([visitor] * 25) benchmark(lambda: visit(document_ast, parallel_visitor)) diff --git a/tests/error/test_graphql_error.py b/tests/error/test_graphql_error.py index d01e1e8a..03b85dcf 100644 --- a/tests/error/test_graphql_error.py +++ b/tests/error/test_graphql_error.py @@ -25,7 +25,7 @@ ast = parse(source) operation_node = ast.definitions[0] -operation_node = cast(OperationDefinitionNode, operation_node) +operation_node = cast("OperationDefinitionNode", operation_node) assert operation_node assert operation_node.kind == "operation_definition" field_node = operation_node.selection_set.selections[0] @@ -224,7 +224,7 @@ def serializes_to_include_all_standard_fields(): extensions = {"foo": "bar "} e_full = GraphQLError("msg", field_node, None, None, path, None, extensions) assert str(e_full) == ( - "msg\n\nGraphQL request:2:3\n" "1 | {\n2 | field\n | ^\n3 | }" + "msg\n\nGraphQL request:2:3\n1 | {\n2 | field\n | ^\n3 | }" ) assert repr(e_full) == ( "GraphQLError('msg', locations=[SourceLocation(line=2, column=3)]," @@ -299,7 +299,7 @@ def prints_an_error_with_nodes_from_different_sources(): ) ) op_a = doc_a.definitions[0] - op_a = cast(ObjectTypeDefinitionNode, op_a) + op_a = cast("ObjectTypeDefinitionNode", op_a) assert op_a assert op_a.kind == "object_type_definition" assert op_a.fields @@ -317,7 +317,7 @@ def prints_an_error_with_nodes_from_different_sources(): ) ) op_b = doc_b.definitions[0] - op_b = cast(ObjectTypeDefinitionNode, op_b) + op_b = cast("ObjectTypeDefinitionNode", op_b) assert op_b assert op_b.kind == "object_type_definition" assert op_b.fields diff --git a/tests/error/test_located_error.py b/tests/error/test_located_error.py index 593b24ad..f22f6fd4 100644 --- a/tests/error/test_located_error.py +++ b/tests/error/test_located_error.py @@ -11,7 +11,7 @@ def throws_without_an_original_error(): def passes_graphql_error_through(): path = ["path", 3, "to", "field"] - e = GraphQLError("msg", None, None, None, cast(Any, path)) + e = GraphQLError("msg", None, None, None, cast("Any", path)) assert located_error(e, [], []) == e def passes_graphql_error_ish_through(): @@ -21,7 +21,7 @@ def passes_graphql_error_ish_through(): def does_not_pass_through_elasticsearch_like_errors(): e = Exception("I am from elasticsearch") - cast(Any, e).path = "/something/feed/_search" + cast("Any", e).path = "/something/feed/_search" assert located_error(e, [], []) is not e def handles_lazy_error_messages(): diff --git a/tests/execution/test_abstract.py b/tests/execution/test_abstract.py index b5ebc45b..ddb01345 100644 --- a/tests/execution/test_abstract.py +++ b/tests/execution/test_abstract.py @@ -3,6 +3,7 @@ from typing import Any, NamedTuple import pytest + from graphql.execution import ExecutionResult, execute, execute_sync from graphql.language import parse from graphql.pyutils import is_awaitable @@ -22,14 +23,14 @@ def sync_and_async(spec): """Decorator for running a test synchronously and asynchronously.""" return pytest.mark.asyncio( - pytest.mark.parametrize("sync", (True, False), ids=("sync", "async"))(spec) + pytest.mark.parametrize("sync", [True, False], ids=("sync", "async"))(spec) ) def access_variants(spec): """Decorator for tests with dict and object access, including inheritance.""" return pytest.mark.asyncio( - pytest.mark.parametrize("access", ("dict", "object", "inheritance"))(spec) + pytest.mark.parametrize("access", ["dict", "object", "inheritance"])(spec) ) @@ -41,7 +42,7 @@ async def execute_query( assert isinstance(schema, GraphQLSchema) assert 
isinstance(query, str) document = parse(query) - result = (execute_sync if sync else execute)(schema, document, root_value) # type: ignore + result = (execute_sync if sync else execute)(schema, document, root_value) if not sync and is_awaitable(result): result = await result assert isinstance(result, ExecutionResult) diff --git a/tests/execution/test_customize.py b/tests/execution/test_customize.py index 23740237..bf1859a2 100644 --- a/tests/execution/test_customize.py +++ b/tests/execution/test_customize.py @@ -1,6 +1,7 @@ from inspect import isasyncgen import pytest + from graphql.execution import ExecutionContext, execute, subscribe from graphql.language import parse from graphql.type import GraphQLField, GraphQLObjectType, GraphQLSchema, GraphQLString @@ -9,7 +10,7 @@ anext # noqa: B018 except NameError: # pragma: no cover (Python < 3.10) # noinspection PyShadowingBuiltins - async def anext(iterator): # noqa: A001 + async def anext(iterator): """Return the next item from an async iterator.""" return await iterator.__anext__() @@ -42,27 +43,42 @@ def uses_a_custom_execution_context_class(): ) class TestExecutionContext(ExecutionContext): + def __init__(self, *args, **kwargs): + assert kwargs.pop("custom_arg", None) == "baz" + super().__init__(*args, **kwargs) + def execute_field( self, parent_type, source, field_group, path, - incremental_data_record=None, + incremental_data_record, + defer_map, ): result = super().execute_field( - parent_type, source, field_group, path, incremental_data_record + parent_type, + source, + field_group, + path, + incremental_data_record, + defer_map, ) return result * 2 # type: ignore - assert execute(schema, query, execution_context_class=TestExecutionContext) == ( + assert execute( + schema, + query, + execution_context_class=TestExecutionContext, + custom_arg="baz", + ) == ( {"foo": "barbar"}, None, ) def describe_customize_subscription(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def uses_a_custom_subscribe_field_resolver(): schema = GraphQLSchema( query=GraphQLObjectType("Query", {"foo": GraphQLField(GraphQLString)}), @@ -91,9 +107,13 @@ async def custom_foo(): await subscription.aclose() - @pytest.mark.asyncio() + @pytest.mark.asyncio async def uses_a_custom_execution_context_class(): class TestExecutionContext(ExecutionContext): + def __init__(self, *args, **kwargs): + assert kwargs.pop("custom_arg", None) == "baz" + super().__init__(*args, **kwargs) + def build_resolve_info(self, *args, **kwargs): resolve_info = super().build_resolve_info(*args, **kwargs) resolve_info.context["foo"] = "bar" @@ -125,6 +145,7 @@ def resolve_foo(message, _info): document, context_value={}, execution_context_class=TestExecutionContext, + custom_arg="baz", ) assert isasyncgen(subscription) diff --git a/tests/execution/test_defer.py b/tests/execution/test_defer.py index 6b39f74e..51133100 100644 --- a/tests/execution/test_defer.py +++ b/tests/execution/test_defer.py @@ -1,20 +1,29 @@ from __future__ import annotations from asyncio import sleep -from typing import Any, AsyncGenerator, NamedTuple +from typing import Any, AsyncGenerator, NamedTuple, cast import pytest + from graphql.error import GraphQLError from graphql.execution import ( ExecutionResult, ExperimentalIncrementalExecutionResults, IncrementalDeferResult, + IncrementalResult, InitialIncrementalExecutionResult, SubsequentIncrementalExecutionResult, execute, experimental_execute_incrementally, ) -from graphql.execution.incremental_publisher import DeferredFragmentRecord +from 
graphql.execution.incremental_publisher import ( + CompletedResult, + DeferredFragmentRecord, + DeferredGroupedFieldSetRecord, + PendingResult, + StreamItemsRecord, + StreamRecord, +) from graphql.language import DocumentNode, parse from graphql.pyutils import Path, is_awaitable from graphql.type import ( @@ -36,6 +45,79 @@ }, ) + +class Friend(NamedTuple): + id: int + name: str + + +friends = [Friend(2, "Han"), Friend(3, "Leia"), Friend(4, "C-3PO")] + +deeper_object = GraphQLObjectType( + "DeeperObject", + { + "foo": GraphQLField(GraphQLString), + "bar": GraphQLField(GraphQLString), + "baz": GraphQLField(GraphQLString), + "bak": GraphQLField(GraphQLString), + }, +) + +nested_object = GraphQLObjectType( + "NestedObject", + {"deeperObject": GraphQLField(deeper_object), "name": GraphQLField(GraphQLString)}, +) + +another_nested_object = GraphQLObjectType( + "AnotherNestedObject", {"deeperObject": GraphQLField(deeper_object)} +) + +hero = { + "name": "Luke", + "id": 1, + "friends": friends, + "nestedObject": nested_object, + "AnotherNestedObject": another_nested_object, +} + +c = GraphQLObjectType( + "c", + { + "d": GraphQLField(GraphQLString), + "nonNullErrorField": GraphQLField(GraphQLNonNull(GraphQLString)), + }, +) + +e = GraphQLObjectType( + "e", + { + "f": GraphQLField(GraphQLString), + }, +) + +b = GraphQLObjectType( + "b", + { + "c": GraphQLField(c), + "e": GraphQLField(e), + }, +) + +a = GraphQLObjectType( + "a", + { + "b": GraphQLField(b), + "someField": GraphQLField(GraphQLString), + }, +) + +g = GraphQLObjectType( + "g", + { + "h": GraphQLField(GraphQLString), + }, +) + hero_type = GraphQLObjectType( "Hero", { @@ -43,24 +125,19 @@ "name": GraphQLField(GraphQLString), "nonNullName": GraphQLField(GraphQLNonNull(GraphQLString)), "friends": GraphQLField(GraphQLList(friend_type)), + "nestedObject": GraphQLField(nested_object), + "anotherNestedObject": GraphQLField(another_nested_object), }, ) -query = GraphQLObjectType("Query", {"hero": GraphQLField(hero_type)}) +query = GraphQLObjectType( + "Query", + {"hero": GraphQLField(hero_type), "a": GraphQLField(a), "g": GraphQLField(g)}, +) schema = GraphQLSchema(query) -class Friend(NamedTuple): - id: int - name: str - - -friends = [Friend(2, "Han"), Friend(3, "Leia"), Friend(4, "C-3PO")] - -hero = {"id": 1, "name": "Luke", "friends": friends} - - class Resolvers: """Various resolver functions for testing.""" @@ -76,19 +153,23 @@ async def null_async(_info) -> None: @staticmethod async def slow(_info) -> str: - """Simulate a slow async resolver returning a value.""" + """Simulate a slow async resolver returning a non-null value.""" await sleep(0) return "slow" + @staticmethod + async def slow_null(_info) -> None: + """Simulate a slow async resolver returning a null value.""" + await sleep(0) + @staticmethod def bad(_info) -> str: """Simulate a bad resolver raising an error.""" raise RuntimeError("bad") @staticmethod - async def friends(_info) -> AsyncGenerator[Friend, None]: - """A slow async generator yielding the first friend.""" - await sleep(0) + async def first_friend(_info) -> AsyncGenerator[Friend, None]: + """An async generator yielding the first friend.""" yield friends[0] @@ -114,28 +195,77 @@ def modified_args(args: dict[str, Any], **modifications: Any) -> dict[str, Any]: def describe_execute_defer_directive(): + def can_format_and_print_pending_result(): + result = PendingResult("foo", []) + assert result.formatted == {"id": "foo", "path": []} + assert str(result) == "PendingResult(id='foo', path=[])" + + result = 
PendingResult(id="foo", path=["bar", 1], label="baz") + assert result.formatted == {"id": "foo", "path": ["bar", 1], "label": "baz"} + assert str(result) == "PendingResult(id='foo', path=['bar', 1], label='baz')" + + def can_compare_pending_result(): + args: dict[str, Any] = {"id": "foo", "path": ["bar", 1], "label": "baz"} + result = PendingResult(**args) + assert result == PendingResult(**args) + assert result != PendingResult(**modified_args(args, id="bar")) + assert result != PendingResult(**modified_args(args, path=["bar", 2])) + assert result != PendingResult(**modified_args(args, label="bar")) + assert result == tuple(args.values()) + assert result == tuple(args.values())[:2] + assert result != tuple(args.values())[:1] + assert result != tuple(args.values())[:1] + (["bar", 2],) + assert result == args + assert result != {**args, "id": "bar"} + assert result != {**args, "path": ["bar", 2]} + assert result != {**args, "label": "bar"} + + def can_format_and_print_completed_result(): + result = CompletedResult("foo") + assert result.formatted == {"id": "foo"} + assert str(result) == "CompletedResult(id='foo')" + + result = CompletedResult(id="foo", errors=[GraphQLError("oops")]) + assert result.formatted == {"id": "foo", "errors": [{"message": "oops"}]} + assert str(result) == "CompletedResult(id='foo', errors=[GraphQLError('oops')])" + + def can_compare_completed_result(): + args: dict[str, Any] = {"id": "foo", "errors": []} + result = CompletedResult(**args) + assert result == CompletedResult(**args) + assert result != CompletedResult(**modified_args(args, id="bar")) + assert result != CompletedResult( + **modified_args(args, errors=[GraphQLError("oops")]) + ) + assert result == tuple(args.values()) + assert result != tuple(args.values())[:1] + assert result != tuple(args.values())[:1] + ([GraphQLError("oops")],) + assert result == args + assert result != {**args, "id": "bar"} + assert result != {**args, "errors": [{"message": "oops"}]} + def can_format_and_print_incremental_defer_result(): - result = IncrementalDeferResult() - assert result.formatted == {"data": None} - assert str(result) == "IncrementalDeferResult(data=None, errors=None)" + result = IncrementalDeferResult(data={}, id="foo") + assert result.formatted == {"data": {}, "id": "foo"} + assert str(result) == "IncrementalDeferResult(data={}, id='foo')" result = IncrementalDeferResult( data={"hello": "world"}, - errors=[GraphQLError("msg")], - path=["foo", 1], - label="bar", + id="foo", + sub_path=["bar", 1], + errors=[GraphQLError("oops")], extensions={"baz": 2}, ) assert result.formatted == { "data": {"hello": "world"}, - "errors": [{"message": "msg"}], + "id": "foo", + "subPath": ["bar", 1], + "errors": [{"message": "oops"}], "extensions": {"baz": 2}, - "label": "bar", - "path": ["foo", 1], } assert ( str(result) == "IncrementalDeferResult(data={'hello': 'world'}," - " errors=[GraphQLError('msg')], path=['foo', 1], label='bar'," + " id='foo', sub_path=['bar', 1], errors=[GraphQLError('oops')]," " extensions={'baz': 2})" ) @@ -143,9 +273,9 @@ def can_format_and_print_incremental_defer_result(): def can_compare_incremental_defer_result(): args: dict[str, Any] = { "data": {"hello": "world"}, - "errors": [GraphQLError("msg")], - "path": ["foo", 1], - "label": "bar", + "id": "foo", + "sub_path": ["bar", 1], + "errors": [GraphQLError("oops")], "extensions": {"baz": 2}, } result = IncrementalDeferResult(**args) @@ -153,9 +283,11 @@ def can_compare_incremental_defer_result(): assert result != IncrementalDeferResult( 
**modified_args(args, data={"hello": "foo"}) ) + assert result != IncrementalDeferResult(**modified_args(args, id="bar")) + assert result != IncrementalDeferResult( + **modified_args(args, sub_path=["bar", 2]) + ) assert result != IncrementalDeferResult(**modified_args(args, errors=[])) - assert result != IncrementalDeferResult(**modified_args(args, path=["foo", 2])) - assert result != IncrementalDeferResult(**modified_args(args, label="baz")) assert result != IncrementalDeferResult( **modified_args(args, extensions={"baz": 1}) ) @@ -164,54 +296,50 @@ def can_compare_incremental_defer_result(): assert result == tuple(args.values())[:3] assert result == tuple(args.values())[:2] assert result != tuple(args.values())[:1] - assert result != ({"hello": "world"}, []) + assert result != ({"hello": "world"}, "bar") + args["subPath"] = args.pop("sub_path") assert result == args - assert result == dict(list(args.items())[:2]) - assert result == dict(list(args.items())[:3]) - assert result != dict(list(args.items())[:2] + [("path", ["foo", 2])]) - assert result != {**args, "label": "baz"} + assert result != {**args, "data": {"hello": "foo"}} + assert result != {**args, "id": "bar"} + assert result != {**args, "subPath": ["bar", 2]} + assert result != {**args, "errors": []} + assert result != {**args, "extensions": {"baz": 1}} def can_format_and_print_initial_incremental_execution_result(): result = InitialIncrementalExecutionResult() - assert result.formatted == {"data": None, "hasNext": False} - assert ( - str(result) == "InitialIncrementalExecutionResult(data=None, errors=None)" - ) + assert result.formatted == {"data": None, "hasNext": False, "pending": []} + assert str(result) == "InitialIncrementalExecutionResult(data=None)" result = InitialIncrementalExecutionResult(has_next=True) - assert result.formatted == {"data": None, "hasNext": True} - assert ( - str(result) - == "InitialIncrementalExecutionResult(data=None, errors=None, has_next)" - ) + assert result.formatted == {"data": None, "hasNext": True, "pending": []} + assert str(result) == "InitialIncrementalExecutionResult(data=None, has_next)" - incremental = [IncrementalDeferResult(label="foo")] result = InitialIncrementalExecutionResult( data={"hello": "world"}, errors=[GraphQLError("msg")], - incremental=incremental, + pending=[PendingResult("foo", ["bar"])], has_next=True, extensions={"baz": 2}, ) assert result.formatted == { "data": {"hello": "world"}, - "errors": [GraphQLError("msg")], - "incremental": [{"data": None, "label": "foo"}], + "errors": [{"message": "msg"}], + "pending": [{"id": "foo", "path": ["bar"]}], "hasNext": True, "extensions": {"baz": 2}, } assert ( str(result) == "InitialIncrementalExecutionResult(" - "data={'hello': 'world'}, errors=[GraphQLError('msg')], incremental[1]," - " has_next, extensions={'baz': 2})" + "data={'hello': 'world'}, errors=[GraphQLError('msg')]," + " pending=[PendingResult(id='foo', path=['bar'])], has_next," + " extensions={'baz': 2})" ) def can_compare_initial_incremental_execution_result(): - incremental = [IncrementalDeferResult(label="foo")] args: dict[str, Any] = { "data": {"hello": "world"}, "errors": [GraphQLError("msg")], - "incremental": incremental, + "pending": [PendingResult("foo", ["bar"])], "has_next": True, "extensions": {"baz": 2}, } @@ -224,7 +352,7 @@ def can_compare_initial_incremental_execution_result(): **modified_args(args, errors=[]) ) assert result != InitialIncrementalExecutionResult( - **modified_args(args, incremental=[]) + **modified_args(args, pending=[]) ) assert 
result != InitialIncrementalExecutionResult( **modified_args(args, has_next=False) @@ -233,6 +361,7 @@ def can_compare_initial_incremental_execution_result(): **modified_args(args, extensions={"baz": 1}) ) assert result == tuple(args.values()) + assert result == tuple(args.values())[:5] assert result == tuple(args.values())[:4] assert result == tuple(args.values())[:3] assert result == tuple(args.values())[:2] @@ -242,23 +371,40 @@ def can_compare_initial_incremental_execution_result(): assert result == { "data": {"hello": "world"}, "errors": [GraphQLError("msg")], - "incremental": incremental, + "pending": [PendingResult("foo", ["bar"])], "hasNext": True, "extensions": {"baz": 2}, } - assert result == { + assert result != { + "errors": [GraphQLError("msg")], + "pending": [PendingResult("foo", ["bar"])], + "hasNext": True, + "extensions": {"baz": 2}, + } + assert result != { + "data": {"hello": "world"}, + "pending": [PendingResult("foo", ["bar"])], + "hasNext": True, + "extensions": {"baz": 2}, + } + assert result != { "data": {"hello": "world"}, "errors": [GraphQLError("msg")], - "incremental": incremental, "hasNext": True, + "extensions": {"baz": 2}, } assert result != { "data": {"hello": "world"}, "errors": [GraphQLError("msg")], - "incremental": incremental, - "hasNext": False, + "pending": [PendingResult("foo", ["bar"])], "extensions": {"baz": 2}, } + assert result != { + "data": {"hello": "world"}, + "errors": [GraphQLError("msg")], + "pending": [PendingResult("foo", ["bar"])], + "hasNext": True, + } def can_format_and_print_subsequent_incremental_execution_result(): result = SubsequentIncrementalExecutionResult() @@ -269,31 +415,48 @@ def can_format_and_print_subsequent_incremental_execution_result(): assert result.formatted == {"hasNext": True} assert str(result) == "SubsequentIncrementalExecutionResult(has_next)" - incremental = [IncrementalDeferResult(label="foo")] + pending = [PendingResult("foo", ["bar"])] + incremental = [ + cast("IncrementalResult", IncrementalDeferResult({"foo": 1}, "bar")) + ] + completed = [CompletedResult("foo")] result = SubsequentIncrementalExecutionResult( - incremental=incremental, has_next=True, + pending=pending, + incremental=incremental, + completed=completed, extensions={"baz": 2}, ) assert result.formatted == { - "incremental": [{"data": None, "label": "foo"}], "hasNext": True, + "pending": [{"id": "foo", "path": ["bar"]}], + "incremental": [{"data": {"foo": 1}, "id": "bar"}], + "completed": [{"id": "foo"}], "extensions": {"baz": 2}, } assert ( - str(result) == "SubsequentIncrementalExecutionResult(incremental[1]," - " has_next, extensions={'baz': 2})" + str(result) == "SubsequentIncrementalExecutionResult(has_next," + " pending[1], incremental[1], completed[1], extensions={'baz': 2})" ) def can_compare_subsequent_incremental_execution_result(): - incremental = [IncrementalDeferResult(label="foo")] + pending = [PendingResult("foo", ["bar"])] + incremental = [ + cast("IncrementalResult", IncrementalDeferResult({"foo": 1}, "bar")) + ] + completed = [CompletedResult("foo")] args: dict[str, Any] = { - "incremental": incremental, "has_next": True, + "pending": pending, + "incremental": incremental, + "completed": completed, "extensions": {"baz": 2}, } result = SubsequentIncrementalExecutionResult(**args) assert result == SubsequentIncrementalExecutionResult(**args) + assert result != SubsequentIncrementalExecutionResult( + **modified_args(args, pending=[]) + ) assert result != SubsequentIncrementalExecutionResult( **modified_args(args, 
incremental=[]) ) @@ -304,36 +467,89 @@ def can_compare_subsequent_incremental_execution_result(): **modified_args(args, extensions={"baz": 1}) ) assert result == tuple(args.values()) + assert result == tuple(args.values())[:3] assert result == tuple(args.values())[:2] assert result != tuple(args.values())[:1] assert result != (incremental, False) assert result == { + "hasNext": True, + "pending": pending, + "incremental": incremental, + "completed": completed, + "extensions": {"baz": 2}, + } + assert result != { + "pending": pending, "incremental": incremental, + "completed": completed, + "extensions": {"baz": 2}, + } + assert result != { + "hasNext": True, + "incremental": incremental, + "completed": completed, + "extensions": {"baz": 2}, + } + assert result != { "hasNext": True, + "pending": pending, + "completed": completed, "extensions": {"baz": 2}, } - assert result == {"incremental": incremental, "hasNext": True} assert result != { + "hasNext": True, + "pending": pending, "incremental": incremental, - "hasNext": False, "extensions": {"baz": 2}, } + assert result != { + "hasNext": True, + "pending": pending, + "incremental": incremental, + "completed": completed, + } + + def can_print_deferred_grouped_field_set_record(): + record = DeferredGroupedFieldSetRecord([], {}, False) + assert ( + str(record) == "DeferredGroupedFieldSetRecord(" + "deferred_fragment_records=[], grouped_field_set={})" + ) + record = DeferredGroupedFieldSetRecord([], {}, True, Path(None, "foo", "Foo")) + assert ( + str(record) == "DeferredGroupedFieldSetRecord(" + "deferred_fragment_records=[], grouped_field_set={}, path=['foo'])" + ) def can_print_deferred_fragment_record(): - record = DeferredFragmentRecord(None, None, None) - assert str(record) == "DeferredFragmentRecord(path=[])" - record = DeferredFragmentRecord("foo", Path(None, "bar", "Bar"), record) + record = DeferredFragmentRecord(None, None) + assert str(record) == "DeferredFragmentRecord()" + record = DeferredFragmentRecord(Path(None, "bar", "Bar"), "foo") + assert str(record) == "DeferredFragmentRecord(path=['bar'], label='foo')" + + def can_print_stream_record(): + record = StreamRecord(Path(None, "bar", "Bar"), "foo") + assert str(record) == "StreamRecord(path=['bar'], label='foo')" + record.path = [] + assert str(record) == "StreamRecord(label='foo')" + record.label = None + assert str(record) == "StreamRecord()" + + def can_print_stream_items_record(): + record = StreamItemsRecord( + StreamRecord(Path(None, "bar", "Bar"), "foo"), + Path(None, "baz", "Baz"), + ) assert ( - str(record) == "DeferredFragmentRecord(" - "path=['bar'], label='foo', parent_context)" + str(record) == "StreamItemsRecord(stream_record=StreamRecord(" + "path=['bar'], label='foo'), path=['baz'])" ) - record.data = {"hello": "world"} + record = StreamItemsRecord(StreamRecord(Path(None, "bar", "Bar"))) assert ( - str(record) == "DeferredFragmentRecord(" - "path=['bar'], label='foo', parent_context, data)" + str(record) == "StreamItemsRecord(stream_record=StreamRecord(path=['bar']))" ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_defer_fragments_containing_scalar_types(): document = parse( """ @@ -351,14 +567,19 @@ async def can_defer_fragments_containing_scalar_types(): result = await complete(document) assert result == [ - {"data": {"hero": {"id": "1"}}, "hasNext": True}, { - "incremental": [{"data": {"name": "Luke"}, "path": ["hero"]}], + "data": {"hero": {"id": "1"}}, + "pending": [{"id": "0", "path": ["hero"]}], + "hasNext": True, + }, + { + "incremental": 
[{"data": {"name": "Luke"}, "id": "0"}], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_disable_defer_using_if_argument(): document = parse( """ @@ -375,16 +596,9 @@ async def can_disable_defer_using_if_argument(): ) result = await complete(document) - assert result == { - "data": { - "hero": { - "id": "1", - "name": "Luke", - }, - }, - } + assert result == {"data": {"hero": {"id": "1", "name": "Luke"}}} - @pytest.mark.asyncio() + @pytest.mark.asyncio async def does_not_disable_defer_with_null_if_argument(): document = parse( """ @@ -402,14 +616,19 @@ async def does_not_disable_defer_with_null_if_argument(): result = await complete(document) assert result == [ - {"data": {"hero": {"id": "1"}}, "hasNext": True}, { - "incremental": [{"data": {"name": "Luke"}, "path": ["hero"]}], + "data": {"hero": {"id": "1"}}, + "pending": [{"id": "0", "path": ["hero"]}], + "hasNext": True, + }, + { + "incremental": [{"data": {"name": "Luke"}, "id": "0"}], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def throws_an_error_for_defer_directive_with_non_string_label(): document = parse( """ @@ -430,7 +649,7 @@ async def throws_an_error_for_defer_directive_with_non_string_label(): ], } - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_defer_fragments_on_the_top_level_query_field(): document = parse( """ @@ -447,16 +666,19 @@ async def can_defer_fragments_on_the_top_level_query_field(): result = await complete(document) assert result == [ - {"data": {}, "hasNext": True}, { - "incremental": [ - {"data": {"hero": {"id": "1"}}, "path": [], "label": "DeferQuery"} - ], + "data": {}, + "pending": [{"id": "0", "path": [], "label": "DeferQuery"}], + "hasNext": True, + }, + { + "incremental": [{"data": {"hero": {"id": "1"}}, "id": "0"}], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_defer_fragments_with_errors_on_the_top_level_query_field(): document = parse( """ @@ -473,7 +695,11 @@ async def can_defer_fragments_with_errors_on_the_top_level_query_field(): result = await complete(document, {"hero": {**hero, "name": Resolvers.bad}}) assert result == [ - {"data": {}, "hasNext": True}, + { + "data": {}, + "pending": [{"id": "0", "path": [], "label": "DeferQuery"}], + "hasNext": True, + }, { "incremental": [ { @@ -485,15 +711,15 @@ async def can_defer_fragments_with_errors_on_the_top_level_query_field(): "path": ["hero", "name"], } ], - "path": [], - "label": "DeferQuery", + "id": "0", } ], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_defer_a_fragment_within_an_already_deferred_fragment(): document = parse( """ @@ -516,9 +742,17 @@ async def can_defer_a_fragment_within_an_already_deferred_fragment(): result = await complete(document) assert result == [ - {"data": {"hero": {}}, "hasNext": True}, + { + "data": {"hero": {}}, + "pending": [ + {"id": "0", "path": ["hero"], "label": "DeferTop"}, + {"id": "1", "path": ["hero"], "label": "DeferNested"}, + ], + "hasNext": True, + }, { "incremental": [ + {"data": {"id": "1"}, "id": "0"}, { "data": { "friends": [ @@ -527,20 +761,15 @@ async def can_defer_a_fragment_within_an_already_deferred_fragment(): {"name": "C-3PO"}, ] }, - "path": ["hero"], - "label": "DeferNested", - }, - { - "data": {"id": "1"}, - "path": ["hero"], - "label": "DeferTop", + "id": "1", }, ], + "completed": [{"id": "0"}, {"id": "1"}], 
"hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_defer_a_fragment_that_is_also_not_deferred_with_deferred_first(): document = parse( """ @@ -558,20 +787,15 @@ async def can_defer_a_fragment_that_is_also_not_deferred_with_deferred_first(): result = await complete(document) assert result == [ - {"data": {"hero": {"name": "Luke"}}, "hasNext": True}, { - "incremental": [ - { - "data": {"name": "Luke"}, - "path": ["hero"], - "label": "DeferTop", - }, - ], - "hasNext": False, + "data": {"hero": {"name": "Luke"}}, + "pending": [{"id": "0", "path": ["hero"], "label": "DeferTop"}], + "hasNext": True, }, + {"completed": [{"id": "0"}], "hasNext": False}, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_defer_a_fragment_that_is_also_not_deferred_with_non_deferred_first(): document = parse( """ @@ -589,20 +813,15 @@ async def can_defer_a_fragment_that_is_also_not_deferred_with_non_deferred_first result = await complete(document) assert result == [ - {"data": {"hero": {"name": "Luke"}}, "hasNext": True}, { - "incremental": [ - { - "data": {"name": "Luke"}, - "path": ["hero"], - "label": "DeferTop", - }, - ], - "hasNext": False, + "data": {"hero": {"name": "Luke"}}, + "pending": [{"id": "0", "path": ["hero"], "label": "DeferTop"}], + "hasNext": True, }, + {"completed": [{"id": "0"}], "hasNext": False}, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_defer_an_inline_fragment(): document = parse( """ @@ -619,108 +838,1254 @@ async def can_defer_an_inline_fragment(): result = await complete(document) assert result == [ - {"data": {"hero": {"id": "1"}}, "hasNext": True}, { - "incremental": [ - { - "data": {"name": "Luke"}, - "path": ["hero"], - "label": "InlineDeferred", - }, - ], + "data": {"hero": {"id": "1"}}, + "pending": [{"id": "0", "path": ["hero"], "label": "InlineDeferred"}], + "hasNext": True, + }, + { + "incremental": [{"data": {"name": "Luke"}, "id": "0"}], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() - async def handles_errors_thrown_in_deferred_fragments(): + @pytest.mark.asyncio + async def does_not_emit_empty_defer_fragments(): document = parse( """ query HeroNameQuery { hero { - id - ...NameFragment @defer + ... @defer { + name @skip(if: true) + } } } - fragment NameFragment on Hero { + fragment TopFragment on Hero { name } """ ) - result = await complete(document, {"hero": {**hero, "name": Resolvers.bad}}) + result = await complete(document) + + assert result == [ + { + "data": {"hero": {}}, + "pending": [{"id": "0", "path": ["hero"]}], + "hasNext": True, + }, + {"completed": [{"id": "0"}], "hasNext": False}, + ] + + @pytest.mark.asyncio + async def separately_emits_defer_fragments_different_labels_varying_fields(): + document = parse( + """ + query HeroNameQuery { + hero { + ... @defer(label: "DeferID") { + id + } + ... 
@defer(label: "DeferName") { + name + } + } + } + """ + ) + result = await complete(document) assert result == [ - {"data": {"hero": {"id": "1"}}, "hasNext": True}, + { + "data": {"hero": {}}, + "pending": [ + {"id": "0", "path": ["hero"], "label": "DeferID"}, + {"id": "1", "path": ["hero"], "label": "DeferName"}, + ], + "hasNext": True, + }, { "incremental": [ - { - "data": {"name": None}, - "path": ["hero"], - "errors": [ - { - "message": "bad", - "locations": [{"line": 9, "column": 15}], - "path": ["hero", "name"], - } - ], - }, + {"data": {"id": "1"}, "id": "0"}, + {"data": {"name": "Luke"}, "id": "1"}, ], + "completed": [{"id": "0"}, {"id": "1"}], "hasNext": False, }, ] - @pytest.mark.asyncio() - async def handles_non_nullable_errors_thrown_in_deferred_fragments(): + @pytest.mark.asyncio + async def separately_emits_defer_fragments_different_labels_varying_subfields(): document = parse( """ query HeroNameQuery { - hero { - id - ...NameFragment @defer + ... @defer(label: "DeferID") { + hero { + id + } + } + ... @defer(label: "DeferName") { + hero { + name + } } - } - fragment NameFragment on Hero { - nonNullName } """ ) - result = await complete( - document, {"hero": {**hero, "nonNullName": Resolvers.null}} - ) + result = await complete(document) assert result == [ - {"data": {"hero": {"id": "1"}}, "hasNext": True}, + { + "data": {}, + "pending": [ + {"id": "0", "path": [], "label": "DeferID"}, + {"id": "1", "path": [], "label": "DeferName"}, + ], + "hasNext": True, + }, { "incremental": [ - { - "data": None, - "path": ["hero"], - "errors": [ - { - "message": "Cannot return null for non-nullable field" - " Hero.nonNullName.", - "locations": [{"line": 9, "column": 15}], - "path": ["hero", "nonNullName"], - } - ], - }, + {"data": {"hero": {}}, "id": "0"}, + {"data": {"id": "1"}, "id": "0", "subPath": ["hero"]}, + {"data": {"name": "Luke"}, "id": "1", "subPath": ["hero"]}, ], + "completed": [{"id": "0"}, {"id": "1"}], "hasNext": False, }, ] - @pytest.mark.asyncio() - async def handles_non_nullable_errors_thrown_outside_deferred_fragments(): + @pytest.mark.asyncio + async def separately_emits_defer_fragments_different_labels_var_subfields_async(): document = parse( """ query HeroNameQuery { - hero { - nonNullName - ...NameFragment @defer + ... @defer(label: "DeferID") { + hero { + id + } + } + ... @defer(label: "DeferName") { + hero { + name + } } - } - fragment NameFragment on Hero { - id + } + """ + ) + + async def resolve(value): + return value + + result = await complete( + document, + { + "hero": { + "id": lambda _info: resolve(1), + "name": lambda _info: resolve("Luke"), + } + }, + ) + + assert result == [ + { + "data": {}, + "pending": [ + {"id": "0", "path": [], "label": "DeferID"}, + {"id": "1", "path": [], "label": "DeferName"}, + ], + "hasNext": True, + }, + { + "incremental": [ + {"data": {"hero": {}}, "id": "0"}, + {"data": {"id": "1"}, "id": "0", "subPath": ["hero"]}, + {"data": {"name": "Luke"}, "id": "1", "subPath": ["hero"]}, + ], + "completed": [{"id": "0"}, {"id": "1"}], + "hasNext": False, + }, + ] + + @pytest.mark.asyncio + async def separately_emits_defer_fragments_var_subfields_same_prio_diff_level(): + document = parse( + """ + query HeroNameQuery { + hero { + ... @defer(label: "DeferID") { + id + } + } + ... 
@defer(label: "DeferName") { + hero { + name + } + } + } + """ + ) + result = await complete(document) + + assert result == [ + { + "data": {"hero": {}}, + "pending": [ + {"id": "0", "path": [], "label": "DeferName"}, + {"id": "1", "path": ["hero"], "label": "DeferID"}, + ], + "hasNext": True, + }, + { + "incremental": [ + {"data": {"id": "1"}, "id": "1"}, + {"data": {"name": "Luke"}, "id": "0", "subPath": ["hero"]}, + ], + "completed": [{"id": "1"}, {"id": "0"}], + "hasNext": False, + }, + ] + + @pytest.mark.asyncio + async def separately_emits_nested_defer_frags_var_subfields_same_prio_diff_level(): + document = parse( + """ + query HeroNameQuery { + ... @defer(label: "DeferName") { + hero { + name + ... @defer(label: "DeferID") { + id + } + } + } + } + """ + ) + result = await complete(document) + + assert result == [ + { + "data": {}, + "pending": [{"id": "0", "path": [], "label": "DeferName"}], + "hasNext": True, + }, + { + "pending": [{"id": "1", "path": ["hero"], "label": "DeferID"}], + "incremental": [{"data": {"hero": {"name": "Luke"}}, "id": "0"}], + "completed": [{"id": "0"}], + "hasNext": True, + }, + { + "incremental": [{"data": {"id": "1"}, "id": "1"}], + "completed": [{"id": "1"}], + "hasNext": False, + }, + ] + + @pytest.mark.asyncio + async def can_deduplicate_multiple_defers_on_the_same_object(): + document = parse( + """ + query { + hero { + friends { + ... @defer { + ...FriendFrag + ... @defer { + ...FriendFrag + ... @defer { + ...FriendFrag + ... @defer { + ...FriendFrag + } + } + } + } + } + } + } + + fragment FriendFrag on Friend { + id + name + } + """ + ) + result = await complete(document) + + assert result == [ + { + "data": {"hero": {"friends": [{}, {}, {}]}}, + "pending": [ + {"id": "0", "path": ["hero", "friends", 0]}, + {"id": "1", "path": ["hero", "friends", 0]}, + {"id": "2", "path": ["hero", "friends", 0]}, + {"id": "3", "path": ["hero", "friends", 0]}, + {"id": "4", "path": ["hero", "friends", 1]}, + {"id": "5", "path": ["hero", "friends", 1]}, + {"id": "6", "path": ["hero", "friends", 1]}, + {"id": "7", "path": ["hero", "friends", 1]}, + {"id": "8", "path": ["hero", "friends", 2]}, + {"id": "9", "path": ["hero", "friends", 2]}, + {"id": "10", "path": ["hero", "friends", 2]}, + {"id": "11", "path": ["hero", "friends", 2]}, + ], + "hasNext": True, + }, + { + "incremental": [ + {"data": {"id": "2", "name": "Han"}, "id": "0"}, + {"data": {"id": "3", "name": "Leia"}, "id": "4"}, + {"data": {"id": "4", "name": "C-3PO"}, "id": "8"}, + ], + "completed": [ + {"id": "1"}, + {"id": "2"}, + {"id": "3"}, + {"id": "5"}, + {"id": "6"}, + {"id": "7"}, + {"id": "9"}, + {"id": "10"}, + {"id": "11"}, + {"id": "0"}, + {"id": "4"}, + {"id": "8"}, + ], + "hasNext": False, + }, + ] + + @pytest.mark.asyncio + async def deduplicates_fields_present_in_the_initial_payload(): + document = parse( + """ + query { + hero { + nestedObject { + deeperObject { + foo + } + } + anotherNestedObject { + deeperObject { + foo + } + } + ... 
@defer { + nestedObject { + deeperObject { + bar + } + } + anotherNestedObject { + deeperObject { + foo + } + } + } + } + } + """ + ) + result = await complete( + document, + { + "hero": { + "nestedObject": {"deeperObject": {"foo": "foo", "bar": "bar"}}, + "anotherNestedObject": {"deeperObject": {"foo": "foo"}}, + } + }, + ) + + assert result == [ + { + "data": { + "hero": { + "nestedObject": {"deeperObject": {"foo": "foo"}}, + "anotherNestedObject": {"deeperObject": {"foo": "foo"}}, + } + }, + "pending": [{"id": "0", "path": ["hero"]}], + "hasNext": True, + }, + { + "incremental": [ + { + "data": {"bar": "bar"}, + "id": "0", + "subPath": ["nestedObject", "deeperObject"], + }, + ], + "completed": [{"id": "0"}], + "hasNext": False, + }, + ] + + @pytest.mark.asyncio + async def deduplicates_fields_present_in_a_parent_defer_payload(): + document = parse( + """ + query { + hero { + ... @defer { + nestedObject { + deeperObject { + foo + ... @defer { + foo + bar + } + } + } + } + } + } + """ + ) + result = await complete( + document, + {"hero": {"nestedObject": {"deeperObject": {"foo": "foo", "bar": "bar"}}}}, + ) + + assert result == [ + { + "data": {"hero": {}}, + "pending": [{"id": "0", "path": ["hero"]}], + "hasNext": True, + }, + { + "pending": [ + {"id": "1", "path": ["hero", "nestedObject", "deeperObject"]} + ], + "incremental": [ + { + "data": {"nestedObject": {"deeperObject": {"foo": "foo"}}}, + "id": "0", + }, + ], + "completed": [{"id": "0"}], + "hasNext": True, + }, + { + "incremental": [{"data": {"bar": "bar"}, "id": "1"}], + "completed": [{"id": "1"}], + "hasNext": False, + }, + ] + + @pytest.mark.asyncio + async def deduplicates_fields_with_deferred_fragments_at_multiple_levels(): + document = parse( + """ + query { + hero { + nestedObject { + deeperObject { + foo + } + } + ... @defer { + nestedObject { + deeperObject { + foo + bar + } + ... @defer { + deeperObject { + foo + bar + baz + ... @defer { + foo + bar + baz + bak + } + } + } + } + } + } + } + """ + ) + result = await complete( + document, + { + "hero": { + "nestedObject": { + "deeperObject": { + "foo": "foo", + "bar": "bar", + "baz": "baz", + "bak": "bak", + } + } + } + }, + ) + + assert result == [ + { + "data": { + "hero": { + "nestedObject": { + "deeperObject": { + "foo": "foo", + }, + }, + }, + }, + "pending": [{"id": "0", "path": ["hero"]}], + "hasNext": True, + }, + { + "pending": [{"id": "1", "path": ["hero", "nestedObject"]}], + "incremental": [ + { + "data": {"bar": "bar"}, + "id": "0", + "subPath": ["nestedObject", "deeperObject"], + }, + ], + "completed": [{"id": "0"}], + "hasNext": True, + }, + { + "pending": [ + {"id": "2", "path": ["hero", "nestedObject", "deeperObject"]} + ], + "incremental": [ + {"data": {"baz": "baz"}, "id": "1", "subPath": ["deeperObject"]}, + ], + "hasNext": True, + "completed": [{"id": "1"}], + }, + { + "incremental": [{"data": {"bak": "bak"}, "id": "2"}], + "completed": [{"id": "2"}], + "hasNext": False, + }, + ] + + @pytest.mark.asyncio + async def deduplicates_fields_from_deferred_fragments_branches_same_level(): + document = parse( + """ + query { + hero { + nestedObject { + deeperObject { + ... @defer { + foo + } + } + } + ... @defer { + nestedObject { + deeperObject { + ... 
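+                      # The sibling defer branch already delivers "foo", so
+                      # this nested defer contributes only "bar".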
@defer { + foo + bar + } + } + } + } + } + } + """ + ) + result = await complete( + document, + {"hero": {"nestedObject": {"deeperObject": {"foo": "foo", "bar": "bar"}}}}, + ) + + assert result == [ + { + "data": {"hero": {"nestedObject": {"deeperObject": {}}}}, + "pending": [ + {"id": "0", "path": ["hero"]}, + {"id": "1", "path": ["hero", "nestedObject", "deeperObject"]}, + ], + "hasNext": True, + }, + { + "pending": [ + {"id": "2", "path": ["hero", "nestedObject", "deeperObject"]} + ], + "incremental": [{"data": {"foo": "foo"}, "id": "1"}], + "completed": [{"id": "0"}, {"id": "1"}], + "hasNext": True, + }, + { + "incremental": [{"data": {"bar": "bar"}, "id": "2"}], + "completed": [{"id": "2"}], + "hasNext": False, + }, + ] + + @pytest.mark.asyncio + async def deduplicates_fields_from_deferred_fragments_branches_multi_levels(): + document = parse( + """ + query { + a { + b { + c { + d + } + ... @defer { + e { + f + } + } + } + } + ... @defer { + a { + b { + e { + f + } + } + } + g { + h + } + } + } + """ + ) + result = await complete( + document, + {"a": {"b": {"c": {"d": "d"}, "e": {"f": "f"}}}, "g": {"h": "h"}}, + ) + + assert result == [ + { + "data": {"a": {"b": {"c": {"d": "d"}}}}, + "pending": [{"id": "0", "path": []}, {"id": "1", "path": ["a", "b"]}], + "hasNext": True, + }, + { + "incremental": [ + {"data": {"e": {"f": "f"}}, "id": "1"}, + {"data": {"g": {"h": "h"}}, "id": "0"}, + ], + "completed": [{"id": "1"}, {"id": "0"}], + "hasNext": False, + }, + ] + + @pytest.mark.asyncio + async def nulls_cross_defer_boundaries_null_first(): + document = parse( + """ + query { + ... @defer { + a { + someField + b { + c { + nonNullErrorField + } + } + } + } + a { + ... @defer { + b { + c { + d + } + } + } + } + } + """ + ) + result = await complete( + document, + {"a": {"b": {"c": {"d": "d"}}, "someField": "someField"}}, + ) + + assert result == [ + { + "data": {"a": {}}, + "pending": [{"id": "0", "path": []}, {"id": "1", "path": ["a"]}], + "hasNext": True, + }, + { + "incremental": [ + {"data": {"b": {"c": {}}}, "id": "1"}, + {"data": {"d": "d"}, "id": "1", "subPath": ["b", "c"]}, + ], + "completed": [ + { + "id": "0", + "errors": [ + { + "message": "Cannot return null" + " for non-nullable field c.nonNullErrorField.", + "locations": [{"line": 8, "column": 23}], + "path": ["a", "b", "c", "nonNullErrorField"], + }, + ], + }, + {"id": "1"}, + ], + "hasNext": False, + }, + ] + + async def nulls_cross_defer_boundaries_value_first(): + document = parse( + """ + query { + ... @defer { + a { + b { + c { + d + } + } + } + } + a { + ... @defer { + someField + b { + c { + nonNullErrorField + } + } + } + } + } + """ + ) + result = await complete( + document, + { + "a": { + "b": {"c": {"d": "d"}, "nonNullErrorFIeld": None}, + "someField": "someField", + } + }, + ) + + assert result == [ + { + "data": {"a": {}}, + "pending": [{"id": "0", "path": []}, {"id": "1", "path": ["a"]}], + "hasNext": True, + }, + { + "incremental": [ + {"data": {"b": {"c": {}}}, "id": "1"}, + {"data": {"d": "d"}, "id": "0", "subPath": ["a", "b", "c"]}, + ], + "completed": [ + { + "id": "1", + "errors": [ + { + "message": "Cannot return null" + " for non-nullable field c.nonNullErrorField.", + "locations": [{"line": 17, "column": 23}], + "path": ["a", "b", "c", "nonNullErrorField"], + }, + ], + }, + {"id": "0"}, + ], + "hasNext": False, + }, + ] + + async def filters_a_payload_with_a_null_that_cannot_be_merged(): + document = parse( + """ + query { + ... 
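+            # The payload of this branch cannot be merged once the slow
+            # nonNullErrorField resolves to null; it is dropped and the
+            # fragment completes with the error instead.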
@defer { + a { + someField + b { + c { + nonNullErrorField + } + } + } + } + a { + ... @defer { + b { + c { + d + } + } + } + } + } + """ + ) + + result = await complete( + document, + { + "a": { + "b": {"c": {"d": "d", "nonNullErrorField": Resolvers.slow_null}}, + "someField": "someField", + } + }, + ) + + assert result == [ + { + "data": {"a": {}}, + "pending": [{"id": "0", "path": []}, {"id": "1", "path": ["a"]}], + "hasNext": True, + }, + { + "incremental": [ + {"data": {"b": {"c": {}}}, "id": "1"}, + {"data": {"d": "d"}, "id": "1", "subPath": ["b", "c"]}, + ], + "completed": [{"id": "1"}], + "hasNext": True, + }, + { + "completed": [ + { + "id": "0", + "errors": [ + { + "message": "Cannot return null" + " for non-nullable field c.nonNullErrorField.", + "locations": [{"line": 8, "column": 23}], + "path": ["a", "b", "c", "nonNullErrorField"], + }, + ], + }, + ], + "hasNext": False, + }, + ] + + async def cancels_deferred_fields_when_initial_result_exhibits_null_bubbling(): + document = parse( + """ + query { + hero { + nonNullName + } + ... @defer { + hero { + name + } + } + } + """ + ) + result = await complete( + document, {"hero": {**hero, "nonNullName": lambda _info: None}} + ) + + assert result == { + "data": {"hero": None}, + "errors": [ + { + "message": "Cannot return null" + " for non-nullable field Hero.nonNullName.", + "locations": [{"line": 4, "column": 17}], + "path": ["hero", "nonNullName"], + }, + ], + } + + async def cancels_deferred_fields_when_deferred_result_exhibits_null_bubbling(): + document = parse( + """ + query { + ... @defer { + hero { + nonNullName + name + } + } + } + """ + ) + result = await complete( + document, {"hero": {**hero, "nonNullName": lambda _info: None}} + ) + + assert result == [ + { + "data": {}, + "pending": [{"id": "0", "path": []}], + "hasNext": True, + }, + { + "incremental": [ + { + "data": {"hero": None}, + "id": "0", + "errors": [ + { + "message": "Cannot return null" + " for non-nullable field Hero.nonNullName.", + "locations": [{"line": 5, "column": 19}], + "path": ["hero", "nonNullName"], + }, + ], + }, + ], + "completed": [{"id": "0"}], + "hasNext": False, + }, + ] + + async def deduplicates_list_fields(): + document = parse( + """ + query { + hero { + friends { + name + } + ... @defer { + friends { + name + } + } + } + } + """ + ) + + result = await complete(document) + + assert result == [ + { + "data": { + "hero": { + "friends": [ + {"name": "Han"}, + {"name": "Leia"}, + {"name": "C-3PO"}, + ] + } + }, + "pending": [{"id": "0", "path": ["hero"]}], + "hasNext": True, + }, + {"completed": [{"id": "0"}], "hasNext": False}, + ] + + async def deduplicates_async_iterable_list_fields(): + document = parse( + """ + query { + hero { + friends { + name + } + ... @defer { + friends { + name + } + } + } + } + """ + ) + + result = await complete( + document, {"hero": {**hero, "friends": Resolvers.first_friend}} + ) + + assert result == [ + { + "data": {"hero": {"friends": [{"name": "Han"}]}}, + "pending": [{"id": "0", "path": ["hero"]}], + "hasNext": True, + }, + {"completed": [{"id": "0"}], "hasNext": False}, + ] + + async def deduplicates_empty_async_iterable_list_fields(): + document = parse( + """ + query { + hero { + friends { + name + } + ... 
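+              # Re-selecting "friends" adds nothing beyond the initial
+              # payload, so only a bare "completed" entry is emitted.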
@defer { + friends { + name + } + } + } + } + """ + ) + + async def resolve_friends(_info): + await sleep(0) + for friend in []: # type: ignore + yield friend # pragma: no cover + + result = await complete( + document, {"hero": {**hero, "friends": resolve_friends}} + ) + + assert result == [ + { + "data": {"hero": {"friends": []}}, + "pending": [{"id": "0", "path": ["hero"]}], + "hasNext": True, + }, + {"completed": [{"id": "0"}], "hasNext": False}, + ] + + async def does_not_deduplicate_list_fields_with_non_overlapping_fields(): + document = parse( + """ + query { + hero { + friends { + name + } + ... @defer { + friends { + id + } + } + } + } + """ + ) + result = await complete(document) + + assert result == [ + { + "data": { + "hero": { + "friends": [ + {"name": "Han"}, + {"name": "Leia"}, + {"name": "C-3PO"}, + ] + } + }, + "pending": [{"id": "0", "path": ["hero"]}], + "hasNext": True, + }, + { + "incremental": [ + {"data": {"id": "2"}, "id": "0", "subPath": ["friends", 0]}, + {"data": {"id": "3"}, "id": "0", "subPath": ["friends", 1]}, + {"data": {"id": "4"}, "id": "0", "subPath": ["friends", 2]}, + ], + "completed": [{"id": "0"}], + "hasNext": False, + }, + ] + + async def deduplicates_list_fields_that_return_empty_lists(): + document = parse( + """ + query { + hero { + friends { + name + } + ... @defer { + friends { + name + } + } + } + } + """ + ) + result = await complete( + document, {"hero": {**hero, "friends": lambda _info: []}} + ) + + assert result == [ + { + "data": {"hero": {"friends": []}}, + "pending": [{"id": "0", "path": ["hero"]}], + "hasNext": True, + }, + {"completed": [{"id": "0"}], "hasNext": False}, + ] + + async def deduplicates_null_object_fields(): + document = parse( + """ + query { + hero { + nestedObject { + name + } + ... @defer { + nestedObject { + name + } + } + } + } + """ + ) + result = await complete( + document, {"hero": {**hero, "nestedObject": lambda _info: None}} + ) + + assert result == [ + { + "data": {"hero": {"nestedObject": None}}, + "pending": [{"id": "0", "path": ["hero"]}], + "hasNext": True, + }, + {"completed": [{"id": "0"}], "hasNext": False}, + ] + + async def deduplicates_async_object_fields(): + document = parse( + """ + query { + hero { + nestedObject { + name + } + ... 
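+              # Deduplication also applies when the parent object resolves
+              # asynchronously: the deferred payload carries no new data.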
@defer { + nestedObject { + name + } + } + } + } + """ + ) + + async def resolve_nested_object(_info): + return {"name": "foo"} + + result = await complete( + document, {"hero": {"nestedObject": resolve_nested_object}} + ) + + assert result == [ + { + "data": {"hero": {"nestedObject": {"name": "foo"}}}, + "pending": [{"id": "0", "path": ["hero"]}], + "hasNext": True, + }, + {"completed": [{"id": "0"}], "hasNext": False}, + ] + + @pytest.mark.asyncio + async def handles_errors_thrown_in_deferred_fragments(): + document = parse( + """ + query HeroNameQuery { + hero { + id + ...NameFragment @defer + } + } + fragment NameFragment on Hero { + name + } + """ + ) + result = await complete(document, {"hero": {**hero, "name": Resolvers.bad}}) + + assert result == [ + { + "data": {"hero": {"id": "1"}}, + "pending": [{"id": "0", "path": ["hero"]}], + "hasNext": True, + }, + { + "incremental": [ + { + "data": {"name": None}, + "id": "0", + "errors": [ + { + "message": "bad", + "locations": [{"line": 9, "column": 15}], + "path": ["hero", "name"], + } + ], + }, + ], + "completed": [{"id": "0"}], + "hasNext": False, + }, + ] + + @pytest.mark.asyncio + async def handles_non_nullable_errors_thrown_in_deferred_fragments(): + document = parse( + """ + query HeroNameQuery { + hero { + id + ...NameFragment @defer + } + } + fragment NameFragment on Hero { + nonNullName + } + """ + ) + result = await complete( + document, {"hero": {**hero, "nonNullName": Resolvers.null}} + ) + + assert result == [ + { + "data": {"hero": {"id": "1"}}, + "pending": [{"id": "0", "path": ["hero"]}], + "hasNext": True, + }, + { + "completed": [ + { + "id": "0", + "errors": [ + { + "message": "Cannot return null for non-nullable field" + " Hero.nonNullName.", + "locations": [{"line": 9, "column": 15}], + "path": ["hero", "nonNullName"], + } + ], + }, + ], + "hasNext": False, + }, + ] + + @pytest.mark.asyncio + async def handles_non_nullable_errors_thrown_outside_deferred_fragments(): + document = parse( + """ + query HeroNameQuery { + hero { + nonNullName + ...NameFragment @defer + } + } + fragment NameFragment on Hero { + id } """ ) @@ -740,7 +2105,7 @@ async def handles_non_nullable_errors_thrown_outside_deferred_fragments(): ], } - @pytest.mark.asyncio() + @pytest.mark.asyncio async def handles_async_non_nullable_errors_thrown_in_deferred_fragments(): document = parse( """ @@ -760,12 +2125,15 @@ async def handles_async_non_nullable_errors_thrown_in_deferred_fragments(): ) assert result == [ - {"data": {"hero": {"id": "1"}}, "hasNext": True}, { - "incremental": [ + "data": {"hero": {"id": "1"}}, + "pending": [{"id": "0", "path": ["hero"]}], + "hasNext": True, + }, + { + "completed": [ { - "data": None, - "path": ["hero"], + "id": "0", "errors": [ { "message": "Cannot return null for non-nullable field" @@ -780,7 +2148,7 @@ async def handles_async_non_nullable_errors_thrown_in_deferred_fragments(): }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def returns_payloads_in_correct_order(): document = parse( """ @@ -804,36 +2172,35 @@ async def returns_payloads_in_correct_order(): result = await complete(document, {"hero": {**hero, "name": Resolvers.slow}}) assert result == [ - {"data": {"hero": {"id": "1"}}, "hasNext": True}, { + "data": {"hero": {"id": "1"}}, + "pending": [{"id": "0", "path": ["hero"]}], + "hasNext": True, + }, + { + "pending": [ + {"id": "1", "path": ["hero", "friends", 0]}, + {"id": "2", "path": ["hero", "friends", 1]}, + {"id": "3", "path": ["hero", "friends", 2]}, + ], "incremental": [ - { - "data": 
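+                    # The slow "name" resolver delays this payload, which in
+                    # turn registers three new pending entries (ids 1-3) for
+                    # the deferred friend fragments.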
{"name": "slow", "friends": [{}, {}, {}]}, - "path": ["hero"], - } + {"data": {"name": "slow", "friends": [{}, {}, {}]}, "id": "0"} ], + "completed": [{"id": "0"}], "hasNext": True, }, { "incremental": [ - { - "data": {"name": "Han"}, - "path": ["hero", "friends", 0], - }, - { - "data": {"name": "Leia"}, - "path": ["hero", "friends", 1], - }, - { - "data": {"name": "C-3PO"}, - "path": ["hero", "friends", 2], - }, + {"data": {"name": "Han"}, "id": "1"}, + {"data": {"name": "Leia"}, "id": "2"}, + {"data": {"name": "C-3PO"}, "id": "3"}, ], + "completed": [{"id": "1"}, {"id": "2"}, {"id": "3"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def returns_payloads_from_synchronous_data_in_correct_order(): document = parse( """ @@ -857,36 +2224,35 @@ async def returns_payloads_from_synchronous_data_in_correct_order(): result = await complete(document) assert result == [ - {"data": {"hero": {"id": "1"}}, "hasNext": True}, { + "data": {"hero": {"id": "1"}}, + "pending": [{"id": "0", "path": ["hero"]}], + "hasNext": True, + }, + { + "pending": [ + {"id": "1", "path": ["hero", "friends", 0]}, + {"id": "2", "path": ["hero", "friends", 1]}, + {"id": "3", "path": ["hero", "friends", 2]}, + ], "incremental": [ - { - "data": {"name": "Luke", "friends": [{}, {}, {}]}, - "path": ["hero"], - }, + {"data": {"name": "Luke", "friends": [{}, {}, {}]}, "id": "0"} ], + "completed": [{"id": "0"}], "hasNext": True, }, { "incremental": [ - { - "data": {"name": "Han"}, - "path": ["hero", "friends", 0], - }, - { - "data": {"name": "Leia"}, - "path": ["hero", "friends", 1], - }, - { - "data": {"name": "C-3PO"}, - "path": ["hero", "friends", 2], - }, + {"data": {"name": "Han"}, "id": "1"}, + {"data": {"name": "Leia"}, "id": "2"}, + {"data": {"name": "C-3PO"}, "id": "3"}, ], + "completed": [{"id": "1"}, {"id": "2"}, {"id": "3"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def filters_deferred_payloads_when_list_item_from_async_iterable_nulled(): document = parse( """ @@ -905,7 +2271,7 @@ async def filters_deferred_payloads_when_list_item_from_async_iterable_nulled(): ) result = await complete( - document, {"hero": {**hero, "friends": Resolvers.friends}} + document, {"hero": {**hero, "friends": Resolvers.first_friend}} ) assert result == { @@ -920,7 +2286,7 @@ async def filters_deferred_payloads_when_list_item_from_async_iterable_nulled(): ], } - @pytest.mark.asyncio() + @pytest.mark.asyncio async def original_execute_function_throws_error_if_deferred_and_all_is_sync(): document = parse( """ @@ -938,7 +2304,7 @@ async def original_execute_function_throws_error_if_deferred_and_all_is_sync(): " multiple payloads (due to @defer or @stream directive)" ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def original_execute_function_throws_error_if_deferred_and_not_all_is_sync(): document = parse( """ diff --git a/tests/execution/test_execution_result.py b/tests/execution/test_execution_result.py index 28ba17af..96935d99 100644 --- a/tests/execution/test_execution_result.py +++ b/tests/execution/test_execution_result.py @@ -1,4 +1,5 @@ import pytest + from graphql.error import GraphQLError from graphql.execution import ExecutionResult @@ -54,15 +55,15 @@ def compares_to_dict(): res = ExecutionResult(data, errors) assert res == {"data": data, "errors": errors} assert res == {"data": data, "errors": errors, "extensions": None} - assert res != {"data": data, "errors": None} - assert res != {"data": None, "errors": errors} + assert res == {"data": data, "errors": 
errors, "extensions": {}} + assert res != {"errors": errors} + assert res != {"data": data} assert res != {"data": data, "errors": errors, "extensions": extensions} res = ExecutionResult(data, errors, extensions) - assert res == {"data": data, "errors": errors} assert res == {"data": data, "errors": errors, "extensions": extensions} - assert res != {"data": data, "errors": None} - assert res != {"data": None, "errors": errors} - assert res != {"data": data, "errors": errors, "extensions": None} + assert res != {"errors": errors, "extensions": extensions} + assert res != {"data": data, "extensions": extensions} + assert res != {"data": data, "errors": errors} def compares_to_tuple(): res = ExecutionResult(data, errors) diff --git a/tests/execution/test_executor.py b/tests/execution/test_executor.py index 5ea1f25b..a11c6b5e 100644 --- a/tests/execution/test_executor.py +++ b/tests/execution/test_executor.py @@ -4,6 +4,7 @@ from typing import Any, Awaitable, cast import pytest + from graphql.error import GraphQLError from graphql.execution import execute, execute_sync from graphql.language import FieldNode, OperationDefinitionNode, parse @@ -41,7 +42,7 @@ def accepts_positional_arguments(): assert result == ({"a": "rootValue"}, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def executes_arbitrary_code(): # noinspection PyMethodMayBeStatic,PyMethodMayBeStatic class Data: @@ -244,16 +245,16 @@ def resolve(_obj, info): execute_sync(schema, document, root_value, variable_values=variable_values) assert len(resolved_infos) == 1 - operation = cast(OperationDefinitionNode, document.definitions[0]) + operation = cast("OperationDefinitionNode", document.definitions[0]) assert operation assert operation.kind == "operation_definition" - field = cast(FieldNode, operation.selection_set.selections[0]) + field = cast("FieldNode", operation.selection_set.selections[0]) assert resolved_infos[0] == GraphQLResolveInfo( field_name="test", field_nodes=[field], return_type=GraphQLString, - parent_type=cast(GraphQLObjectType, schema.query_type), + parent_type=cast("GraphQLObjectType", schema.query_type), path=ResponsePath(None, "result", "Test"), schema=schema, fragments={}, @@ -375,7 +376,7 @@ def resolve(_obj, _info, **args): assert len(resolved_args) == 1 assert resolved_args[0] == {"numArg": 123, "stringArg": "foo"} - @pytest.mark.asyncio() + @pytest.mark.asyncio async def nulls_out_error_subtrees(): document = parse( """ @@ -868,7 +869,7 @@ def resolves_to_an_error_if_schema_does_not_support_operation(): ], ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def correct_field_ordering_despite_execution_order(): schema = GraphQLSchema( GraphQLObjectType( @@ -984,7 +985,7 @@ def does_not_include_arguments_that_were_not_set(): None, ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def fails_when_is_type_of_check_is_not_met(): class Special: value: str diff --git a/tests/execution/test_lists.py b/tests/execution/test_lists.py index 3d2bb8fa..a7f747fb 100644 --- a/tests/execution/test_lists.py +++ b/tests/execution/test_lists.py @@ -1,6 +1,7 @@ from typing import Any, AsyncGenerator import pytest + from graphql.execution import ExecutionResult, execute, execute_sync from graphql.language import parse from graphql.pyutils import is_awaitable @@ -49,6 +50,7 @@ def accepts_a_tuple_as_a_list_value(): result = _complete(list_field) assert result == ({"listField": list(list_field)}, None) + @pytest.mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning") def 
accepts_a_set_as_a_list_value(): # Note that sets are not ordered in Python. list_field = {"apple", "banana", "coconut"} @@ -171,7 +173,7 @@ async def _list_field( assert is_awaitable(result) return await result - @pytest.mark.asyncio() + @pytest.mark.asyncio async def accepts_an_async_generator_as_a_list_value(): async def list_field(): yield "two" @@ -183,7 +185,7 @@ async def list_field(): None, ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def accepts_a_custom_async_iterable_as_a_list_value(): class ListField: def __aiter__(self): @@ -202,7 +204,7 @@ async def __anext__(self): None, ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def handles_an_async_generator_that_throws(): async def list_field(): yield "two" @@ -214,7 +216,7 @@ async def list_field(): [{"message": "bad", "locations": [(1, 3)], "path": ["listField"]}], ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def handles_an_async_generator_where_intermediate_value_triggers_an_error(): async def list_field(): yield "two" @@ -232,7 +234,7 @@ async def list_field(): ], ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def handles_errors_from_complete_value_in_async_iterables(): async def list_field(): yield "two" @@ -249,7 +251,7 @@ async def list_field(): ], ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def handles_async_functions_from_complete_value_in_async_iterables(): async def resolve(data: _IndexData, _info: GraphQLResolveInfo) -> int: return data.index @@ -259,7 +261,7 @@ async def resolve(data: _IndexData, _info: GraphQLResolveInfo) -> int: None, ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def handles_single_async_functions_from_complete_value_in_async_iterables(): async def resolve(data: _IndexData, _info: GraphQLResolveInfo) -> int: return data.index @@ -269,7 +271,7 @@ async def resolve(data: _IndexData, _info: GraphQLResolveInfo) -> int: None, ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def handles_async_errors_from_complete_value_in_async_iterables(): async def resolve(data: _IndexData, _info: GraphQLResolveInfo) -> int: index = data.index @@ -288,7 +290,7 @@ async def resolve(data: _IndexData, _info: GraphQLResolveInfo) -> int: ], ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def handles_nulls_yielded_by_async_generator(): async def list_field(): yield 1 @@ -322,7 +324,7 @@ def execute_query(list_value: Any) -> Any: return result - @pytest.mark.asyncio() + @pytest.mark.asyncio async def contains_values(): list_field = [1, 2] assert await _complete(list_field, "[Int]") == ({"listField": [1, 2]}, None) @@ -330,7 +332,7 @@ async def contains_values(): assert await _complete(list_field, "[Int!]") == ({"listField": [1, 2]}, None) assert await _complete(list_field, "[Int!]!") == ({"listField": [1, 2]}, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def contains_null(): list_field = [1, None, 2] errors = [ @@ -351,7 +353,7 @@ async def contains_null(): assert await _complete(list_field, "[Int!]") == ({"listField": None}, errors) assert await _complete(list_field, "[Int!]!") == (None, errors) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def returns_null(): list_field = None errors = [ @@ -366,7 +368,7 @@ async def returns_null(): assert await _complete(list_field, "[Int!]") == ({"listField": None}, None) assert await _complete(list_field, "[Int!]!") == (None, errors) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def contains_error(): list_field = [1, RuntimeError("bad"), 2] errors = [ @@ -393,7 +395,7 @@ async def 
contains_error(): errors, ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def results_in_errors(): list_field = RuntimeError("bad") errors = [ diff --git a/tests/execution/test_map_async_iterable.py b/tests/execution/test_map_async_iterable.py index 055a61bc..eb3cddb8 100644 --- a/tests/execution/test_map_async_iterable.py +++ b/tests/execution/test_map_async_iterable.py @@ -1,11 +1,12 @@ import pytest + from graphql.execution import map_async_iterable try: # pragma: no cover anext # noqa: B018 except NameError: # pragma: no cover (Python < 3.10) # noinspection PyShadowingBuiltins - async def anext(iterator): # noqa: A001 + async def anext(iterator): """Return the next item from an async iterator.""" return await iterator.__anext__() @@ -21,7 +22,7 @@ async def throw(_x: int) -> int: def describe_map_async_iterable(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def maps_over_async_generator(): async def source(): yield 1 @@ -36,7 +37,7 @@ async def source(): with pytest.raises(StopAsyncIteration): assert await anext(doubles) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def maps_over_async_iterable(): items = [1, 2, 3] @@ -57,7 +58,7 @@ async def __anext__(self): assert not items assert values == [2, 4, 6] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def compatible_with_async_for(): async def source(): yield 1 @@ -70,7 +71,7 @@ async def source(): assert values == [2, 4, 6] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def allows_returning_early_from_mapped_async_generator(): async def source(): yield 1 @@ -91,7 +92,7 @@ async def source(): with pytest.raises(StopAsyncIteration): await anext(doubles) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def allows_returning_early_from_mapped_async_iterable(): items = [1, 2, 3] @@ -119,7 +120,7 @@ async def __anext__(self): with pytest.raises(StopAsyncIteration): await anext(doubles) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def allows_throwing_errors_through_async_iterable(): items = [1, 2, 3] @@ -150,7 +151,7 @@ async def __anext__(self): with pytest.raises(StopAsyncIteration): await anext(doubles) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def allows_throwing_errors_with_traceback_through_async_iterables(): class Iterable: def __aiter__(self): @@ -177,7 +178,7 @@ async def __anext__(self): with pytest.raises(StopAsyncIteration): await anext(one) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def does_not_map_over_thrown_errors(): async def source(): yield 1 @@ -192,7 +193,7 @@ async def source(): assert str(exc_info.value) == "Goodbye" - @pytest.mark.asyncio() + @pytest.mark.asyncio async def does_not_map_over_externally_thrown_errors(): async def source(): yield 1 @@ -206,7 +207,7 @@ async def source(): assert str(exc_info.value) == "Goodbye" - @pytest.mark.asyncio() + @pytest.mark.asyncio async def iterable_is_closed_when_mapped_iterable_is_closed(): class Iterable: def __init__(self): @@ -230,7 +231,7 @@ async def aclose(self): with pytest.raises(StopAsyncIteration): await anext(doubles) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def iterable_is_closed_on_callback_error(): class Iterable: def __init__(self): @@ -253,7 +254,7 @@ async def aclose(self): with pytest.raises(StopAsyncIteration): await anext(doubles) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def iterable_exits_on_callback_error(): exited = False @@ -272,7 +273,7 @@ async def iterable(): with pytest.raises(StopAsyncIteration): await anext(doubles) - @pytest.mark.asyncio() + 
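+    # Closing the mapped iterable must succeed even when the underlying
+    # iterable provides no aclose() of its own.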
@pytest.mark.asyncio async def mapped_iterable_is_closed_when_iterable_cannot_be_closed(): class Iterable: def __aiter__(self): @@ -287,7 +288,7 @@ async def __anext__(self): with pytest.raises(StopAsyncIteration): await anext(doubles) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def ignores_that_iterable_cannot_be_closed_on_callback_error(): class Iterable: def __aiter__(self): diff --git a/tests/execution/test_middleware.py b/tests/execution/test_middleware.py index d4abba95..50159995 100644 --- a/tests/execution/test_middleware.py +++ b/tests/execution/test_middleware.py @@ -2,6 +2,7 @@ from typing import Awaitable, cast import pytest + from graphql.execution import Middleware, MiddlewareManager, execute, subscribe from graphql.language.parser import parse from graphql.type import GraphQLField, GraphQLObjectType, GraphQLSchema, GraphQLString @@ -90,7 +91,7 @@ def capitalize_middleware(next_, *args, **kwargs): assert result.data == {"first": "Eno", "second": "Owt"} # type: ignore - @pytest.mark.asyncio() + @pytest.mark.asyncio async def single_async_function(): doc = parse("{ first second }") @@ -200,7 +201,7 @@ def resolve(self, next_, *args, **kwargs): ) assert result.data == {"field": "devloseR"} # type: ignore - @pytest.mark.asyncio() + @pytest.mark.asyncio async def with_async_function_and_object(): doc = parse("{ field }") @@ -237,7 +238,7 @@ async def resolve(self, next_, *args, **kwargs): result = await awaitable_result assert result.data == {"field": "devloseR"} - @pytest.mark.asyncio() + @pytest.mark.asyncio async def subscription_simple(): async def bar_resolve(_obj, _info): yield "bar" @@ -322,7 +323,7 @@ def bad_middleware_object(): GraphQLSchema(test_type), doc, None, - middleware=cast(Middleware, {"bad": "value"}), + middleware=cast("Middleware", {"bad": "value"}), ) assert str(exc_info.value) == ( diff --git a/tests/execution/test_mutations.py b/tests/execution/test_mutations.py index 20ee1c97..b03004de 100644 --- a/tests/execution/test_mutations.py +++ b/tests/execution/test_mutations.py @@ -4,6 +4,7 @@ from typing import Any, Awaitable import pytest + from graphql.execution import ( ExperimentalIncrementalExecutionResults, execute, @@ -106,7 +107,7 @@ async def promise_to_get_the_number(holder: NumberHolder, _info) -> int: def describe_execute_handles_mutation_execution_ordering(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def evaluates_mutations_serially(): document = parse( """ @@ -154,7 +155,7 @@ def does_not_include_illegal_mutation_fields_in_output(): result = execute_sync(schema=schema, document=document) assert result == ({}, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def evaluates_mutations_correctly_in_presence_of_a_failed_mutation(): document = parse( """ @@ -211,7 +212,7 @@ async def evaluates_mutations_correctly_in_presence_of_a_failed_mutation(): ], ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def mutation_fields_with_defer_do_not_block_next_mutation(): document = parse( """ @@ -241,22 +242,19 @@ async def mutation_fields_with_defer_do_not_block_next_mutation(): patches.append(patch.formatted) assert patches == [ - {"data": {"first": {}, "second": {"theNumber": 2}}, "hasNext": True}, { - "incremental": [ - { - "label": "defer-label", - "path": ["first"], - "data": { - "promiseToGetTheNumber": 2, - }, - }, - ], + "data": {"first": {}, "second": {"theNumber": 2}}, + "pending": [{"id": "0", "path": ["first"], "label": "defer-label"}], + "hasNext": True, + }, + { + "incremental": [{"id": "0", "data": 
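+            # The deferred field of the first mutation arrives only after the
+            # second mutation has already set theNumber to 2, showing that
+            # @defer does not block serial mutation execution.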
{"promiseToGetTheNumber": 2}}], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def mutation_inside_of_a_fragment(): document = parse( """ @@ -282,7 +280,7 @@ async def mutation_inside_of_a_fragment(): None, ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def mutation_with_defer_is_not_executed_serially(): document = parse( """ @@ -312,17 +310,14 @@ async def mutation_with_defer_is_not_executed_serially(): patches.append(patch.formatted) assert patches == [ - {"data": {"second": {"theNumber": 2}}, "hasNext": True}, { - "incremental": [ - { - "label": "defer-label", - "path": [], - "data": { - "first": {"theNumber": 1}, - }, - }, - ], + "data": {"second": {"theNumber": 2}}, + "pending": [{"id": "0", "path": [], "label": "defer-label"}], + "hasNext": True, + }, + { + "incremental": [{"id": "0", "data": {"first": {"theNumber": 1}}}], + "completed": [{"id": "0"}], "hasNext": False, }, ] diff --git a/tests/execution/test_nonnull.py b/tests/execution/test_nonnull.py index 053009a9..6c98eb67 100644 --- a/tests/execution/test_nonnull.py +++ b/tests/execution/test_nonnull.py @@ -3,6 +3,7 @@ from typing import Any, Awaitable, cast import pytest + from graphql.execution import ExecutionResult, execute, execute_sync from graphql.language import parse from graphql.pyutils import AwaitableOrValue @@ -110,7 +111,7 @@ def patch(data: str) -> str: async def execute_sync_and_async(query: str, root_value: Any) -> ExecutionResult: sync_result = execute_sync(schema, parse(query), root_value) async_result = await cast( - Awaitable[ExecutionResult], execute(schema, parse(patch(query)), root_value) + "Awaitable[ExecutionResult]", execute(schema, parse(patch(query)), root_value) ) assert repr(async_result) == patch(repr(sync_result)) @@ -125,12 +126,12 @@ def describe_nulls_a_nullable_field(): } """ - @pytest.mark.asyncio() + @pytest.mark.asyncio async def returns_null(): result = await execute_sync_and_async(query, NullingData()) assert result == ({"sync": None}, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def throws(): result = await execute_sync_and_async(query, ThrowingData()) assert result == ( @@ -153,7 +154,7 @@ def describe_nulls_a_returned_object_that_contains_a_non_null_field(): } """ - @pytest.mark.asyncio() + @pytest.mark.asyncio async def that_returns_null(): result = await execute_sync_and_async(query, NullingData()) assert result == ( @@ -168,7 +169,7 @@ async def that_returns_null(): ], ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def that_throws(): result = await execute_sync_and_async(query, ThrowingData()) assert result == ( @@ -214,17 +215,17 @@ def describe_nulls_a_complex_tree_of_nullable_fields_each(): }, } - @pytest.mark.asyncio() + @pytest.mark.asyncio async def returns_null(): result = await cast( - Awaitable[ExecutionResult], execute_query(query, NullingData()) + "Awaitable[ExecutionResult]", execute_query(query, NullingData()) ) assert result == (data, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def throws(): result = await cast( - Awaitable[ExecutionResult], execute_query(query, ThrowingData()) + "Awaitable[ExecutionResult]", execute_query(query, ThrowingData()) ) assert result == ( data, @@ -348,10 +349,10 @@ def describe_nulls_first_nullable_after_long_chain_of_non_null_fields(): "anotherPromiseNest": None, } - @pytest.mark.asyncio() + @pytest.mark.asyncio async def returns_null(): result = await cast( - Awaitable[ExecutionResult], execute_query(query, NullingData()) + 
"Awaitable[ExecutionResult]", execute_query(query, NullingData()) ) assert result == ( data, @@ -411,10 +412,10 @@ async def returns_null(): ], ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def throws(): result = await cast( - Awaitable[ExecutionResult], execute_query(query, ThrowingData()) + "Awaitable[ExecutionResult]", execute_query(query, ThrowingData()) ) assert result == ( data, @@ -477,7 +478,7 @@ def describe_nulls_the_top_level_if_non_nullable_field(): } """ - @pytest.mark.asyncio() + @pytest.mark.asyncio async def returns_null(): result = await execute_sync_and_async(query, NullingData()) await asyncio.sleep(0) # strangely needed to get coverage on Python 3.11 @@ -493,7 +494,7 @@ async def returns_null(): ], ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def throws(): result = await execute_sync_and_async(query, ThrowingData()) await asyncio.sleep(0) # strangely needed to get coverage on Python 3.11 diff --git a/tests/execution/test_oneof.py b/tests/execution/test_oneof.py new file mode 100644 index 00000000..2040b1a7 --- /dev/null +++ b/tests/execution/test_oneof.py @@ -0,0 +1,151 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, Any + +from graphql.execution import ExecutionResult, execute +from graphql.language import parse +from graphql.utilities import build_schema + +if TYPE_CHECKING: + from graphql.pyutils import AwaitableOrValue + +schema = build_schema(""" + type Query { + test(input: TestInputObject!): TestObject + } + + input TestInputObject @oneOf { + a: String + b: Int + } + + type TestObject { + a: String + b: Int + } + """) + + +def execute_query( + query: str, root_value: Any, variable_values: dict[str, Any] | None = None +) -> AwaitableOrValue[ExecutionResult]: + return execute(schema, parse(query), root_value, variable_values=variable_values) + + +def describe_execute_handles_one_of_input_objects(): + def describe_one_of_input_objects(): + root_value = { + "test": lambda _info, input: input, # noqa: A006 + } + + def accepts_a_good_default_value(): + query = """ + query ($input: TestInputObject! = {a: "abc"}) { + test(input: $input) { + a + b + } + } + """ + result = execute_query(query, root_value) + + assert result == ({"test": {"a": "abc", "b": None}}, None) + + def rejects_a_bad_default_value(): + query = """ + query ($input: TestInputObject! = {a: "abc", b: 123}) { + test(input: $input) { + a + b + } + } + """ + result = execute_query(query, root_value) + + assert result == ( + {"test": None}, + [ + { + # This type of error would be caught at validation-time + # hence the vague error message here. + "message": "Argument 'input' of non-null type" + " 'TestInputObject!' must not be null.", + "locations": [(3, 31)], + "path": ["test"], + } + ], + ) + + def accepts_a_good_variable(): + query = """ + query ($input: TestInputObject!) { + test(input: $input) { + a + b + } + } + """ + result = execute_query(query, root_value, {"input": {"a": "abc"}}) + + assert result == ({"test": {"a": "abc", "b": None}}, None) + + def accepts_a_good_variable_with_an_undefined_key(): + query = """ + query ($input: TestInputObject!) { + test(input: $input) { + a + b + } + } + """ + result = execute_query(query, root_value, {"input": {"a": "abc"}}) + + assert result == ({"test": {"a": "abc", "b": None}}, None) + + def rejects_a_variable_with_multiple_non_null_keys(): + query = """ + query ($input: TestInputObject!) 
{ + test(input: $input) { + a + b + } + } + """ + result = execute_query(query, root_value, {"input": {"a": "abc", "b": 123}}) + + assert result == ( + None, + [ + { + "message": "Variable '$input' got invalid value" + " {'a': 'abc', 'b': 123}; Exactly one key must be specified" + " for OneOf type 'TestInputObject'.", + "locations": [(2, 24)], + } + ], + ) + + def rejects_a_variable_with_multiple_nullable_keys(): + query = """ + query ($input: TestInputObject!) { + test(input: $input) { + a + b + } + } + """ + result = execute_query( + query, root_value, {"input": {"a": "abc", "b": None}} + ) + + assert result == ( + None, + [ + { + "message": "Variable '$input' got invalid value" + " {'a': 'abc', 'b': None}; Exactly one key must be specified" + " for OneOf type 'TestInputObject'.", + "locations": [(2, 24)], + } + ], + ) diff --git a/tests/execution/test_parallel.py b/tests/execution/test_parallel.py index faacd0c4..f4dc86b1 100644 --- a/tests/execution/test_parallel.py +++ b/tests/execution/test_parallel.py @@ -2,6 +2,7 @@ from typing import Awaitable import pytest + from graphql.execution import execute from graphql.language import parse from graphql.type import ( @@ -31,7 +32,7 @@ async def wait(self) -> bool: def describe_parallel_execution(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def resolve_single_field(): # make sure that the special case of resolving a single field works async def resolve(*_args): @@ -52,7 +53,7 @@ async def resolve(*_args): assert result == ({"foo": True}, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def resolve_fields_in_parallel(): barrier = Barrier(2) @@ -78,7 +79,7 @@ async def resolve(*_args): assert result == ({"foo": True, "bar": True}, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def resolve_single_element_list(): # make sure that the special case of resolving a single element list works async def resolve(*_args): @@ -97,7 +98,7 @@ async def resolve(*_args): assert result == ({"foo": [True]}, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def resolve_list_in_parallel(): barrier = Barrier(2) @@ -127,7 +128,7 @@ async def resolve_list(*args): assert result == ({"foo": [True, True]}, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def resolve_is_type_of_in_parallel(): FooType = GraphQLInterfaceType("Foo", {"foo": GraphQLField(GraphQLString)}) diff --git a/tests/execution/test_resolve.py b/tests/execution/test_resolve.py index 1c77af8b..db52d638 100644 --- a/tests/execution/test_resolve.py +++ b/tests/execution/test_resolve.py @@ -7,9 +7,11 @@ from graphql.type import ( GraphQLArgument, GraphQLField, + GraphQLID, GraphQLInputField, GraphQLInputObjectType, GraphQLInt, + GraphQLList, GraphQLObjectType, GraphQLSchema, GraphQLString, @@ -213,6 +215,91 @@ def execute_query(query: str, root_value: Any = None) -> ExecutionResult: None, ) + def transforms_default_values_using_out_names(): + # This is an extension of GraphQL.js. 
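+        # out_name maps a camelCase GraphQL name to the snake_case keyword
+        # actually passed to the resolver; the cases below verify that
+        # default values receive the same out_name treatment as explicitly
+        # supplied arguments and variables.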
+ resolver_kwargs: Any + + def search_resolver(_obj: None, _info, **kwargs): + nonlocal resolver_kwargs + resolver_kwargs = kwargs + return [{"id": "42"}] + + filters_type = GraphQLInputObjectType( + "SearchFilters", + {"pageSize": GraphQLInputField(GraphQLInt, out_name="page_size")}, + ) + result_type = GraphQLObjectType("SearchResult", {"id": GraphQLField(GraphQLID)}) + query = GraphQLObjectType( + "Query", + { + "search": GraphQLField( + GraphQLList(result_type), + { + "searchFilters": GraphQLArgument( + filters_type, {"pageSize": 10}, out_name="search_filters" + ) + }, + resolve=search_resolver, + ) + }, + ) + schema = GraphQLSchema(query) + + resolver_kwargs = None + result = execute_sync(schema, parse("{ search { id } }")) + assert result == ({"search": [{"id": "42"}]}, None) + assert resolver_kwargs == {"search_filters": {"page_size": 10}} + + resolver_kwargs = None + result = execute_sync( + schema, parse("{ search(searchFilters:{pageSize: 25}) { id } }") + ) + assert result == ({"search": [{"id": "42"}]}, None) + assert resolver_kwargs == {"search_filters": {"page_size": 25}} + + resolver_kwargs = None + result = execute_sync( + schema, + parse( + """ + query ($searchFilters: SearchFilters) { + search(searchFilters: $searchFilters) { id } + } + """ + ), + ) + assert result == ({"search": [{"id": "42"}]}, None) + assert resolver_kwargs == {"search_filters": {"page_size": 10}} + + resolver_kwargs = None + result = execute_sync( + schema, + parse( + """ + query ($searchFilters: SearchFilters) { + search(searchFilters: $searchFilters) { id } + } + """ + ), + variable_values={"searchFilters": {"pageSize": 25}}, + ) + assert result == ({"search": [{"id": "42"}]}, None) + assert resolver_kwargs == {"search_filters": {"page_size": 25}} + + resolver_kwargs = None + result = execute_sync( + schema, + parse( + """ + query ($searchFilters: SearchFilters = {pageSize: 25}) { + search(searchFilters: $searchFilters) { id } + } + """ + ), + ) + assert result == ({"search": [{"id": "42"}]}, None) + assert resolver_kwargs == {"search_filters": {"page_size": 25}} + def pass_error_from_resolver_wrapped_as_located_graphql_error(): def resolve(_obj, _info): raise ValueError("Some error") diff --git a/tests/execution/test_schema.py b/tests/execution/test_schema.py index a3448d89..7096c5fb 100644 --- a/tests/execution/test_schema.py +++ b/tests/execution/test_schema.py @@ -78,7 +78,7 @@ def __init__(self, id: int): # noqa: A002 "article": GraphQLField( BlogArticle, args={"id": GraphQLArgument(GraphQLID)}, - resolve=lambda _obj, _info, id: Article(id), # noqa: A002 + resolve=lambda _obj, _info, id: Article(id), # noqa: A006 ), "feed": GraphQLField( GraphQLList(BlogArticle), diff --git a/tests/execution/test_stream.py b/tests/execution/test_stream.py index 46a53b56..46237fc1 100644 --- a/tests/execution/test_stream.py +++ b/tests/execution/test_stream.py @@ -4,6 +4,7 @@ from typing import Any, Awaitable, NamedTuple import pytest + from graphql.error import GraphQLError from graphql.execution import ( ExecutionResult, @@ -11,7 +12,7 @@ IncrementalStreamResult, experimental_execute_incrementally, ) -from graphql.execution.incremental_publisher import StreamItemsRecord +from graphql.execution.incremental_publisher import StreamRecord from graphql.language import DocumentNode, parse from graphql.pyutils import Path from graphql.type import ( @@ -28,7 +29,7 @@ anext # noqa: B018 except NameError: # pragma: no cover (Python < 3.10) # noinspection PyShadowingBuiltins - async def anext(iterator): # noqa: A001 + 
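+    # Fallback for Python < 3.10, which lacks the built-in anext().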
async def anext(iterator): """Return the next item from an async iterator.""" return await iterator.__anext__() @@ -147,51 +148,39 @@ def modified_args(args: dict[str, Any], **modifications: Any) -> dict[str, Any]: def describe_execute_stream_directive(): def can_format_and_print_incremental_stream_result(): - result = IncrementalStreamResult() - assert result.formatted == {"items": None} - assert str(result) == "IncrementalStreamResult(items=None, errors=None)" + result = IncrementalStreamResult(items=["hello", "world"], id="foo") + assert result.formatted == {"items": ["hello", "world"], "id": "foo"} + assert ( + str(result) == "IncrementalStreamResult(items=['hello', 'world'], id='foo')" + ) result = IncrementalStreamResult( items=["hello", "world"], - errors=[GraphQLError("msg")], - path=["foo", 1], - label="bar", + id="foo", + sub_path=["bar", 1], + errors=[GraphQLError("oops")], extensions={"baz": 2}, ) assert result.formatted == { "items": ["hello", "world"], - "errors": [{"message": "msg"}], + "id": "foo", + "subPath": ["bar", 1], + "errors": [{"message": "oops"}], "extensions": {"baz": 2}, - "label": "bar", - "path": ["foo", 1], } assert ( str(result) == "IncrementalStreamResult(items=['hello', 'world']," - " errors=[GraphQLError('msg')], path=['foo', 1], label='bar'," + " id='foo', sub_path=['bar', 1], errors=[GraphQLError('oops')]," " extensions={'baz': 2})" ) - def can_print_stream_record(): - record = StreamItemsRecord(None, None, None, None) - assert str(record) == "StreamItemsRecord(path=[])" - record = StreamItemsRecord("foo", Path(None, "bar", "Bar"), record, None) - assert ( - str(record) == "StreamItemsRecord(" - "path=['bar'], label='foo', parent_context)" - ) - record.items = ["hello", "world"] - assert ( - str(record) == "StreamItemsRecord(" - "path=['bar'], label='foo', parent_context, items)" - ) - # noinspection PyTypeChecker def can_compare_incremental_stream_result(): args: dict[str, Any] = { "items": ["hello", "world"], - "errors": [GraphQLError("msg")], - "path": ["foo", 1], - "label": "bar", + "id": "foo", + "sub_path": ["bar", 1], + "errors": [GraphQLError("oops")], "extensions": {"baz": 2}, } result = IncrementalStreamResult(**args) @@ -199,9 +188,11 @@ def can_compare_incremental_stream_result(): assert result != IncrementalStreamResult( **modified_args(args, items=["hello", "foo"]) ) + assert result != IncrementalStreamResult(**modified_args(args, id="bar")) + assert result != IncrementalStreamResult( + **modified_args(args, sub_path=["bar", 2]) + ) assert result != IncrementalStreamResult(**modified_args(args, errors=[])) - assert result != IncrementalStreamResult(**modified_args(args, path=["foo", 2])) - assert result != IncrementalStreamResult(**modified_args(args, label="baz")) assert result != IncrementalStreamResult( **modified_args(args, extensions={"baz": 1}) ) @@ -210,14 +201,22 @@ def can_compare_incremental_stream_result(): assert result == tuple(args.values())[:3] assert result == tuple(args.values())[:2] assert result != tuple(args.values())[:1] - assert result != (["hello", "world"], []) + assert result != (["hello", "world"], "bar") + args["subPath"] = args.pop("sub_path") assert result == args - assert result == dict(list(args.items())[:2]) - assert result == dict(list(args.items())[:3]) - assert result != dict(list(args.items())[:2] + [("path", ["foo", 2])]) - assert result != {**args, "label": "baz"} + assert result != {**args, "items": ["hello", "foo"]} + assert result != {**args, "id": "bar"} + assert result != {**args, "subPath": 
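+        # Dict comparisons use the camelCase key "subPath", matching the
+        # formatted wire protocol (args was rekeyed above accordingly).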
["bar", 2]} + assert result != {**args, "errors": []} + assert result != {**args, "extensions": {"baz": 1}} + + def can_print_stream_record(): + record = StreamRecord(Path(None, 0, None)) + assert str(record) == "StreamRecord(path=[0])" + record = StreamRecord(Path(None, "bar", "Bar"), "foo") + assert str(record) == "StreamRecord(path=['bar'], label='foo')" - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_stream_a_list_field(): document = parse("{ scalarList @stream(initialCount: 1) }") result = await complete( @@ -225,22 +224,19 @@ async def can_stream_a_list_field(): ) assert result == [ { - "data": { - "scalarList": ["apple"], - }, - "hasNext": True, - }, - { - "incremental": [{"items": ["banana"], "path": ["scalarList", 1]}], + "data": {"scalarList": ["apple"]}, + "pending": [{"id": "0", "path": ["scalarList"]}], "hasNext": True, }, + {"incremental": [{"items": ["banana"], "id": "0"}], "hasNext": True}, { - "incremental": [{"items": ["coconut"], "path": ["scalarList", 2]}], + "incremental": [{"items": ["coconut"], "id": "0"}], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_use_default_value_of_initial_count(): document = parse("{ scalarList @stream }") result = await complete( @@ -248,35 +244,27 @@ async def can_use_default_value_of_initial_count(): ) assert result == [ { - "data": { - "scalarList": [], - }, - "hasNext": True, - }, - { - "incremental": [{"items": ["apple"], "path": ["scalarList", 0]}], + "data": {"scalarList": []}, + "pending": [{"id": "0", "path": ["scalarList"]}], "hasNext": True, }, + {"incremental": [{"items": ["apple"], "id": "0"}], "hasNext": True}, + {"incremental": [{"items": ["banana"], "id": "0"}], "hasNext": True}, { - "incremental": [{"items": ["banana"], "path": ["scalarList", 1]}], - "hasNext": True, - }, - { - "incremental": [{"items": ["coconut"], "path": ["scalarList", 2]}], + "incremental": [{"items": ["coconut"], "id": "0"}], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def negative_values_of_initial_count_throw_field_errors(): document = parse("{ scalarList @stream(initialCount: -2) }") result = await complete( document, {"scalarList": ["apple", "banana", "coconut"]} ) assert result == { - "data": { - "scalarList": None, - }, + "data": {"scalarList": None}, "errors": [ { "message": "initialCount must be a positive integer", @@ -286,14 +274,12 @@ async def negative_values_of_initial_count_throw_field_errors(): ], } - @pytest.mark.asyncio() + @pytest.mark.asyncio async def non_integer_values_of_initial_count_throw_field_errors(): document = parse("{ scalarList @stream(initialCount: 1.5) }") result = await complete(document, {"scalarList": ["apple", "half of a banana"]}) assert result == { - "data": { - "scalarList": None, - }, + "data": {"scalarList": None}, "errors": [ { "message": "Argument 'initialCount' has invalid value 1.5.", @@ -303,7 +289,7 @@ async def non_integer_values_of_initial_count_throw_field_errors(): ], } - @pytest.mark.asyncio() + @pytest.mark.asyncio async def returns_label_from_stream_directive(): document = parse( '{ scalarList @stream(initialCount: 1, label: "scalar-stream") }' @@ -313,34 +299,21 @@ async def returns_label_from_stream_directive(): ) assert result == [ { - "data": { - "scalarList": ["apple"], - }, - "hasNext": True, - }, - { - "incremental": [ - { - "items": ["banana"], - "path": ["scalarList", 1], - "label": "scalar-stream", - } + "data": {"scalarList": ["apple"]}, + 
"pending": [ + {"id": "0", "path": ["scalarList"], "label": "scalar-stream"} ], "hasNext": True, }, + {"incremental": [{"items": ["banana"], "id": "0"}], "hasNext": True}, { - "incremental": [ - { - "items": ["coconut"], - "path": ["scalarList", 2], - "label": "scalar-stream", - } - ], + "incremental": [{"items": ["coconut"], "id": "0"}], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def throws_an_error_for_stream_directive_with_non_string_label(): document = parse("{ scalarList @stream(initialCount: 1, label: 42) }") result = await complete(document, {"scalarList": ["some apples"]}) @@ -348,31 +321,22 @@ async def throws_an_error_for_stream_directive_with_non_string_label(): "data": {"scalarList": None}, "errors": [ { - "locations": [ - { - "line": 1, - "column": 46, - } - ], + "locations": [{"line": 1, "column": 46}], "message": "Argument 'label' has invalid value 42.", "path": ["scalarList"], } ], } - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_disable_stream_using_if_argument(): document = parse("{ scalarList @stream(initialCount: 0, if: false) }") result = await complete( document, {"scalarList": ["apple", "banana", "coconut"]} ) - assert result == { - "data": { - "scalarList": ["apple", "banana", "coconut"], - }, - } + assert result == {"data": {"scalarList": ["apple", "banana", "coconut"]}} - @pytest.mark.asyncio() + @pytest.mark.asyncio @pytest.mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning") async def does_not_disable_stream_with_null_if_argument(): document = parse( @@ -384,23 +348,18 @@ async def does_not_disable_stream_with_null_if_argument(): ) assert result == [ { - "data": { - "scalarList": ["apple", "banana"], - }, + "data": {"scalarList": ["apple", "banana"]}, + "pending": [{"id": "0", "path": ["scalarList"]}], "hasNext": True, }, { - "incremental": [ - { - "items": ["coconut"], - "path": ["scalarList", 2], - } - ], + "incremental": [{"items": ["coconut"], "id": "0"}], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_stream_multi_dimensional_lists(): document = parse("{ scalarListList @stream(initialCount: 1) }") result = await complete( @@ -415,32 +374,24 @@ async def can_stream_multi_dimensional_lists(): ) assert result == [ { - "data": { - "scalarListList": [["apple", "apple", "apple"]], - }, + "data": {"scalarListList": [["apple", "apple", "apple"]]}, + "pending": [{"id": "0", "path": ["scalarListList"]}], "hasNext": True, }, { - "incremental": [ - { - "items": [["banana", "banana", "banana"]], - "path": ["scalarListList", 1], - } - ], + "incremental": [{"items": [["banana", "banana", "banana"]], "id": "0"}], "hasNext": True, }, { "incremental": [ - { - "items": [["coconut", "coconut", "coconut"]], - "path": ["scalarListList", 2], - } + {"items": [["coconut", "coconut", "coconut"]], "id": "0"} ], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_stream_a_field_that_returns_a_list_of_awaitables(): document = parse( """ @@ -454,7 +405,6 @@ async def can_stream_a_field_that_returns_a_list_of_awaitables(): ) async def await_friend(f): - await sleep(0) return f result = await complete( @@ -469,20 +419,17 @@ async def await_friend(f): {"name": "Han", "id": "2"}, ], }, + "pending": [{"id": "0", "path": ["friendList"]}], "hasNext": True, }, { - "incremental": [ - { - "items": [{"name": "Leia", "id": "3"}], - "path": ["friendList", 2], - } - ], + 
"incremental": [{"items": [{"name": "Leia", "id": "3"}], "id": "0"}], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_stream_in_correct_order_with_list_of_awaitables(): document = parse( """ @@ -496,7 +443,6 @@ async def can_stream_in_correct_order_with_list_of_awaitables(): ) async def await_friend(f): - await sleep(0) return f result = await complete( @@ -506,38 +452,25 @@ async def await_friend(f): assert result == [ { "data": {"friendList": []}, + "pending": [{"id": "0", "path": ["friendList"]}], "hasNext": True, }, { - "incremental": [ - { - "items": [{"name": "Luke", "id": "1"}], - "path": ["friendList", 0], - } - ], + "incremental": [{"items": [{"name": "Luke", "id": "1"}], "id": "0"}], "hasNext": True, }, { - "incremental": [ - { - "items": [{"name": "Han", "id": "2"}], - "path": ["friendList", 1], - } - ], + "incremental": [{"items": [{"name": "Han", "id": "2"}], "id": "0"}], "hasNext": True, }, { - "incremental": [ - { - "items": [{"name": "Leia", "id": "3"}], - "path": ["friendList", 2], - } - ], + "incremental": [{"items": [{"name": "Leia", "id": "3"}], "id": "0"}], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_stream_a_field_that_returns_a_list_with_nested_async_fields(): document = parse( """ @@ -572,20 +505,17 @@ async def get_id(f): {"name": "Han", "id": "2"}, ] }, + "pending": [{"id": "0", "path": ["friendList"]}], "hasNext": True, }, { - "incremental": [ - { - "items": [{"name": "Leia", "id": "3"}], - "path": ["friendList", 2], - } - ], + "incremental": [{"items": [{"name": "Leia", "id": "3"}], "id": "0"}], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def handles_error_in_list_of_awaitables_before_initial_count_reached(): document = parse( """ @@ -599,7 +529,6 @@ async def handles_error_in_list_of_awaitables_before_initial_count_reached(): ) async def await_friend(f, i): - await sleep(0) if i == 1: raise RuntimeError("bad") return f @@ -622,20 +551,17 @@ async def await_friend(f, i): "path": ["friendList", 1], } ], + "pending": [{"id": "0", "path": ["friendList"]}], "hasNext": True, }, { - "incremental": [ - { - "items": [{"name": "Leia", "id": "3"}], - "path": ["friendList", 2], - } - ], + "incremental": [{"items": [{"name": "Leia", "id": "3"}], "id": "0"}], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def handles_error_in_list_of_awaitables_after_initial_count_reached(): document = parse( """ @@ -649,7 +575,6 @@ async def handles_error_in_list_of_awaitables_after_initial_count_reached(): ) async def await_friend(f, i): - await sleep(0) if i == 1: raise RuntimeError("bad") return f @@ -665,13 +590,14 @@ async def await_friend(f, i): assert result == [ { "data": {"friendList": [{"name": "Luke", "id": "1"}]}, + "pending": [{"id": "0", "path": ["friendList"]}], "hasNext": True, }, { "incremental": [ { "items": [None], - "path": ["friendList", 1], + "id": "0", "errors": [ { "message": "bad", @@ -684,17 +610,13 @@ async def await_friend(f, i): "hasNext": True, }, { - "incremental": [ - { - "items": [{"name": "Leia", "id": "3"}], - "path": ["friendList", 2], - } - ], + "incremental": [{"items": [{"name": "Leia", "id": "3"}], "id": "0"}], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_stream_a_field_that_returns_an_async_iterable(): document = parse( """ 
@@ -709,48 +631,31 @@ async def can_stream_a_field_that_returns_an_async_iterable(): async def friend_list(_info): for i in range(3): - await sleep(0) yield friends[i] result = await complete(document, {"friendList": friend_list}) assert result == [ { "data": {"friendList": []}, + "pending": [{"id": "0", "path": ["friendList"]}], "hasNext": True, }, { - "incremental": [ - { - "items": [{"name": "Luke", "id": "1"}], - "path": ["friendList", 0], - } - ], + "incremental": [{"items": [{"name": "Luke", "id": "1"}], "id": "0"}], "hasNext": True, }, { - "incremental": [ - { - "items": [{"name": "Han", "id": "2"}], - "path": ["friendList", 1], - } - ], + "incremental": [{"items": [{"name": "Han", "id": "2"}], "id": "0"}], "hasNext": True, }, { - "incremental": [ - { - "items": [{"name": "Leia", "id": "3"}], - "path": ["friendList", 2], - } - ], + "incremental": [{"items": [{"name": "Leia", "id": "3"}], "id": "0"}], "hasNext": True, }, - { - "hasNext": False, - }, + {"completed": [{"id": "0"}], "hasNext": False}, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_stream_a_field_that_returns_an_async_iterable_with_initial_count(): document = parse( """ @@ -765,7 +670,6 @@ async def can_stream_a_field_that_returns_an_async_iterable_with_initial_count() async def friend_list(_info): for i in range(3): - await sleep(0) yield friends[i] result = await complete(document, {"friendList": friend_list}) @@ -777,23 +681,17 @@ async def friend_list(_info): {"name": "Han", "id": "2"}, ] }, + "pending": [{"id": "0", "path": ["friendList"]}], "hasNext": True, }, { - "incremental": [ - { - "items": [{"name": "Leia", "id": "3"}], - "path": ["friendList", 2], - } - ], + "incremental": [{"items": [{"name": "Leia", "id": "3"}], "id": "0"}], "hasNext": True, }, - { - "hasNext": False, - }, + {"completed": [{"id": "0"}], "hasNext": False}, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def negative_initial_count_throw_error_on_field_returning_async_iterable(): document = parse( """ @@ -821,7 +719,7 @@ async def friend_list(_info): "data": {"friendList": None}, } - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_handle_concurrent_calls_to_next_without_waiting(): document = parse( """ @@ -836,7 +734,6 @@ async def can_handle_concurrent_calls_to_next_without_waiting(): async def friend_list(_info): for i in range(3): - await sleep(0) yield friends[i] result = await complete_async(document, 3, {"friendList": friend_list}) @@ -850,6 +747,7 @@ async def friend_list(_info): {"name": "Han", "id": "2"}, ] }, + "pending": [{"id": "0", "path": ["friendList"]}], "hasNext": True, }, }, @@ -857,19 +755,19 @@ async def friend_list(_info): "done": False, "value": { "incremental": [ - { - "items": [{"name": "Leia", "id": "3"}], - "path": ["friendList", 2], - } + {"items": [{"name": "Leia", "id": "3"}], "id": "0"} ], "hasNext": True, }, }, - {"done": False, "value": {"hasNext": False}}, + { + "done": False, + "value": {"completed": [{"id": "0"}], "hasNext": False}, + }, {"done": True, "value": None}, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def handles_error_in_async_iterable_before_initial_count_is_reached(): document = parse( """ @@ -883,9 +781,7 @@ async def handles_error_in_async_iterable_before_initial_count_is_reached(): ) async def friend_list(_info): - await sleep(0) yield friends[0] - await sleep(0) raise RuntimeError("bad") result = await complete(document, {"friendList": friend_list}) @@ -900,7 +796,7 @@ async def friend_list(_info): "data": {"friendList": None}, } - 
@pytest.mark.asyncio() + @pytest.mark.asyncio async def handles_error_in_async_iterable_after_initial_count_is_reached(): document = parse( """ @@ -914,24 +810,20 @@ async def handles_error_in_async_iterable_after_initial_count_is_reached(): ) async def friend_list(_info): - await sleep(0) yield friends[0] - await sleep(0) raise RuntimeError("bad") result = await complete(document, {"friendList": friend_list}) assert result == [ { - "data": { - "friendList": [{"name": "Luke", "id": "1"}], - }, + "data": {"friendList": [{"name": "Luke", "id": "1"}]}, + "pending": [{"id": "0", "path": ["friendList"]}], "hasNext": True, }, { - "incremental": [ + "completed": [ { - "items": None, - "path": ["friendList", 1], + "id": "0", "errors": [ { "message": "bad", @@ -945,7 +837,7 @@ async def friend_list(_info): }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def handles_null_for_non_null_list_items_after_initial_count_is_reached(): document = parse( """ @@ -962,16 +854,14 @@ async def handles_null_for_non_null_list_items_after_initial_count_is_reached(): ) assert result == [ { - "data": { - "nonNullFriendList": [{"name": "Luke"}], - }, + "data": {"nonNullFriendList": [{"name": "Luke"}]}, + "pending": [{"id": "0", "path": ["nonNullFriendList"]}], "hasNext": True, }, { - "incremental": [ + "completed": [ { - "items": None, - "path": ["nonNullFriendList", 1], + "id": "0", "errors": [ { "message": "Cannot return null for non-nullable field" @@ -986,7 +876,7 @@ async def handles_null_for_non_null_list_items_after_initial_count_is_reached(): }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def handles_null_for_non_null_async_items_after_initial_count_is_reached(): document = parse( """ @@ -1000,9 +890,7 @@ async def handles_null_for_non_null_async_items_after_initial_count_is_reached() async def friend_list(_info): try: - await sleep(0) yield friends[0] - await sleep(0) yield None finally: raise RuntimeError("Oops") @@ -1010,16 +898,14 @@ async def friend_list(_info): result = await complete(document, {"nonNullFriendList": friend_list}) assert result == [ { - "data": { - "nonNullFriendList": [{"name": "Luke"}], - }, + "data": {"nonNullFriendList": [{"name": "Luke"}]}, + "pending": [{"id": "0", "path": ["nonNullFriendList"]}], "hasNext": True, }, { - "incremental": [ + "completed": [ { - "items": None, - "path": ["nonNullFriendList", 1], + "id": "0", "errors": [ { "message": "Cannot return null for non-nullable field" @@ -1034,7 +920,7 @@ async def friend_list(_info): }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def handles_error_thrown_in_complete_value_after_initial_count_is_reached(): document = parse( """ @@ -1050,16 +936,15 @@ async def scalar_list(_info): result = await complete(document, {"scalarList": scalar_list}) assert result == [ { - "data": { - "scalarList": ["Luke"], - }, + "data": {"scalarList": ["Luke"]}, + "pending": [{"id": "0", "path": ["scalarList"]}], "hasNext": True, }, { "incremental": [ { "items": [None], - "path": ["scalarList", 1], + "id": "0", "errors": [ { "message": "String cannot represent value: {}", @@ -1069,11 +954,12 @@ async def scalar_list(_info): ], }, ], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def handles_async_error_in_complete_value_after_initial_count_is_reached(): document = parse( """ @@ -1089,7 +975,6 @@ async def throw(): raise RuntimeError("Oops") async def get_friend(i): - await sleep(0) return {"nonNullName": throw() if i < 0 else friends[i].name} def 
get_friends(_info): @@ -1103,16 +988,15 @@ def get_friends(_info): ) assert result == [ { - "data": { - "friendList": [{"nonNullName": "Luke"}], - }, + "data": {"friendList": [{"nonNullName": "Luke"}]}, + "pending": [{"id": "0", "path": ["friendList"]}], "hasNext": True, }, { "incremental": [ { "items": [None], - "path": ["friendList", 1], + "id": "0", "errors": [ { "message": "Oops", @@ -1125,17 +1009,13 @@ def get_friends(_info): "hasNext": True, }, { - "incremental": [ - { - "items": [{"nonNullName": "Han"}], - "path": ["friendList", 2], - }, - ], + "incremental": [{"items": [{"nonNullName": "Han"}], "id": "0"}], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def handles_nested_async_error_in_complete_value_after_initial_count(): document = parse( """ @@ -1148,7 +1028,6 @@ async def handles_nested_async_error_in_complete_value_after_initial_count(): ) async def get_friend_name(i): - await sleep(0) if i < 0: raise RuntimeError("Oops") return friends[i].name @@ -1164,16 +1043,15 @@ def get_friends(_info): ) assert result == [ { - "data": { - "friendList": [{"nonNullName": "Luke"}], - }, + "data": {"friendList": [{"nonNullName": "Luke"}]}, + "pending": [{"id": "0", "path": ["friendList"]}], "hasNext": True, }, { "incremental": [ { "items": [None], - "path": ["friendList", 1], + "id": "0", "errors": [ { "message": "Oops", @@ -1186,17 +1064,13 @@ def get_friends(_info): "hasNext": True, }, { - "incremental": [ - { - "items": [{"nonNullName": "Han"}], - "path": ["friendList", 2], - } - ], + "incremental": [{"items": [{"nonNullName": "Han"}], "id": "0"}], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def handles_async_error_in_complete_value_after_initial_count_non_null(): document = parse( """ @@ -1212,7 +1086,6 @@ async def throw(): raise RuntimeError("Oops") async def get_friend(i): - await sleep(0) return {"nonNullName": throw() if i < 0 else friends[i].name} def get_friends(_info): @@ -1226,16 +1099,14 @@ def get_friends(_info): ) assert result == [ { - "data": { - "nonNullFriendList": [{"nonNullName": "Luke"}], - }, + "data": {"nonNullFriendList": [{"nonNullName": "Luke"}]}, + "pending": [{"id": "0", "path": ["nonNullFriendList"]}], "hasNext": True, }, { - "incremental": [ + "completed": [ { - "items": None, - "path": ["nonNullFriendList", 1], + "id": "0", "errors": [ { "message": "Oops", @@ -1249,7 +1120,7 @@ def get_friends(_info): }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def handles_nested_async_error_in_complete_value_after_initial_non_null(): document = parse( """ @@ -1262,7 +1133,6 @@ async def handles_nested_async_error_in_complete_value_after_initial_non_null(): ) async def get_friend_name(i): - await sleep(0) if i < 0: raise RuntimeError("Oops") return friends[i].name @@ -1281,13 +1151,13 @@ def get_friends(_info): "data": { "nonNullFriendList": [{"nonNullName": "Luke"}], }, + "pending": [{"id": "0", "path": ["nonNullFriendList"]}], "hasNext": True, }, { - "incremental": [ + "completed": [ { - "items": None, - "path": ["nonNullFriendList", 1], + "id": "0", "errors": [ { "message": "Oops", @@ -1301,7 +1171,7 @@ def get_friends(_info): }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def handles_async_error_in_complete_value_after_initial_from_async_iterable(): document = parse( """ @@ -1317,7 +1187,6 @@ async def throw(): raise RuntimeError("Oops") async def get_friend(i): - await sleep(0) return {"nonNullName": throw() if i < 0 else 
friends[i].name} async def get_friends(_info): @@ -1332,16 +1201,15 @@ async def get_friends(_info): ) assert result == [ { - "data": { - "friendList": [{"nonNullName": "Luke"}], - }, + "data": {"friendList": [{"nonNullName": "Luke"}]}, + "pending": [{"id": "0", "path": ["friendList"]}], "hasNext": True, }, { "incremental": [ { "items": [None], - "path": ["friendList", 1], + "id": "0", "errors": [ { "message": "Oops", @@ -1354,21 +1222,14 @@ async def get_friends(_info): "hasNext": True, }, { - "incremental": [ - { - "items": [{"nonNullName": "Han"}], - "path": ["friendList", 2], - }, - ], + "incremental": [{"items": [{"nonNullName": "Han"}], "id": "0"}], "hasNext": True, }, - { - "hasNext": False, - }, + {"completed": [{"id": "0"}], "hasNext": False}, ] - @pytest.mark.asyncio() - async def handles_async_error_in_complete_value_from_async_iterable_non_null(): + @pytest.mark.asyncio + async def handles_async_error_in_complete_value_from_async_generator_non_null(): document = parse( """ query { @@ -1383,7 +1244,6 @@ async def throw(): raise RuntimeError("Oops") async def get_friend(i): - await sleep(0) return {"nonNullName": throw() if i < 0 else friends[i].name} async def get_friends(_info): @@ -1392,22 +1252,79 @@ async def get_friends(_info): result = await complete( document, + {"nonNullFriendList": get_friends}, + ) + assert result == [ { - "nonNullFriendList": get_friends, + "data": {"nonNullFriendList": [{"nonNullName": "Luke"}]}, + "pending": [{"id": "0", "path": ["nonNullFriendList"]}], + "hasNext": True, }, + { + "completed": [ + { + "id": "0", + "errors": [ + { + "message": "Oops", + "locations": [{"line": 4, "column": 17}], + "path": ["nonNullFriendList", 1, "nonNullName"], + }, + ], + }, + ], + "hasNext": False, + }, + ] + + @pytest.mark.asyncio + async def handles_async_errors_in_complete_value_after_initial_count_no_aclose(): + # Handles async errors thrown by complete_value after initialCount is reached + # from async iterable for a non-nullable list when the async iterable does + # not provide an aclose method. 
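# Editorial sketch, not part of the patch: this test matters because stream
# cleanup cannot assume an aclose() method on the source iterator. A hedged
# guess at the shape of that dispatch (an assumption for illustration, not
# the library's actual code):
#
#     async def close_source(iterator):
#         aclose = getattr(iterator, "aclose", None)
#         if aclose is not None:  # async generators provide it; the bare
#             await aclose()      # AsyncIterable defined below does not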
+ document = parse( + """ + query { + nonNullFriendList @stream(initialCount: 1) { + nonNullName + } + } + """ ) + + async def throw(): + raise RuntimeError("Oops") + + class AsyncIterableWithoutAclose: + def __init__(self): + self.count = 0 + + def __aiter__(self): + return self + + async def __anext__(self): + count = self.count + self.count += 1 + if count == 1: + name = throw() + else: + if count: + count -= 1 # pragma: no cover + name = friends[count].name + return {"nonNullName": name} + + async_iterable = AsyncIterableWithoutAclose() + result = await complete(document, {"nonNullFriendList": async_iterable}) assert result == [ { - "data": { - "nonNullFriendList": [{"nonNullName": "Luke"}], - }, + "data": {"nonNullFriendList": [{"nonNullName": "Luke"}]}, + "pending": [{"id": "0", "path": ["nonNullFriendList"]}], "hasNext": True, }, { - "incremental": [ + "completed": [ { - "items": None, - "path": ["nonNullFriendList", 1], + "id": "0", "errors": [ { "message": "Oops", @@ -1421,7 +1338,76 @@ async def get_friends(_info): }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio + async def handles_async_errors_in_complete_value_after_initial_count_slow_aclose(): + # Handles async errors thrown by completeValue after initialCount is reached + # from async iterable for a non-nullable list when the async iterable provides + # concurrent next/return methods and has a slow aclose() + document = parse( + """ + query { + nonNullFriendList @stream(initialCount: 1) { + nonNullName + } + } + """ + ) + + async def throw(): + raise RuntimeError("Oops") + + class AsyncIterableWithSlowAclose: + def __init__(self): + self.count = 0 + self.finished = False + + def __aiter__(self): + return self + + async def __anext__(self): + if self.finished: + raise StopAsyncIteration # pragma: no cover + count = self.count + self.count += 1 + if count == 1: + name = throw() + else: + if count: + count -= 1 # pragma: no cover + name = friends[count].name + return {"nonNullName": name} + + async def aclose(self): + await sleep(0) + self.finished = True + + async_iterable = AsyncIterableWithSlowAclose() + result = await complete(document, {"nonNullFriendList": async_iterable}) + assert result == [ + { + "data": {"nonNullFriendList": [{"nonNullName": "Luke"}]}, + "pending": [{"id": "0", "path": ["nonNullFriendList"]}], + "hasNext": True, + }, + { + "completed": [ + { + "id": "0", + "errors": [ + { + "message": "Oops", + "locations": [{"line": 4, "column": 17}], + "path": ["nonNullFriendList", 1, "nonNullName"], + }, + ], + }, + ], + "hasNext": False, + }, + ] + assert async_iterable.finished + + @pytest.mark.asyncio async def filters_payloads_that_are_nulled(): document = parse( """ @@ -1437,10 +1423,9 @@ async def filters_payloads_that_are_nulled(): ) async def resolve_null(_info): - await sleep(0) + return None async def friend_list(_info): - await sleep(0) yield friends[0] result = await complete( @@ -1458,21 +1443,14 @@ async def friend_list(_info): { "message": "Cannot return null for non-nullable field" " NestedObject.nonNullScalarField.", - "locations": [ - { - "line": 4, - "column": 17, - } - ], + "locations": [{"line": 4, "column": 17}], "path": ["nestedObject", "nonNullScalarField"], }, ], - "data": { - "nestedObject": None, - }, + "data": {"nestedObject": None}, } - @pytest.mark.asyncio() + @pytest.mark.asyncio async def filters_payloads_that_are_nulled_by_a_later_synchronous_error(): document = parse( """ @@ -1488,7 +1466,6 @@ async def filters_payloads_that_are_nulled_by_a_later_synchronous_error(): ) async 
def friend_list(_info): - await sleep(0) # pragma: no cover yield friends[0] # pragma: no cover result = await complete( @@ -1510,12 +1487,10 @@ async def friend_list(_info): "path": ["nestedObject", "nonNullScalarField"], }, ], - "data": { - "nestedObject": None, - }, + "data": {"nestedObject": None}, } - @pytest.mark.asyncio() + @pytest.mark.asyncio @pytest.mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning") async def does_not_filter_payloads_when_null_error_is_in_a_different_path(): document = parse( @@ -1536,11 +1511,9 @@ async def does_not_filter_payloads_when_null_error_is_in_a_different_path(): ) async def error_field(_info): - await sleep(0) raise RuntimeError("Oops") async def friend_list(_info): - await sleep(0) yield friends[0] result = await complete( @@ -1559,13 +1532,17 @@ async def friend_list(_info): "otherNestedObject": {}, "nestedObject": {"nestedFriendList": []}, }, + "pending": [ + {"id": "0", "path": ["otherNestedObject"]}, + {"id": "1", "path": ["nestedObject", "nestedFriendList"]}, + ], "hasNext": True, }, { "incremental": [ { "data": {"scalarField": None}, - "path": ["otherNestedObject"], + "id": "0", "errors": [ { "message": "Oops", @@ -1574,17 +1551,15 @@ async def friend_list(_info): }, ], }, - { - "items": [{"name": "Luke"}], - "path": ["nestedObject", "nestedFriendList", 0], - }, + {"items": [{"name": "Luke"}], "id": "1"}, ], + "completed": [{"id": "0"}], "hasNext": True, }, - {"hasNext": False}, + {"completed": [{"id": "1"}], "hasNext": False}, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio @pytest.mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning") async def filters_stream_payloads_that_are_nulled_in_a_deferred_payload(): document = parse( @@ -1605,10 +1580,9 @@ async def filters_stream_payloads_that_are_nulled_in_a_deferred_payload(): ) async def resolve_null(_info): - await sleep(0) + return None async def friend_list(_info): - await sleep(0) yield friends[0] result = await complete( @@ -1625,18 +1599,15 @@ async def friend_list(_info): assert result == [ { - "data": { - "nestedObject": {}, - }, + "data": {"nestedObject": {}}, + "pending": [{"id": "0", "path": ["nestedObject"]}], "hasNext": True, }, { "incremental": [ { - "data": { - "deeperNestedObject": None, - }, - "path": ["nestedObject"], + "data": {"deeperNestedObject": None}, + "id": "0", "errors": [ { "message": "Cannot return null for non-nullable field" @@ -1651,11 +1622,13 @@ async def friend_list(_info): ], }, ], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio + @pytest.mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning") async def filters_defer_payloads_that_are_nulled_in_a_stream_response(): document = parse( """ @@ -1671,33 +1644,30 @@ async def filters_defer_payloads_that_are_nulled_in_a_stream_response(): ) async def resolve_null(_info): - await sleep(0) + return None async def friend(): - await sleep(0) return { "name": friends[0].name, "nonNullName": resolve_null, } async def friend_list(_info): - await sleep(0) yield await friend() result = await complete(document, {"friendList": friend_list}) assert result == [ { - "data": { - "friendList": [], - }, + "data": {"friendList": []}, + "pending": [{"id": "0", "path": ["friendList"]}], "hasNext": True, }, { "incremental": [ { "items": [None], - "path": ["friendList", 0], + "id": "0", "errors": [ { "message": "Cannot return null for non-nullable field" @@ -1710,26 +1680,23 @@ async def friend_list(_info): ], "hasNext": True, }, - { - "hasNext": 
False, - }, + {"completed": [{"id": "0"}], "hasNext": False}, ] @pytest.mark.timeout(1) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def returns_iterator_and_ignores_error_when_stream_payloads_are_filtered(): finished = False async def resolve_null(_info): - await sleep(0) + return None async def iterable(_info): nonlocal finished for i in range(3): - await sleep(0) friend = friends[i] yield {"name": friend.name, "nonNullName": None} - finished = True # pragma: no cover + finished = True document = parse( """ @@ -1765,14 +1732,20 @@ async def iterable(_info): iterator = execute_result.subsequent_results result1 = execute_result.initial_result - assert result1 == {"data": {"nestedObject": {}}, "hasNext": True} + assert result1 == { + "data": {"nestedObject": {}}, + "pending": [{"id": "0", "path": ["nestedObject"]}], + "hasNext": True, + } + + assert not finished result2 = await anext(iterator) assert result2.formatted == { "incremental": [ { "data": {"deeperNestedObject": None}, - "path": ["nestedObject"], + "id": "0", "errors": [ { "message": "Cannot return null for non-nullable field" @@ -1787,15 +1760,16 @@ async def iterable(_info): ], }, ], + "completed": [{"id": "0"}], "hasNext": False, } with pytest.raises(StopAsyncIteration): await anext(iterator) - assert not finished # running iterator cannot be canceled + assert finished - @pytest.mark.asyncio() + @pytest.mark.asyncio async def handles_awaitables_from_complete_value_after_initial_count_is_reached(): document = parse( """ @@ -1809,11 +1783,9 @@ async def handles_awaitables_from_complete_value_after_initial_count_is_reached( ) async def get_friend_name(i): - await sleep(0) return friends[i].name async def get_friend(i): - await sleep(0) if i < 2: return friends[i] return {"id": friends[2].id, "name": get_friend_name(i)} @@ -1830,35 +1802,78 @@ async def get_friends(_info): ) assert result == [ { - "data": { - "friendList": [{"id": "1", "name": "Luke"}], - }, + "data": {"friendList": [{"id": "1", "name": "Luke"}]}, + "pending": [{"id": "0", "path": ["friendList"]}], "hasNext": True, }, { - "incremental": [ - { - "items": [{"id": "2", "name": "Han"}], - "path": ["friendList", 1], - } - ], + "incremental": [{"items": [{"id": "2", "name": "Han"}], "id": "0"}], "hasNext": True, }, { - "incremental": [ - { - "items": [{"id": "3", "name": "Leia"}], - "path": ["friendList", 2], - } + "incremental": [{"items": [{"id": "3", "name": "Leia"}], "id": "0"}], + "hasNext": True, + }, + {"completed": [{"id": "0"}], "hasNext": False}, + ] + + @pytest.mark.asyncio + async def handles_overlapping_deferred_and_non_deferred_streams(): + document = parse( + """ + query { + nestedObject { + nestedFriendList @stream(initialCount: 0) { + id + } + } + nestedObject { + ... 
@defer { + nestedFriendList @stream(initialCount: 0) { + id + name + } + } + } + } + """ + ) + + async def get_nested_friend_list(_info): + for i in range(2): + yield friends[i] + + result = await complete( + document, + { + "nestedObject": { + "nestedFriendList": get_nested_friend_list, + } + }, + ) + + assert result == [ + { + "data": {"nestedObject": {"nestedFriendList": []}}, + "pending": [ + {"id": "0", "path": ["nestedObject"]}, + {"id": "1", "path": ["nestedObject", "nestedFriendList"]}, ], "hasNext": True, }, { - "hasNext": False, + "incremental": [{"items": [{"id": "1", "name": "Luke"}], "id": "1"}], + "completed": [{"id": "0"}], + "hasNext": True, + }, + { + "incremental": [{"items": [{"id": "2", "name": "Han"}], "id": "1"}], + "hasNext": True, }, + {"completed": [{"id": "1"}], "hasNext": False}, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def returns_payloads_properly_when_parent_deferred_slower_than_stream(): resolve_slow_field = Event() @@ -1884,7 +1899,6 @@ async def slow_field(_info): async def get_friends(_info): for i in range(2): - await sleep(0) yield friends[i] execute_result = experimental_execute_incrementally( @@ -1902,49 +1916,40 @@ async def get_friends(_info): iterator = execute_result.subsequent_results result1 = execute_result.initial_result - assert result1 == {"data": {"nestedObject": {}}, "hasNext": True} + assert result1 == { + "data": {"nestedObject": {}}, + "pending": [{"id": "0", "path": ["nestedObject"]}], + "hasNext": True, + } resolve_slow_field.set() result2 = await anext(iterator) assert result2.formatted == { + "pending": [{"id": "1", "path": ["nestedObject", "nestedFriendList"]}], "incremental": [ - { - "data": {"scalarField": "slow", "nestedFriendList": []}, - "path": ["nestedObject"], - }, + {"data": {"scalarField": "slow", "nestedFriendList": []}, "id": "0"}, ], + "completed": [{"id": "0"}], "hasNext": True, } result3 = await anext(iterator) assert result3.formatted == { - "incremental": [ - { - "items": [{"name": "Luke"}], - "path": ["nestedObject", "nestedFriendList", 0], - }, - ], + "incremental": [{"items": [{"name": "Luke"}], "id": "1"}], "hasNext": True, } result4 = await anext(iterator) assert result4.formatted == { - "incremental": [ - { - "items": [{"name": "Han"}], - "path": ["nestedObject", "nestedFriendList", 1], - }, - ], + "incremental": [{"items": [{"name": "Han"}], "id": "1"}], "hasNext": True, } result5 = await anext(iterator) - assert result5.formatted == { - "hasNext": False, - } + assert result5.formatted == {"completed": [{"id": "1"}], "hasNext": False} with pytest.raises(StopAsyncIteration): await anext(iterator) @pytest.mark.timeout(1) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_defer_fields_that_are_resolved_after_async_iterable_is_complete(): resolve_slow_field = Event() resolve_iterable = Event() @@ -1968,9 +1973,7 @@ async def slow_field(_info): ) async def get_friends(_info): - await sleep(0) yield friends[0] - await sleep(0) yield {"id": friends[1].id, "name": slow_field} await resolve_iterable.wait() @@ -1986,43 +1989,44 @@ async def get_friends(_info): iterator = execute_result.subsequent_results result1 = execute_result.initial_result - assert result1 == {"data": {"friendList": [{"id": "1"}]}, "hasNext": True} + assert result1 == { + "data": {"friendList": [{"id": "1"}]}, + "pending": [ + {"id": "0", "path": ["friendList", 0], "label": "DeferName"}, + {"id": "1", "path": ["friendList"], "label": "stream-label"}, + ], + "hasNext": True, + } resolve_iterable.set() result2 = await 
anext(iterator) assert result2.formatted == { + "pending": [{"id": "2", "path": ["friendList", 1], "label": "DeferName"}], "incremental": [ - { - "data": {"name": "Luke"}, - "path": ["friendList", 0], - "label": "DeferName", - }, - { - "items": [{"id": "2"}], - "path": ["friendList", 1], - "label": "stream-label", - }, + {"data": {"name": "Luke"}, "id": "0"}, + {"items": [{"id": "2"}], "id": "1"}, ], + "completed": [{"id": "0"}], "hasNext": True, } resolve_slow_field.set() result3 = await anext(iterator) assert result3.formatted == { - "incremental": [ - { - "data": {"name": "Han"}, - "path": ["friendList", 1], - "label": "DeferName", - }, - ], + "completed": [{"id": "1"}], + "hasNext": True, + } + result4 = await anext(iterator) + assert result4.formatted == { + "incremental": [{"data": {"name": "Han"}, "id": "2"}], + "completed": [{"id": "2"}], "hasNext": False, } with pytest.raises(StopAsyncIteration): await anext(iterator) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_defer_fields_that_are_resolved_before_async_iterable_is_complete(): resolve_slow_field = Event() resolve_iterable = Event() @@ -2046,11 +2050,8 @@ async def slow_field(_info): ) async def get_friends(_info): - await sleep(0) yield friends[0] - await sleep(0) yield {"id": friends[1].id, "name": slow_field} - await sleep(0) await resolve_iterable.wait() execute_result = await experimental_execute_incrementally( # type: ignore @@ -2065,55 +2066,53 @@ async def get_friends(_info): iterator = execute_result.subsequent_results result1 = execute_result.initial_result - assert result1 == {"data": {"friendList": [{"id": "1"}]}, "hasNext": True} + assert result1 == { + "data": {"friendList": [{"id": "1"}]}, + "pending": [ + {"id": "0", "path": ["friendList", 0], "label": "DeferName"}, + {"id": "1", "path": ["friendList"], "label": "stream-label"}, + ], + "hasNext": True, + } resolve_slow_field.set() result2 = await anext(iterator) assert result2.formatted == { + "pending": [{"id": "2", "path": ["friendList", 1], "label": "DeferName"}], "incremental": [ - { - "data": {"name": "Luke"}, - "path": ["friendList", 0], - "label": "DeferName", - }, - { - "items": [{"id": "2"}], - "path": ["friendList", 1], - "label": "stream-label", - }, + {"data": {"name": "Luke"}, "id": "0"}, + {"items": [{"id": "2"}], "id": "1"}, ], + "completed": [{"id": "0"}], "hasNext": True, } result3 = await anext(iterator) assert result3.formatted == { "incremental": [ - { - "data": {"name": "Han"}, - "path": ["friendList", 1], - "label": "DeferName", - }, + {"data": {"name": "Han"}, "id": "2"}, ], + "completed": [{"id": "2"}], "hasNext": True, } resolve_iterable.set() result4 = await anext(iterator) assert result4.formatted == { + "completed": [{"id": "1"}], "hasNext": False, } with pytest.raises(StopAsyncIteration): await anext(iterator) - @pytest.mark.asyncio() - async def finishes_async_iterable_when_returned_generator_is_closed(): + @pytest.mark.asyncio + async def finishes_async_iterable_when_finished_generator_is_closed(): finished = False async def iterable(_info): nonlocal finished for i in range(3): - await sleep(0) yield friends[i] finished = True @@ -2137,16 +2136,22 @@ async def iterable(_info): iterator = execute_result.subsequent_results result1 = execute_result.initial_result - assert result1 == {"data": {"friendList": [{"id": "1"}]}, "hasNext": True} + assert result1 == { + "data": {"friendList": [{"id": "1"}]}, + "pending": [ + {"id": "0", "path": ["friendList", 0]}, + {"id": "1", "path": ["friendList"]}, + ], + "hasNext": True, + 
} await iterator.aclose() with pytest.raises(StopAsyncIteration): await anext(iterator) - await sleep(0) assert finished - @pytest.mark.asyncio() + @pytest.mark.asyncio async def finishes_async_iterable_when_underlying_iterator_has_no_close_method(): class Iterable: def __init__(self): @@ -2156,7 +2161,6 @@ def __aiter__(self): return self async def __anext__(self): - await sleep(0) index = self.index self.index = index + 1 try: @@ -2186,6 +2190,7 @@ async def __anext__(self): result1 = execute_result.initial_result assert result1 == { "data": {"friendList": [{"id": "1", "name": "Luke"}]}, + "pending": [{"id": "0", "path": ["friendList"]}], "hasNext": True, } @@ -2193,18 +2198,15 @@ async def __anext__(self): with pytest.raises(StopAsyncIteration): await anext(iterator) - await sleep(0) - await sleep(0) assert iterable.index == 4 - @pytest.mark.asyncio() - async def finishes_async_iterable_when_error_is_raised_in_returned_generator(): + @pytest.mark.asyncio + async def finishes_async_iterable_when_error_is_raised_in_finished_generator(): finished = False async def iterable(_info): nonlocal finished for i in range(3): - await sleep(0) yield friends[i] finished = True @@ -2228,7 +2230,14 @@ async def iterable(_info): iterator = execute_result.subsequent_results result1 = execute_result.initial_result - assert result1 == {"data": {"friendList": [{"id": "1"}]}, "hasNext": True} + assert result1 == { + "data": {"friendList": [{"id": "1"}]}, + "pending": [ + {"id": "0", "path": ["friendList", 0]}, + {"id": "1", "path": ["friendList"]}, + ], + "hasNext": True, + } with pytest.raises(RuntimeError, match="bad"): await iterator.athrow(RuntimeError("bad")) @@ -2236,5 +2245,4 @@ async def iterable(_info): with pytest.raises(StopAsyncIteration): await anext(iterator) - await sleep(0) assert finished diff --git a/tests/execution/test_subscribe.py b/tests/execution/test_subscribe.py index fcbd13ef..8a6b4c38 100644 --- a/tests/execution/test_subscribe.py +++ b/tests/execution/test_subscribe.py @@ -13,6 +13,7 @@ ) import pytest + from graphql.execution import ( ExecutionResult, create_source_event_stream, @@ -44,7 +45,7 @@ anext # noqa: B018 except NameError: # pragma: no cover (Python < 3.10) # noinspection PyShadowingBuiltins - async def anext(iterator): # noqa: A001 + async def anext(iterator): """Return the next item from an async iterator.""" return await iterator.__anext__() @@ -197,7 +198,7 @@ def subscribe_with_bad_args( # Check all error cases when initializing the subscription. 
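# Editorial sketch, not part of the patch: the happy path these error cases
# guard against breaking. Assuming a schema whose subscription root defines
# "foo", as in the surrounding tests, a valid setup yields an async iterator
# of ExecutionResults:
#
#     result = await subscribe(schema, parse("subscription { foo }"))
#     async for payload in result:
#         ...  # one ExecutionResult per published event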
def describe_subscription_initialization_phase(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def accepts_positional_arguments(): document = parse( """ @@ -217,7 +218,7 @@ async def empty_async_iterable(_info): await anext(ai) await ai.aclose() # type: ignore - @pytest.mark.asyncio() + @pytest.mark.asyncio async def accepts_multiple_subscription_fields_defined_in_schema(): schema = GraphQLSchema( query=DummyQueryType, @@ -242,7 +243,7 @@ async def foo_generator(_info): await subscription.aclose() # type: ignore - @pytest.mark.asyncio() + @pytest.mark.asyncio async def accepts_type_definition_with_sync_subscribe_function(): async def foo_generator(_obj, _info): yield {"foo": "FooValue"} @@ -262,7 +263,7 @@ async def foo_generator(_obj, _info): await subscription.aclose() # type: ignore - @pytest.mark.asyncio() + @pytest.mark.asyncio async def accepts_type_definition_with_async_subscribe_function(): async def foo_generator(_obj, _info): await asyncio.sleep(0) @@ -290,7 +291,7 @@ async def subscribe_fn(obj, info): await subscription.aclose() # type: ignore - @pytest.mark.asyncio() + @pytest.mark.asyncio async def should_only_resolve_the_first_field_of_invalid_multi_field(): did_resolve = {"foo": False, "bar": False} @@ -325,7 +326,7 @@ async def subscribe_bar(_obj, _info): # pragma: no cover await subscription.aclose() # type: ignore - @pytest.mark.asyncio() + @pytest.mark.asyncio async def resolves_to_an_error_if_schema_does_not_support_subscriptions(): schema = GraphQLSchema(query=DummyQueryType) document = parse("subscription { unknownField }") @@ -343,7 +344,7 @@ async def resolves_to_an_error_if_schema_does_not_support_subscriptions(): ], ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def resolves_to_an_error_for_unknown_subscription_field(): schema = GraphQLSchema( query=DummyQueryType, @@ -364,7 +365,7 @@ async def resolves_to_an_error_for_unknown_subscription_field(): ], ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def should_pass_through_unexpected_errors_thrown_in_subscribe(): schema = GraphQLSchema( query=DummyQueryType, @@ -375,7 +376,7 @@ async def should_pass_through_unexpected_errors_thrown_in_subscribe(): with pytest.raises(AttributeError): subscribe_with_bad_args(schema=schema, document={}) # type: ignore - @pytest.mark.asyncio() + @pytest.mark.asyncio @pytest.mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning") async def throws_an_error_if_subscribe_does_not_return_an_iterator(): expected_result = ( @@ -405,7 +406,7 @@ async def async_fn(obj, info): del result cleanup() - @pytest.mark.asyncio() + @pytest.mark.asyncio async def resolves_to_an_error_for_subscription_resolver_errors(): expected_result = ( None, @@ -447,7 +448,7 @@ async def reject_with_error(*args): assert is_awaitable(result) assert await result == expected_result - @pytest.mark.asyncio() + @pytest.mark.asyncio async def resolves_to_an_error_if_variables_were_wrong_type(): schema = GraphQLSchema( query=DummyQueryType, @@ -492,7 +493,7 @@ async def resolves_to_an_error_if_variables_were_wrong_type(): # Once a subscription returns a valid AsyncIterator, it can still yield errors. 
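# Editorial note, not part of the patch: the assertions in this block rely on
# ExecutionResult comparing equal to a (data, errors) pair, so each published
# event can be checked without unpacking the result, e.g.:
#
#     assert await anext(subscription) == ({"newMessage": "Hello"}, None)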
def describe_subscription_publish_phase(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def produces_a_payload_for_multiple_subscribe_in_same_subscription(): pubsub = SimplePubSub() @@ -527,7 +528,7 @@ async def produces_a_payload_for_multiple_subscribe_in_same_subscription(): assert await payload1 == (expected_payload, None) assert await payload2 == (expected_payload, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def produces_a_payload_when_queried_fields_are_async(): pubsub = SimplePubSub() subscription = create_subscription(pubsub, {"asyncResolver": True}) @@ -564,7 +565,7 @@ async def produces_a_payload_when_queried_fields_are_async(): with pytest.raises(StopAsyncIteration): await anext(subscription) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def produces_a_payload_per_subscription_event(): pubsub = SimplePubSub() subscription = create_subscription(pubsub) @@ -643,7 +644,7 @@ async def produces_a_payload_per_subscription_event(): with pytest.raises(StopAsyncIteration): assert await anext(subscription) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def subscribe_function_returns_errors_with_defer(): pubsub = SimplePubSub() subscription = create_subscription(pubsub, {"shouldDefer": True}) @@ -707,7 +708,7 @@ async def subscribe_function_returns_errors_with_defer(): with pytest.raises(StopAsyncIteration): assert await anext(subscription) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def subscribe_function_returns_errors_with_stream(): pubsub = SimplePubSub() subscription = create_subscription(pubsub, {"shouldStream": True}) @@ -788,7 +789,7 @@ async def subscribe_function_returns_errors_with_stream(): with pytest.raises(StopAsyncIteration): assert await anext(subscription) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def produces_a_payload_when_there_are_multiple_events(): pubsub = SimplePubSub() subscription = create_subscription(pubsub) @@ -844,7 +845,7 @@ async def produces_a_payload_when_there_are_multiple_events(): None, ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def should_not_trigger_when_subscription_is_already_done(): pubsub = SimplePubSub() subscription = create_subscription(pubsub) @@ -895,7 +896,7 @@ async def should_not_trigger_when_subscription_is_already_done(): with pytest.raises(StopAsyncIteration): await payload - @pytest.mark.asyncio() + @pytest.mark.asyncio async def should_not_trigger_when_subscription_is_thrown(): pubsub = SimplePubSub() subscription = create_subscription(pubsub) @@ -936,7 +937,7 @@ async def should_not_trigger_when_subscription_is_thrown(): with pytest.raises(StopAsyncIteration): await payload - @pytest.mark.asyncio() + @pytest.mark.asyncio async def event_order_is_correct_for_multiple_publishes(): pubsub = SimplePubSub() subscription = create_subscription(pubsub) @@ -992,7 +993,7 @@ async def event_order_is_correct_for_multiple_publishes(): None, ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def should_handle_error_during_execution_of_source_event(): async def generate_messages(_obj, _info): yield "Hello" @@ -1040,7 +1041,7 @@ def resolve_message(message, _info): # Subsequent events are still executed. 
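# Editorial note, not part of the patch: the error above is raised by a field
# resolver, so it is reported in that payload's "errors" while the source
# event stream stays open and the next event ("Bonjour") still arrives below.
# Contrast with should_pass_through_error_thrown_in_source_event_stream,
# where the source generator itself raises and the subscription ends in
# StopAsyncIteration.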
assert await anext(subscription) == ({"newMessage": "Bonjour"}, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def should_pass_through_error_thrown_in_source_event_stream(): async def generate_messages(_obj, _info): yield "Hello" @@ -1077,7 +1078,7 @@ def resolve_message(message, _info): with pytest.raises(StopAsyncIteration): await anext(subscription) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def should_work_with_sync_resolve_function(): async def generate_messages(_obj, _info): yield "Hello" @@ -1105,7 +1106,7 @@ def resolve_message(message, _info): assert await anext(subscription) == ({"newMessage": "Hello"}, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def should_work_with_async_resolve_function(): async def generate_messages(_obj, _info): await asyncio.sleep(0) @@ -1135,7 +1136,7 @@ async def resolve_message(message, _info): assert await anext(subscription) == ({"newMessage": "Hello"}, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def should_work_with_custom_async_iterator(): class MessageGenerator: resolved: List[str] = [] @@ -1185,7 +1186,7 @@ async def resolve(cls, message, _info) -> str: await subscription.aclose() # type: ignore - @pytest.mark.asyncio() + @pytest.mark.asyncio async def should_close_custom_async_iterator(): class MessageGenerator: closed: bool = False diff --git a/tests/execution/test_sync.py b/tests/execution/test_sync.py index 36f8c9a5..d5e9504f 100644 --- a/tests/execution/test_sync.py +++ b/tests/execution/test_sync.py @@ -1,4 +1,5 @@ import pytest + from graphql import graphql_sync from graphql.execution import execute, execute_sync from graphql.language import parse @@ -51,7 +52,7 @@ def does_not_return_an_awaitable_if_mutation_fields_are_all_synchronous(): None, ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def returns_an_awaitable_if_any_field_is_asynchronous(): doc = "query Example { syncField, asyncField }" result = execute(schema, parse(doc), "rootValue") @@ -80,7 +81,7 @@ def does_not_throw_if_not_encountering_async_execution_with_check_sync(): None, ) - @pytest.mark.asyncio() + @pytest.mark.asyncio @pytest.mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning") async def throws_if_encountering_async_execution_with_check_sync(): doc = "query Example { syncField, asyncField }" @@ -93,7 +94,7 @@ async def throws_if_encountering_async_execution_with_check_sync(): del exc_info cleanup() - @pytest.mark.asyncio() + @pytest.mark.asyncio @pytest.mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning") async def throws_if_encountering_async_operation_without_check_sync(): doc = "query Example { syncField, asyncField }" @@ -112,7 +113,7 @@ async def throws_if_encountering_async_operation_without_check_sync(): del result cleanup() - @pytest.mark.asyncio() + @pytest.mark.asyncio @pytest.mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning") async def throws_if_encountering_async_iterable_execution_with_check_sync(): doc = """ @@ -132,7 +133,7 @@ async def throws_if_encountering_async_iterable_execution_with_check_sync(): del exc_info cleanup() - @pytest.mark.asyncio() + @pytest.mark.asyncio @pytest.mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning") async def throws_if_encountering_async_iterable_execution_without_check_sync(): doc = """ @@ -188,7 +189,7 @@ def does_not_throw_if_not_encountering_async_operation_with_check_sync(): None, ) - @pytest.mark.asyncio() + @pytest.mark.asyncio @pytest.mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning") 
async def throws_if_encountering_async_operation_with_check_sync(): doc = "query Example { syncField, asyncField }" @@ -199,7 +200,7 @@ async def throws_if_encountering_async_operation_with_check_sync(): del exc_info cleanup() - @pytest.mark.asyncio() + @pytest.mark.asyncio @pytest.mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning") async def throws_if_encountering_async_operation_without_check_sync(): doc = "query Example { syncField, asyncField }" diff --git a/tests/fixtures/__init__.py b/tests/fixtures/__init__.py index 3df1c2f0..5e4058f9 100644 --- a/tests/fixtures/__init__.py +++ b/tests/fixtures/__init__.py @@ -7,11 +7,11 @@ import pytest __all__ = [ + "big_schema_introspection_result", + "big_schema_sdl", "cleanup", "kitchen_sink_query", "kitchen_sink_sdl", - "big_schema_sdl", - "big_schema_introspection_result", ] diff --git a/tests/fixtures/schema_kitchen_sink.graphql b/tests/fixtures/schema_kitchen_sink.graphql index 8ec1f2d8..c1d9d06e 100644 --- a/tests/fixtures/schema_kitchen_sink.graphql +++ b/tests/fixtures/schema_kitchen_sink.graphql @@ -26,6 +26,7 @@ type Foo implements Bar & Baz & Two { five(argument: [String] = ["string", "string"]): String six(argument: InputType = {key: "value"}): Type seven(argument: Int = null): Type + eight(argument: OneOfInputType): Type } type AnnotatedObject @onObject(arg: "value") { @@ -115,6 +116,11 @@ input InputType { answer: Int = 42 } +input OneOfInputType @oneOf { + string: String + int: Int +} + input AnnotatedInput @onInputObject { annotatedField: Type @onInputFieldDefinition } diff --git a/tests/language/test_block_string.py b/tests/language/test_block_string.py index 74f99734..d135dde9 100644 --- a/tests/language/test_block_string.py +++ b/tests/language/test_block_string.py @@ -148,8 +148,8 @@ def __init__(self, string: str) -> None: def __str__(self) -> str: return self.string - _assert_printable(cast(str, LazyString(""))) - _assert_non_printable(cast(str, LazyString(" "))) + _assert_printable(cast("str", LazyString(""))) + _assert_non_printable(cast("str", LazyString(" "))) def describe_print_block_string(): @@ -212,4 +212,4 @@ class LazyString: def __str__(self) -> str: return "lazy" - _assert_block_string(cast(str, LazyString()), '"""lazy"""') + _assert_block_string(cast("str", LazyString()), '"""lazy"""') diff --git a/tests/language/test_block_string_fuzz.py b/tests/language/test_block_string_fuzz.py index feb7ca2b..0e17b4d4 100644 --- a/tests/language/test_block_string_fuzz.py +++ b/tests/language/test_block_string_fuzz.py @@ -1,4 +1,5 @@ import pytest + from graphql.language import Lexer, Source, TokenKind from graphql.language.block_string import ( is_printable_as_block_string, @@ -40,7 +41,7 @@ def assert_non_printable_block_string(test_value: str) -> None: def describe_print_block_string(): - @pytest.mark.slow() + @pytest.mark.slow @pytest.mark.timeout(80) def correctly_print_random_strings(): # Testing with length >7 is taking exponentially more time. 
However, it is diff --git a/tests/language/test_lexer.py b/tests/language/test_lexer.py index 0bc9a398..a44e859d 100644 --- a/tests/language/test_lexer.py +++ b/tests/language/test_lexer.py @@ -3,6 +3,7 @@ from typing import Optional, Tuple import pytest + from graphql.error import GraphQLSyntaxError from graphql.language import Lexer, Source, SourceLocation, Token, TokenKind from graphql.language.lexer import is_punctuator_token_kind @@ -393,8 +394,7 @@ def lexes_block_strings(): TokenKind.BLOCK_STRING, 0, 19, 1, 1, "slashes \\\\ \\/" ) assert lex_one( - '"""\n\n spans\n multiple\n' - ' lines\n\n """' + '"""\n\n spans\n multiple\n lines\n\n """' ) == Token(TokenKind.BLOCK_STRING, 0, 68, 1, 1, "spans\n multiple\n lines") def advance_line_after_lexing_multiline_block_string(): diff --git a/tests/language/test_parser.py b/tests/language/test_parser.py index b671e444..0121db23 100644 --- a/tests/language/test_parser.py +++ b/tests/language/test_parser.py @@ -3,6 +3,7 @@ from typing import Optional, Tuple, cast import pytest + from graphql.error import GraphQLSyntaxError from graphql.language import ( ArgumentNode, @@ -180,11 +181,11 @@ def parses_multi_byte_characters(): definitions = doc.definitions assert isinstance(definitions, tuple) assert len(definitions) == 1 - selection_set = cast(OperationDefinitionNode, definitions[0]).selection_set + selection_set = cast("OperationDefinitionNode", definitions[0]).selection_set selections = selection_set.selections assert isinstance(selections, tuple) assert len(selections) == 1 - arguments = cast(FieldNode, selections[0]).arguments + arguments = cast("FieldNode", selections[0]).arguments assert isinstance(arguments, tuple) assert len(arguments) == 1 value = arguments[0].value @@ -262,7 +263,7 @@ def parses_required_field(): definitions = doc.definitions assert isinstance(definitions, tuple) assert len(definitions) == 1 - definition = cast(OperationDefinitionNode, definitions[0]) + definition = cast("OperationDefinitionNode", definitions[0]) selection_set: SelectionSetNode | None = definition.selection_set assert isinstance(selection_set, SelectionSetNode) selections = selection_set.selections @@ -327,7 +328,7 @@ def parses_field_with_required_list_elements(): definitions = doc.definitions assert isinstance(definitions, tuple) assert len(definitions) == 1 - definition = cast(OperationDefinitionNode, definitions[0]) + definition = cast("OperationDefinitionNode", definitions[0]) selection_set: SelectionSetNode | None = definition.selection_set assert isinstance(selection_set, SelectionSetNode) selections = selection_set.selections @@ -351,7 +352,7 @@ def parses_field_with_optional_list_elements(): definitions = doc.definitions assert isinstance(definitions, tuple) assert len(definitions) == 1 - definition = cast(OperationDefinitionNode, definitions[0]) + definition = cast("OperationDefinitionNode", definitions[0]) selection_set: SelectionSetNode | None = definition.selection_set assert isinstance(selection_set, SelectionSetNode) selections = selection_set.selections @@ -375,7 +376,7 @@ def parses_field_with_required_list(): definitions = doc.definitions assert isinstance(definitions, tuple) assert len(definitions) == 1 - definition = cast(OperationDefinitionNode, definitions[0]) + definition = cast("OperationDefinitionNode", definitions[0]) selection_set: SelectionSetNode | None = definition.selection_set assert isinstance(selection_set, SelectionSetNode) selections = selection_set.selections @@ -399,7 +400,7 @@ def parses_field_with_optional_list(): 
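# Editorial note, not part of the patch: the cast() rewrites in this file are
# behavior-neutral. typing.cast performs no runtime conversion, and it also
# accepts the type as a string, which keeps the type reference lazy, e.g.:
#
#     from typing import cast
#     definition = cast("OperationDefinitionNode", definitions[0])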
definitions = doc.definitions assert isinstance(definitions, tuple) assert len(definitions) == 1 - definition = cast(OperationDefinitionNode, definitions[0]) + definition = cast("OperationDefinitionNode", definitions[0]) selection_set: SelectionSetNode | None = definition.selection_set assert isinstance(selection_set, SelectionSetNode) selections = selection_set.selections @@ -423,7 +424,7 @@ def parses_field_with_mixed_list_elements(): definitions = doc.definitions assert isinstance(definitions, tuple) assert len(definitions) == 1 - definition = cast(OperationDefinitionNode, definitions[0]) + definition = cast("OperationDefinitionNode", definitions[0]) selection_set: SelectionSetNode | None = definition.selection_set assert isinstance(selection_set, SelectionSetNode) selections = selection_set.selections @@ -482,7 +483,7 @@ def creates_ast(): definitions = doc.definitions assert isinstance(definitions, tuple) assert len(definitions) == 1 - definition = cast(OperationDefinitionNode, definitions[0]) + definition = cast("OperationDefinitionNode", definitions[0]) assert isinstance(definition, DefinitionNode) assert definition.loc == (0, 40) assert definition.operation == OperationType.QUERY diff --git a/tests/language/test_printer.py b/tests/language/test_printer.py index 6117c69d..42531096 100644 --- a/tests/language/test_printer.py +++ b/tests/language/test_printer.py @@ -1,6 +1,7 @@ from copy import deepcopy import pytest + from graphql.language import FieldNode, NameNode, parse, print_ast from ..fixtures import kitchen_sink_query # noqa: F401 @@ -59,8 +60,7 @@ def correctly_prints_mutation_operation_with_artifacts(): def prints_query_with_variable_directives(): query_ast_with_variable_directive = parse( - "query ($foo: TestType = { a: 123 }" - " @testDirective(if: true) @test) { id }" + "query ($foo: TestType = { a: 123 } @testDirective(if: true) @test) { id }" ) assert print_ast(query_ast_with_variable_directive) == dedent( """ diff --git a/tests/language/test_schema_parser.py b/tests/language/test_schema_parser.py index a5005a06..df64381a 100644 --- a/tests/language/test_schema_parser.py +++ b/tests/language/test_schema_parser.py @@ -6,6 +6,7 @@ from typing import Optional, Tuple import pytest + from graphql.error import GraphQLSyntaxError from graphql.language import ( ArgumentNode, diff --git a/tests/language/test_schema_printer.py b/tests/language/test_schema_printer.py index 35da0b06..083dcd0f 100644 --- a/tests/language/test_schema_printer.py +++ b/tests/language/test_schema_printer.py @@ -1,6 +1,7 @@ from copy import deepcopy import pytest + from graphql.language import NameNode, ScalarTypeDefinitionNode, parse, print_ast from ..fixtures import kitchen_sink_sdl # noqa: F401 @@ -57,6 +58,7 @@ def prints_kitchen_sink_without_altering_ast(kitchen_sink_sdl): # noqa: F811 five(argument: [String] = ["string", "string"]): String six(argument: InputType = { key: "value" }): Type seven(argument: Int = null): Type + eight(argument: OneOfInputType): Type } type AnnotatedObject @onObject(arg: "value") { @@ -139,6 +141,11 @@ def prints_kitchen_sink_without_altering_ast(kitchen_sink_sdl): # noqa: F811 answer: Int = 42 } + input OneOfInputType @oneOf { + string: String + int: Int + } + input AnnotatedInput @onInputObject { annotatedField: Type @onInputFieldDefinition } diff --git a/tests/language/test_source.py b/tests/language/test_source.py index 02014445..b973410d 100644 --- a/tests/language/test_source.py +++ b/tests/language/test_source.py @@ -4,6 +4,7 @@ from typing import cast import 
pytest + from graphql.language import Source, SourceLocation from ..utils import dedent @@ -80,7 +81,7 @@ def can_create_custom_attribute(): def rejects_invalid_location_offset(): def create_source(location_offset: tuple[int, int]) -> Source: - return Source("", "", cast(SourceLocation, location_offset)) + return Source("", "", cast("SourceLocation", location_offset)) with pytest.raises(TypeError): create_source(None) # type: ignore diff --git a/tests/language/test_visitor.py b/tests/language/test_visitor.py index 1e74c6ff..f3fdb370 100644 --- a/tests/language/test_visitor.py +++ b/tests/language/test_visitor.py @@ -5,6 +5,7 @@ from typing import Any, cast import pytest + from graphql.language import ( BREAK, REMOVE, @@ -580,7 +581,9 @@ class CustomFieldNode(SelectionNode): name: NameNode selection_set: SelectionSetNode | None - custom_selection_set = cast(FieldNode, custom_ast.definitions[0]).selection_set + custom_selection_set = cast( + "FieldNode", custom_ast.definitions[0] + ).selection_set assert custom_selection_set is not None custom_selection_set.selections = ( *custom_selection_set.selections, diff --git a/tests/pyutils/test_async_reduce.py b/tests/pyutils/test_async_reduce.py index cbcef554..0ac606c8 100644 --- a/tests/pyutils/test_async_reduce.py +++ b/tests/pyutils/test_async_reduce.py @@ -1,6 +1,7 @@ from functools import reduce import pytest + from graphql.pyutils import async_reduce, is_awaitable @@ -16,7 +17,7 @@ def callback(accumulator, current_value): assert result == 42 assert result == reduce(callback, values, initial_value) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def works_with_sync_values_and_sync_initial_value(): def callback(accumulator, current_value): return accumulator + "-" + current_value @@ -26,7 +27,7 @@ def callback(accumulator, current_value): assert not is_awaitable(result) assert result == "foo-bar-baz" - @pytest.mark.asyncio() + @pytest.mark.asyncio async def works_with_async_initial_value(): async def async_initial_value(): return "foo" @@ -39,7 +40,7 @@ def callback(accumulator, current_value): assert is_awaitable(result) assert await result == "foo-bar-baz" - @pytest.mark.asyncio() + @pytest.mark.asyncio async def works_with_async_callback(): async def async_callback(accumulator, current_value): return accumulator + "-" + current_value @@ -49,7 +50,7 @@ async def async_callback(accumulator, current_value): assert is_awaitable(result) assert await result == "foo-bar-baz" - @pytest.mark.asyncio() + @pytest.mark.asyncio async def works_with_async_callback_and_async_initial_value(): async def async_initial_value(): return 1 / 8 diff --git a/tests/pyutils/test_description.py b/tests/pyutils/test_description.py index 57edff39..781ab14e 100644 --- a/tests/pyutils/test_description.py +++ b/tests/pyutils/test_description.py @@ -2,6 +2,7 @@ from typing import cast import pytest + from graphql import graphql_sync from graphql.pyutils import ( Description, @@ -33,7 +34,7 @@ def __str__(self) -> str: return str(self.text) -lazy_string = cast(str, LazyString("Why am I so lazy?")) +lazy_string = cast("str", LazyString("Why am I so lazy?")) @contextmanager @@ -42,7 +43,7 @@ def registered(base: type): try: yield None finally: - unregister_description(LazyString) + unregister_description(base) def describe_description(): @@ -185,8 +186,8 @@ def __str__(self) -> str: with registered(Lazy): field = GraphQLField( GraphQLString, - description=cast(str, description), - deprecation_reason=cast(str, deprecation_reason), + description=cast("str", description), 
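One genuine bug fix hides among the cosmetic changes above: the registered() helper in test_description.py always unregistered the hard-coded LazyString class in its finally block instead of the base class it was given, so registrations of any other class leaked across tests. A self-contained sketch of the corrected pattern, using a stand-in registry rather than the real register/unregister helpers from graphql.pyutils:

from contextlib import contextmanager

registry = set()  # stand-in for the real description registry

def register_description(base: type) -> None:
    registry.add(base)

def unregister_description(base: type) -> None:
    registry.remove(base)

@contextmanager
def registered(base: type):
    register_description(base)
    try:
        yield None
    finally:
        # Undo for the same class that was registered; hard-coding
        # one specific class here was the bug being fixed.
        unregister_description(base)

class Lazy: ...

with registered(Lazy):
    assert Lazy in registry
assert Lazy not in registry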
+ deprecation_reason=cast("str", deprecation_reason), ) schema = GraphQLSchema(GraphQLObjectType("Query", {"lazyField": field})) @@ -221,8 +222,8 @@ def __str__(self) -> str: with registered(Lazy): field = GraphQLField( GraphQLString, - description=cast(str, description), - deprecation_reason=cast(str, deprecation_reason), + description=cast("str", description), + deprecation_reason=cast("str", deprecation_reason), ) schema = GraphQLSchema(GraphQLObjectType("Query", {"lazyField": field})) diff --git a/tests/pyutils/test_format_list.py b/tests/pyutils/test_format_list.py index ee425eca..09567645 100644 --- a/tests/pyutils/test_format_list.py +++ b/tests/pyutils/test_format_list.py @@ -1,4 +1,5 @@ import pytest + from graphql.pyutils import and_list, or_list diff --git a/tests/pyutils/test_inspect.py b/tests/pyutils/test_inspect.py index 3721d018..94c62b48 100644 --- a/tests/pyutils/test_inspect.py +++ b/tests/pyutils/test_inspect.py @@ -6,6 +6,7 @@ from typing import Any import pytest + from graphql.pyutils import Undefined, inspect from graphql.type import ( GraphQLDirective, @@ -138,7 +139,7 @@ def test_generator(): assert inspect(test_generator) == "" assert inspect(test_generator()) == "" - @pytest.mark.asyncio() + @pytest.mark.asyncio async def inspect_coroutine(): async def test_coroutine(): pass diff --git a/tests/pyutils/test_is_awaitable.py b/tests/pyutils/test_is_awaitable.py index dcee07d9..b05f01af 100644 --- a/tests/pyutils/test_is_awaitable.py +++ b/tests/pyutils/test_is_awaitable.py @@ -3,6 +3,7 @@ from sys import version_info as python_version import pytest + from graphql.pyutils import is_awaitable @@ -66,7 +67,7 @@ async def some_async_function(): assert not isawaitable(some_async_function) assert not is_awaitable(some_async_function) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def recognizes_a_coroutine_object(): async def some_async_function(): return True @@ -92,7 +93,7 @@ def some_function(): assert is_awaitable(some_old_style_coroutine) assert is_awaitable(some_old_style_coroutine) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def recognizes_a_future_object(): async def some_async_function(): return True @@ -105,7 +106,7 @@ async def some_async_function(): assert await some_future is True - @pytest.mark.asyncio() + @pytest.mark.asyncio async def declines_an_async_generator(): async def some_async_generator_function(): yield True diff --git a/tests/pyutils/test_ref_map.py b/tests/pyutils/test_ref_map.py new file mode 100644 index 00000000..96e15c58 --- /dev/null +++ b/tests/pyutils/test_ref_map.py @@ -0,0 +1,124 @@ +import pytest + +from graphql.pyutils import RefMap + +obj1 = {"a": 1, "b": 2, "c": 3} +obj2 = obj1.copy() +obj3 = obj1.copy() +obj4 = obj1.copy() + + +def describe_object_map(): + def can_create_an_empty_map(): + m = RefMap[str, int]() + assert not m + assert len(m) == 0 + assert list(m) == [] + assert list(m.keys()) == [] + assert list(m.values()) == [] + assert list(m.items()) == [] + + def can_create_a_map_with_scalar_keys_and_values(): + m = RefMap[str, int](list(obj1.items())) + assert m + assert len(m) == 3 + assert list(m) == ["a", "b", "c"] + assert list(m.keys()) == ["a", "b", "c"] + assert list(m.values()) == [1, 2, 3] + assert list(m.items()) == [("a", 1), ("b", 2), ("c", 3)] + for k, v in m.items(): + assert k in m + assert m[k] == v + assert m.get(k) == v + assert v not in m + with pytest.raises(KeyError): + m[v] # type: ignore + assert m.get(v) is None + + def can_create_a_map_with_one_object_as_key(): + m = RefMap[dict, 
int]([(obj1, 1)]) + assert m + assert len(m) == 1 + assert list(m) == [obj1] + assert list(m.keys()) == [obj1] + assert list(m.values()) == [1] + assert list(m.items()) == [(obj1, 1)] + assert obj1 in m + assert 1 not in m + assert obj2 not in m + assert m[obj1] == 1 + assert m.get(obj1) == 1 + with pytest.raises(KeyError): + m[1] # type: ignore + assert m.get(1) is None + with pytest.raises(KeyError): + m[obj2] + assert m.get(obj2) is None + + def can_create_a_map_with_three_objects_as_keys(): + m = RefMap[dict, int]([(obj1, 1), (obj2, 2), (obj3, 3)]) + assert m + assert len(m) == 3 + assert list(m) == [obj1, obj2, obj3] + assert list(m.keys()) == [obj1, obj2, obj3] + assert list(m.values()) == [1, 2, 3] + assert list(m.items()) == [(obj1, 1), (obj2, 2), (obj3, 3)] + for k, v in m.items(): + assert k in m + assert m[k] == v + assert m.get(k) == v + assert v not in m + with pytest.raises(KeyError): + m[v] # type: ignore + assert m.get(v) is None + assert obj4 not in m + with pytest.raises(KeyError): + m[obj4] + assert m.get(obj4) is None + + def can_set_a_key_that_is_an_object(): + m = RefMap[dict, int]() + m[obj1] = 1 + assert m[obj1] == 1 + assert list(m) == [obj1] + with pytest.raises(KeyError): + m[obj2] + m[obj2] = 2 + assert m[obj1] == 1 + assert m[obj2] == 2 + assert list(m) == [obj1, obj2] + m[obj2] = 3 + assert m[obj1] == 1 + assert m[obj2] == 3 + assert list(m) == [obj1, obj2] + assert len(m) == 2 + + def can_delete_a_key_that_is_an_object(): + m = RefMap[dict, int]([(obj1, 1), (obj2, 2), (obj3, 3)]) + del m[obj2] + assert obj2 not in m + assert list(m) == [obj1, obj3] + with pytest.raises(KeyError): + del m[obj2] + assert list(m) == [obj1, obj3] + assert len(m) == 2 + + def can_update_a_map(): + m = RefMap[dict, int]([(obj1, 1), (obj2, 2)]) + m.update([]) + assert list(m.keys()) == [obj1, obj2] + assert len(m) == 2 + m.update([(obj2, 3), (obj3, 4)]) + assert list(m.keys()) == [obj1, obj2, obj3] + assert list(m.values()) == [1, 3, 4] + assert list(m.items()) == [(obj1, 1), (obj2, 3), (obj3, 4)] + assert obj3 in m + assert m[obj2] == 3 + assert m[obj3] == 4 + assert len(m) == 3 + + def can_get_the_representation_of_a_ref_map(): + m = RefMap[dict, int]([(obj1, 1), (obj2, 2)]) + assert repr(m) == ( + "RefMap([({'a': 1, 'b': 2, 'c': 3}, 1), ({'a': 1, 'b': 2, 'c': 3}, 2)])" + ) diff --git a/tests/pyutils/test_ref_set.py b/tests/pyutils/test_ref_set.py new file mode 100644 index 00000000..fead877b --- /dev/null +++ b/tests/pyutils/test_ref_set.py @@ -0,0 +1,89 @@ +import pytest + +from graphql.pyutils import RefSet + +obj1 = ["a", "b", "c"] +obj2 = obj1.copy() +obj3 = obj1.copy() +obj4 = obj1.copy() + + +def describe_object_set(): + def can_create_an_empty_set(): + s = RefSet[int]() + assert not s + assert len(s) == 0 + assert list(s) == [] + + def can_create_a_set_with_scalar_values(): + s = RefSet[str](obj1) + assert s + assert len(s) == 3 + assert list(s) == ["a", "b", "c"] + for v in s: + assert v in s + + def can_create_a_set_with_one_object_as_value(): + s = RefSet[list]([obj1]) + assert s + assert len(s) == 1 + assert obj1 in s + assert obj2 not in s + + def can_create_a_set_with_three_objects_as_keys(): + s = RefSet[list]([obj1, obj2, obj3]) + assert s + assert len(s) == 3 + assert list(s) == [obj1, obj2, obj3] + for v in s: + assert v in s + assert obj4 not in s + + def can_add_a_value_that_is_an_object(): + s = RefSet[list]() + s.add(obj1) + assert obj1 in s + assert list(s) == [obj1] + assert obj2 not in s + s.add(obj2) + assert obj1 in s + assert obj2 in s + assert list(s) 
== [obj1, obj2] + s.add(obj2) + assert obj1 in s + assert obj2 in s + assert list(s) == [obj1, obj2] + assert len(s) == 2 + + def can_remove_a_value_that_is_an_object(): + s = RefSet[list]([obj1, obj2, obj3]) + s.remove(obj2) + assert obj2 not in s + assert list(s) == [obj1, obj3] + with pytest.raises(KeyError): + s.remove(obj2) + assert list(s) == [obj1, obj3] + assert len(s) == 2 + + def can_discard_a_value_that_is_an_object(): + s = RefSet[list]([obj1, obj2, obj3]) + s.discard(obj2) + assert obj2 not in s + assert list(s) == [obj1, obj3] + s.discard(obj2) + assert list(s) == [obj1, obj3] + assert len(s) == 2 + + def can_update_a_set(): + s = RefSet[list]([obj1, obj2]) + s.update([]) + assert list(s) == [obj1, obj2] + assert len(s) == 2 + s.update([obj2, obj3]) + assert list(s) == [obj1, obj2, obj3] + assert obj3 in s + assert len(s) == 3 + + def can_get_the_representation_of_a_ref_set(): + s = RefSet[list]([obj1, obj2]) + assert repr(s) == ("RefSet([['a', 'b', 'c'], ['a', 'b', 'c']])") diff --git a/tests/pyutils/test_simple_pub_sub.py b/tests/pyutils/test_simple_pub_sub.py index 2f30a8e2..f0a88dcb 100644 --- a/tests/pyutils/test_simple_pub_sub.py +++ b/tests/pyutils/test_simple_pub_sub.py @@ -1,11 +1,12 @@ from asyncio import sleep import pytest + from graphql.pyutils import SimplePubSub, is_awaitable def describe_simple_pub_sub(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def subscribe_async_iterator_mock(): pubsub = SimplePubSub() iterator = pubsub.get_subscriber() @@ -49,7 +50,7 @@ async def subscribe_async_iterator_mock(): with pytest.raises(StopAsyncIteration): await iterator.__anext__() - @pytest.mark.asyncio() + @pytest.mark.asyncio async def iterator_aclose_empties_push_queue(): pubsub = SimplePubSub() assert not pubsub.subscribers @@ -67,7 +68,7 @@ async def iterator_aclose_empties_push_queue(): assert iterator.pull_queue.qsize() == 0 assert not iterator.listening - @pytest.mark.asyncio() + @pytest.mark.asyncio async def iterator_aclose_empties_pull_queue(): pubsub = SimplePubSub() assert not pubsub.subscribers @@ -84,7 +85,7 @@ async def iterator_aclose_empties_pull_queue(): assert iterator.pull_queue.qsize() == 0 assert not iterator.listening - @pytest.mark.asyncio() + @pytest.mark.asyncio async def iterator_aclose_is_idempotent(): pubsub = SimplePubSub() iterator = pubsub.get_subscriber() diff --git a/tests/pyutils/test_undefined.py b/tests/pyutils/test_undefined.py index b6f62eea..b34611e3 100644 --- a/tests/pyutils/test_undefined.py +++ b/tests/pyutils/test_undefined.py @@ -1,6 +1,7 @@ import pickle import pytest + from graphql.pyutils import Undefined, UndefinedType diff --git a/tests/star_wars_schema.py b/tests/star_wars_schema.py index 3f8713ab..5f4c0809 100644 --- a/tests/star_wars_schema.py +++ b/tests/star_wars_schema.py @@ -54,7 +54,6 @@ GraphQLSchema, GraphQLString, ) - from tests.star_wars_data import ( get_droid, get_friends, @@ -141,8 +140,7 @@ "name": GraphQLField(GraphQLString, description="The name of the human."), "friends": GraphQLField( GraphQLList(character_interface), - description="The friends of the human," - " or an empty list if they have none.", + description="The friends of the human, or an empty list if they have none.", resolve=lambda human, _info: get_friends(human), ), "appearsIn": GraphQLField( @@ -183,8 +181,7 @@ "name": GraphQLField(GraphQLString, description="The name of the droid."), "friends": GraphQLField( GraphQLList(character_interface), - description="The friends of the droid," - " or an empty list if they have none.", + 
description="The friends of the droid, or an empty list if they have none.", resolve=lambda droid, _info: get_friends(droid), ), "appearsIn": GraphQLField( @@ -239,7 +236,7 @@ GraphQLNonNull(GraphQLString), description="id of the human" ) }, - resolve=lambda _source, _info, id: get_human(id), + resolve=lambda _source, _info, id: get_human(id), # noqa: A006 ), "droid": GraphQLField( droid_type, @@ -248,7 +245,7 @@ GraphQLNonNull(GraphQLString), description="id of the droid" ) }, - resolve=lambda _source, _info, id: get_droid(id), + resolve=lambda _source, _info, id: get_droid(id), # noqa: A006 ), }, ) diff --git a/tests/test_star_wars_query.py b/tests/test_star_wars_query.py index 6e5bbf59..bb1008b8 100644 --- a/tests/test_star_wars_query.py +++ b/tests/test_star_wars_query.py @@ -1,4 +1,5 @@ import pytest + from graphql import graphql, graphql_sync from .star_wars_schema import star_wars_schema as schema @@ -6,7 +7,7 @@ def describe_star_wars_query_tests(): def describe_basic_queries(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def correctly_identifies_r2_d2_as_hero_of_the_star_wars_saga(): source = """ query HeroNameQuery { @@ -18,7 +19,7 @@ async def correctly_identifies_r2_d2_as_hero_of_the_star_wars_saga(): result = await graphql(schema=schema, source=source) assert result == ({"hero": {"name": "R2-D2"}}, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def accepts_positional_arguments_to_graphql(): source = """ query HeroNameQuery { @@ -33,7 +34,7 @@ async def accepts_positional_arguments_to_graphql(): sync_result = graphql_sync(schema, source) assert sync_result == result - @pytest.mark.asyncio() + @pytest.mark.asyncio async def allows_us_to_query_for_the_id_and_friends_of_r2_d2(): source = """ query HeroNameAndFriendsQuery { @@ -63,7 +64,7 @@ async def allows_us_to_query_for_the_id_and_friends_of_r2_d2(): ) def describe_nested_queries(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def allows_us_to_query_for_the_friends_of_friends_of_r2_d2(): source = """ query NestedQuery { @@ -121,7 +122,7 @@ async def allows_us_to_query_for_the_friends_of_friends_of_r2_d2(): ) def describe_using_ids_and_query_parameters_to_refetch_objects(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def allows_us_to_query_for_r2_d2_directly_using_his_id(): source = """ query { @@ -133,7 +134,7 @@ async def allows_us_to_query_for_r2_d2_directly_using_his_id(): result = await graphql(schema=schema, source=source) assert result == ({"droid": {"name": "R2-D2"}}, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def allows_us_to_query_characters_directly_using_their_id(): source = """ query FetchLukeAndC3POQuery { @@ -151,7 +152,7 @@ async def allows_us_to_query_characters_directly_using_their_id(): None, ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def allows_creating_a_generic_query_to_fetch_luke_using_his_id(): source = """ query FetchSomeIDQuery($someId: String!) { @@ -166,7 +167,7 @@ async def allows_creating_a_generic_query_to_fetch_luke_using_his_id(): ) assert result == ({"human": {"name": "Luke Skywalker"}}, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def allows_creating_a_generic_query_to_fetch_han_using_his_id(): source = """ query FetchSomeIDQuery($someId: String!) 
{ @@ -181,7 +182,7 @@ async def allows_creating_a_generic_query_to_fetch_han_using_his_id(): ) assert result == ({"human": {"name": "Han Solo"}}, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def generic_query_that_gets_null_back_when_passed_invalid_id(): source = """ query humanQuery($id: String!) { @@ -197,7 +198,7 @@ async def generic_query_that_gets_null_back_when_passed_invalid_id(): assert result == ({"human": None}, None) def describe_using_aliases_to_change_the_key_in_the_response(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def allows_us_to_query_for_luke_changing_his_key_with_an_alias(): source = """ query FetchLukeAliased { @@ -209,7 +210,7 @@ async def allows_us_to_query_for_luke_changing_his_key_with_an_alias(): result = await graphql(schema=schema, source=source) assert result == ({"luke": {"name": "Luke Skywalker"}}, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def query_for_luke_and_leia_using_two_root_fields_and_an_alias(): source = """ query FetchLukeAndLeiaAliased { @@ -228,7 +229,7 @@ async def query_for_luke_and_leia_using_two_root_fields_and_an_alias(): ) def describe_uses_fragments_to_express_more_complex_queries(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def allows_us_to_query_using_duplicated_content(): source = """ query DuplicateFields { @@ -251,7 +252,7 @@ async def allows_us_to_query_using_duplicated_content(): None, ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def allows_us_to_use_a_fragment_to_avoid_duplicating_content(): source = """ query UseFragment { @@ -277,7 +278,7 @@ async def allows_us_to_use_a_fragment_to_avoid_duplicating_content(): ) def describe_using_typename_to_find_the_type_of_an_object(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def allows_us_to_verify_that_r2_d2_is_a_droid(): source = """ query CheckTypeOfR2 { @@ -290,7 +291,7 @@ async def allows_us_to_verify_that_r2_d2_is_a_droid(): result = await graphql(schema=schema, source=source) assert result == ({"hero": {"__typename": "Droid", "name": "R2-D2"}}, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def allows_us_to_verify_that_luke_is_a_human(): source = """ query CheckTypeOfLuke { @@ -307,7 +308,7 @@ async def allows_us_to_verify_that_luke_is_a_human(): ) def describe_reporting_errors_raised_in_resolvers(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def correctly_reports_error_on_accessing_secret_backstory(): source = """ query HeroNameQuery { @@ -329,7 +330,7 @@ async def correctly_reports_error_on_accessing_secret_backstory(): ], ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def correctly_reports_error_on_accessing_backstory_in_a_list(): source = """ query HeroNameQuery { @@ -373,7 +374,7 @@ async def correctly_reports_error_on_accessing_backstory_in_a_list(): ], ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def correctly_reports_error_on_accessing_through_an_alias(): source = """ query HeroNameQuery { diff --git a/tests/test_user_registry.py b/tests/test_user_registry.py index 7d134a52..0cb2b5b9 100644 --- a/tests/test_user_registry.py +++ b/tests/test_user_registry.py @@ -12,6 +12,7 @@ from typing import Any, AsyncIterable, NamedTuple import pytest + from graphql import ( GraphQLArgument, GraphQLBoolean, @@ -212,13 +213,13 @@ async def resolve_subscription_user(event, info, id): # noqa: ARG001, A002 ) -@pytest.fixture() +@pytest.fixture def context(): return {"registry": UserRegistry()} def describe_query(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def 
query_user(context): user = await context["registry"].create( firstName="John", lastName="Doe", tweets=42, verified=True @@ -250,7 +251,7 @@ async def query_user(context): def describe_mutation(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def create_user(context): received = {} @@ -261,7 +262,7 @@ def receive(msg): return receive # noinspection PyProtectedMember - pubsub = context["registry"]._pubsub # noqa: SLF001s + pubsub = context["registry"]._pubsub # noqa: SLF001 pubsub[None].subscribers.add(subscriber("User")) pubsub["0"].subscribers.add(subscriber("User 0")) @@ -302,7 +303,7 @@ def receive(msg): "User 0": {"user": user, "mutation": MutationEnum.CREATED.value}, } - @pytest.mark.asyncio() + @pytest.mark.asyncio async def update_user(context): received = {} @@ -358,7 +359,7 @@ def receive(msg): "User 0": {"user": user, "mutation": MutationEnum.UPDATED.value}, } - @pytest.mark.asyncio() + @pytest.mark.asyncio async def delete_user(context): received = {} @@ -400,7 +401,7 @@ def receive(msg): def describe_subscription(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def subscribe_to_user_mutations(context): query = """ subscription ($userId: ID!) { diff --git a/tests/type/test_assert_name.py b/tests/type/test_assert_name.py index 55ef75c7..24ffc55d 100644 --- a/tests/type/test_assert_name.py +++ b/tests/type/test_assert_name.py @@ -1,4 +1,5 @@ import pytest + from graphql.error import GraphQLError from graphql.type import assert_enum_value_name, assert_name diff --git a/tests/type/test_definition.py b/tests/type/test_definition.py index 88ce94f7..ac7830ef 100644 --- a/tests/type/test_definition.py +++ b/tests/type/test_definition.py @@ -12,6 +12,7 @@ from typing_extensions import TypedDict import pytest + from graphql.error import GraphQLError from graphql.language import ( EnumTypeDefinitionNode, @@ -197,8 +198,7 @@ def parse_literal(_node: ValueNode, _vars=None): with pytest.raises(TypeError) as exc_info: GraphQLScalarType("SomeScalar", parse_literal=parse_literal) assert str(exc_info.value) == ( - "SomeScalar must provide both" - " 'parse_value' and 'parse_literal' functions." + "SomeScalar must provide both 'parse_value' and 'parse_literal' functions." 
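The blanket @pytest.mark.asyncio() to @pytest.mark.asyncio and @pytest.fixture() to @pytest.fixture rewrites running through the test modules above are purely stylistic: pytest treats the bare decorator and the zero-argument call identically. Presumably this satisfies Ruff's flake8-pytest-style rules (PT001 for fixtures, PT023 for marks), which default to the bare form. A minimal sketch:

import pytest

@pytest.fixture  # equivalent to @pytest.fixture()
def answer():
    return 42

@pytest.mark.asyncio  # same mark as @pytest.mark.asyncio()
async def test_answer(answer):
    # requires the pytest-asyncio plugin to actually run
    assert answer == 42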
) def pickles_a_custom_scalar_type(): diff --git a/tests/type/test_directives.py b/tests/type/test_directives.py index 3f29a947..4257d81f 100644 --- a/tests/type/test_directives.py +++ b/tests/type/test_directives.py @@ -1,4 +1,5 @@ import pytest + from graphql.error import GraphQLError from graphql.language import DirectiveDefinitionNode, DirectiveLocation from graphql.type import GraphQLArgument, GraphQLDirective, GraphQLInt, GraphQLString diff --git a/tests/type/test_extensions.py b/tests/type/test_extensions.py index 5aa087e2..d28b9482 100644 --- a/tests/type/test_extensions.py +++ b/tests/type/test_extensions.py @@ -1,4 +1,5 @@ import pytest + from graphql.type import ( GraphQLArgument, GraphQLDirective, diff --git a/tests/type/test_introspection.py b/tests/type/test_introspection.py index 09a21c31..1a52f7a2 100644 --- a/tests/type/test_introspection.py +++ b/tests/type/test_introspection.py @@ -364,6 +364,17 @@ def executes_an_introspection_query(): "isDeprecated": False, "deprecationReason": None, }, + { + "name": "isOneOf", + "args": [], + "type": { + "kind": "SCALAR", + "name": "Boolean", + "ofType": None, + }, + "isDeprecated": False, + "deprecationReason": None, + }, ], "inputFields": None, "interfaces": [], @@ -981,6 +992,12 @@ def executes_an_introspection_query(): } ], }, + { + "name": "oneOf", + "isRepeatable": False, + "locations": ["INPUT_OBJECT"], + "args": [], + }, ], } } @@ -1433,6 +1450,109 @@ def respects_the_include_deprecated_parameter_for_enum_values(): None, ) + def identifies_one_of_for_input_objects(): + schema = build_schema( + """ + input SomeInputObject @oneOf { + a: String + } + + input AnotherInputObject { + a: String + b: String + } + + type Query { + someField(someArg: SomeInputObject): String + anotherField(anotherArg: AnotherInputObject): String + } + """ + ) + + source = """ + { + oneOfInputObject: __type(name: "SomeInputObject") { + isOneOf + } + inputObject: __type(name: "AnotherInputObject") { + isOneOf + } + } + """ + + assert graphql_sync(schema=schema, source=source) == ( + { + "oneOfInputObject": { + "isOneOf": True, + }, + "inputObject": { + "isOneOf": False, + }, + }, + None, + ) + + def returns_null_for_one_of_for_other_types(): + schema = build_schema( + """ + type SomeObject implements SomeInterface { + fieldA: String + } + enum SomeEnum { + SomeObject + } + interface SomeInterface { + fieldA: String + } + union SomeUnion = SomeObject + type Query { + someField(enum: SomeEnum): SomeUnion + anotherField(enum: SomeEnum): SomeInterface + } + """ + ) + + source = """ + { + object: __type(name: "SomeObject") { + isOneOf + } + enum: __type(name: "SomeEnum") { + isOneOf + } + interface: __type(name: "SomeInterface") { + isOneOf + } + scalar: __type(name: "String") { + isOneOf + } + union: __type(name: "SomeUnion") { + isOneOf + } + } + """ + + assert graphql_sync(schema=schema, source=source) == ( + { + "object": { + "isOneOf": None, + }, + "enum": { + "isOneOf": None, + }, + "interface": { + "isOneOf": None, + }, + "scalar": { + "isOneOf": None, + }, + "union": { + "isOneOf": None, + }, + }, + None, + ) + def fails_as_expected_on_the_type_root_field_without_an_arg(): schema = build_schema( """ diff --git a/tests/type/test_predicate.py b/tests/type/test_predicate.py index bd006e74..c741eca3 100644 --- a/tests/type/test_predicate.py +++ b/tests/type/test_predicate.py @@ -1,6 +1,7 @@ from typing import Any import pytest + from graphql.language import DirectiveLocation from graphql.type import ( GraphQLArgument, diff --git 
a/tests/type/test_scalars.py b/tests/type/test_scalars.py index 27255388..0ef5e548 100644 --- a/tests/type/test_scalars.py +++ b/tests/type/test_scalars.py @@ -3,6 +3,7 @@ from typing import Any import pytest + from graphql.error import GraphQLError from graphql.language import parse_value as parse_value_to_ast from graphql.pyutils import Undefined diff --git a/tests/type/test_schema.py b/tests/type/test_schema.py index f589302b..e678de35 100644 --- a/tests/type/test_schema.py +++ b/tests/type/test_schema.py @@ -1,6 +1,7 @@ from copy import deepcopy import pytest + from graphql.language import ( DirectiveLocation, SchemaDefinitionNode, diff --git a/tests/type/test_validation.py b/tests/type/test_validation.py index eb4e2ab7..a4efe041 100644 --- a/tests/type/test_validation.py +++ b/tests/type/test_validation.py @@ -3,6 +3,7 @@ from operator import attrgetter import pytest + from graphql.language import DirectiveLocation, parse from graphql.pyutils import inspect from graphql.type import ( @@ -241,8 +242,7 @@ def rejects_a_schema_whose_query_root_type_is_not_an_object_type(): ) assert validate_schema(schema) == [ { - "message": "Query root type must be Object type," - " it cannot be Query.", + "message": "Query root type must be Object type, it cannot be Query.", "locations": [(2, 13)], } ] @@ -1593,6 +1593,49 @@ def rejects_with_relevant_locations_for_a_non_input_type(): ] +def describe_type_system_one_of_input_object_fields_must_be_nullable(): + def rejects_non_nullable_fields(): + schema = build_schema( + """ + type Query { + test(arg: SomeInputObject): String + } + + input SomeInputObject @oneOf { + a: String + b: String! + } + """ + ) + assert validate_schema(schema) == [ + { + "message": "OneOf input field SomeInputObject.b must be nullable.", + "locations": [(8, 18)], + } + ] + + def rejects_fields_with_default_values(): + schema = build_schema( + """ + type Query { + test(arg: SomeInputObject): String + } + + input SomeInputObject @oneOf { + a: String + b: String = "foo" + } + """ + ) + assert validate_schema(schema) == [ + { + "message": "OneOf input field SomeInputObject.b" + " cannot have a default value.", + "locations": [(8, 15)], + } + ] + + def describe_objects_must_adhere_to_interfaces_they_implement(): def accepts_an_object_which_implements_an_interface(): schema = build_schema( diff --git a/tests/utilities/test_ast_from_value.py b/tests/utilities/test_ast_from_value.py index 1432d7a4..947f2b18 100644 --- a/tests/utilities/test_ast_from_value.py +++ b/tests/utilities/test_ast_from_value.py @@ -1,6 +1,7 @@ from math import inf, nan import pytest + from graphql.error import GraphQLError from graphql.language import ( BooleanValueNode, diff --git a/tests/utilities/test_build_ast_schema.py b/tests/utilities/test_build_ast_schema.py index a0aefb1a..d0196bd7 100644 --- a/tests/utilities/test_build_ast_schema.py +++ b/tests/utilities/test_build_ast_schema.py @@ -7,6 +7,7 @@ from typing import Union import pytest + from graphql import graphql_sync from graphql.language import DocumentNode, InterfaceTypeDefinitionNode, parse, print_ast from graphql.type import ( @@ -22,6 +23,7 @@ GraphQLInputField, GraphQLInt, GraphQLNamedType, + GraphQLOneOfDirective, GraphQLSchema, GraphQLSkipDirective, GraphQLSpecifiedByDirective, @@ -237,14 +239,15 @@ def supports_descriptions(): ) assert cycle_sdl(sdl) == sdl - def maintains_include_skip_and_specified_by_url_directives(): + def maintains_include_skip_and_three_other_directives(): schema = build_schema("type Query") - assert 
len(schema.directives) == 4 + assert len(schema.directives) == 5 assert schema.get_directive("skip") is GraphQLSkipDirective assert schema.get_directive("include") is GraphQLIncludeDirective assert schema.get_directive("deprecated") is GraphQLDeprecatedDirective assert schema.get_directive("specifiedBy") is GraphQLSpecifiedByDirective + assert schema.get_directive("oneOf") is GraphQLOneOfDirective def overriding_directives_excludes_specified(): schema = build_schema( @@ -253,10 +256,11 @@ def overriding_directives_excludes_specified(): directive @include on FIELD directive @deprecated on FIELD_DEFINITION directive @specifiedBy on FIELD_DEFINITION + directive @oneOf on OBJECT """ ) - assert len(schema.directives) == 4 + assert len(schema.directives) == 5 get_directive = schema.get_directive assert get_directive("skip") is not GraphQLSkipDirective assert get_directive("skip") is not None @@ -266,19 +270,22 @@ def overriding_directives_excludes_specified(): assert get_directive("deprecated") is not None assert get_directive("specifiedBy") is not GraphQLSpecifiedByDirective assert get_directive("specifiedBy") is not None + assert get_directive("oneOf") is not GraphQLOneOfDirective + assert get_directive("oneOf") is not None - def adding_directives_maintains_include_skip_and_specified_by_directives(): + def adding_directives_maintains_include_skip_and_three_other_directives(): schema = build_schema( """ directive @foo(arg: Int) on FIELD """ ) - assert len(schema.directives) == 5 + assert len(schema.directives) == 6 assert schema.get_directive("skip") is GraphQLSkipDirective assert schema.get_directive("include") is GraphQLIncludeDirective assert schema.get_directive("deprecated") is GraphQLDeprecatedDirective assert schema.get_directive("specifiedBy") is GraphQLSpecifiedByDirective + assert schema.get_directive("oneOf") is GraphQLOneOfDirective assert schema.get_directive("foo") is not None def type_modifiers(): @@ -1133,7 +1140,7 @@ def can_build_invalid_schema(): assert errors def do_not_override_standard_types(): - # Note: not sure it's desired behaviour to just silently ignore override + # Note: not sure it's desired behavior to just silently ignore override # attempts so just documenting it here. 
schema = build_schema( @@ -1215,6 +1222,25 @@ def can_deep_copy_schema(): # check that printing the copied schema gives the same SDL assert print_schema(copied) == sdl + def can_deep_copy_schema_with_directive_using_args_of_custom_type(): + sdl = dedent(""" + directive @someDirective(someArg: SomeEnum) on FIELD_DEFINITION + + enum SomeEnum { + ONE + TWO + } + + type Query { + someField: String @someDirective(someArg: ONE) + } + """) + schema = build_schema(sdl) + copied = deepcopy(schema) + # custom directives on field definitions cannot be reproduced + expected_sdl = sdl.replace(" @someDirective(someArg: ONE)", "") + assert print_schema(copied) == expected_sdl + def can_pickle_and_unpickle_star_wars_schema(): # create a schema from the star wars SDL schema = build_schema(sdl, assume_valid_sdl=True) @@ -1246,7 +1272,7 @@ def can_deep_copy_pickled_schema(): # check that printing the copied schema gives the same SDL assert print_schema(copied) == sdl - @pytest.mark.slow() + @pytest.mark.slow def describe_deepcopy_and_pickle_big(): # pragma: no cover @pytest.mark.timeout(20) def can_deep_copy_big_schema(big_schema_sdl): # noqa: F811 diff --git a/tests/utilities/test_build_client_schema.py b/tests/utilities/test_build_client_schema.py index 518fb5bf..1455f473 100644 --- a/tests/utilities/test_build_client_schema.py +++ b/tests/utilities/test_build_client_schema.py @@ -1,6 +1,7 @@ -from typing import cast +from typing import TYPE_CHECKING, cast import pytest + from graphql import graphql_sync from graphql.type import ( GraphQLArgument, @@ -22,14 +23,16 @@ introspection_from_schema, print_schema, ) -from graphql.utilities.get_introspection_query import ( - IntrospectionEnumType, - IntrospectionInputObjectType, - IntrospectionInterfaceType, - IntrospectionObjectType, - IntrospectionType, - IntrospectionUnionType, -) + +if TYPE_CHECKING: + from graphql.utilities.get_introspection_query import ( + IntrospectionEnumType, + IntrospectionInputObjectType, + IntrospectionInterfaceType, + IntrospectionObjectType, + IntrospectionType, + IntrospectionUnionType, + ) from ..utils import dedent @@ -714,7 +717,9 @@ def throws_when_missing_definition_for_one_of_the_standard_scalars(): def throws_when_type_reference_is_missing_name(): introspection = introspection_from_schema(dummy_schema) - query_type = cast(IntrospectionType, introspection["__schema"]["queryType"]) + query_type = cast( + "IntrospectionType", introspection["__schema"]["queryType"] + ) assert query_type["name"] == "Query" del query_type["name"] # type: ignore @@ -744,7 +749,7 @@ def throws_when_missing_kind(): def throws_when_missing_interfaces(): introspection = introspection_from_schema(dummy_schema) query_type_introspection = cast( - IntrospectionObjectType, + "IntrospectionObjectType", next( type_ for type_ in introspection["__schema"]["types"] @@ -766,7 +771,7 @@ def throws_when_missing_interfaces(): def legacy_support_for_interfaces_with_null_as_interfaces_field(): introspection = introspection_from_schema(dummy_schema) some_interface_introspection = cast( - IntrospectionInterfaceType, + "IntrospectionInterfaceType", next( type_ for type_ in introspection["__schema"]["types"] @@ -783,7 +788,7 @@ def legacy_support_for_interfaces_with_null_as_interfaces_field(): def throws_when_missing_fields(): introspection = introspection_from_schema(dummy_schema) query_type_introspection = cast( - IntrospectionObjectType, + "IntrospectionObjectType", next( type_ for type_ in introspection["__schema"]["types"] @@ -805,7 +810,7 @@ def 
throws_when_missing_fields(): def throws_when_missing_field_args(): introspection = introspection_from_schema(dummy_schema) query_type_introspection = cast( - IntrospectionObjectType, + "IntrospectionObjectType", next( type_ for type_ in introspection["__schema"]["types"] @@ -827,7 +832,7 @@ def throws_when_missing_field_args(): def throws_when_output_type_is_used_as_an_arg_type(): introspection = introspection_from_schema(dummy_schema) query_type_introspection = cast( - IntrospectionObjectType, + "IntrospectionObjectType", next( type_ for type_ in introspection["__schema"]["types"] @@ -851,7 +856,7 @@ def throws_when_output_type_is_used_as_an_arg_type(): def throws_when_output_type_is_used_as_an_input_value_type(): introspection = introspection_from_schema(dummy_schema) input_object_type_introspection = cast( - IntrospectionInputObjectType, + "IntrospectionInputObjectType", next( type_ for type_ in introspection["__schema"]["types"] @@ -875,7 +880,7 @@ def throws_when_output_type_is_used_as_an_input_value_type(): def throws_when_input_type_is_used_as_a_field_type(): introspection = introspection_from_schema(dummy_schema) query_type_introspection = cast( - IntrospectionObjectType, + "IntrospectionObjectType", next( type_ for type_ in introspection["__schema"]["types"] @@ -899,7 +904,7 @@ def throws_when_input_type_is_used_as_a_field_type(): def throws_when_missing_possible_types(): introspection = introspection_from_schema(dummy_schema) some_union_introspection = cast( - IntrospectionUnionType, + "IntrospectionUnionType", next( type_ for type_ in introspection["__schema"]["types"] @@ -920,7 +925,7 @@ def throws_when_missing_possible_types(): def throws_when_missing_enum_values(): introspection = introspection_from_schema(dummy_schema) some_enum_introspection = cast( - IntrospectionEnumType, + "IntrospectionEnumType", next( type_ for type_ in introspection["__schema"]["types"] @@ -941,7 +946,7 @@ def throws_when_missing_enum_values(): def throws_when_missing_input_fields(): introspection = introspection_from_schema(dummy_schema) some_input_object_introspection = cast( - IntrospectionInputObjectType, + "IntrospectionInputObjectType", next( type_ for type_ in introspection["__schema"]["types"] @@ -990,11 +995,11 @@ def throws_when_missing_directive_args(): build_client_schema(introspection) def describe_very_deep_decorators_are_not_supported(): - def fails_on_very_deep_lists_more_than_7_levels(): + def fails_on_very_deep_lists_more_than_8_levels(): schema = build_schema( """ type Query { - foo: [[[[[[[[String]]]]]]]] + foo: [[[[[[[[[[String]]]]]]]]]] } """ ) @@ -1009,11 +1014,11 @@ def fails_on_very_deep_lists_more_than_7_levels(): " Decorated type deeper than introspection query." ) - def fails_on_a_very_deep_non_null_more_than_7_levels(): + def fails_on_a_very_deep_more_than_8_levels_non_null(): schema = build_schema( """ type Query { - foo: [[[[String!]!]!]!] + foo: [[[[[String!]!]!]!]!] } """ ) @@ -1028,12 +1033,12 @@ def fails_on_a_very_deep_non_null_more_than_7_levels(): " Decorated type deeper than introspection query." ) - def succeeds_on_deep_types_less_or_equal_7_levels(): - # e.g., fully non-null 3D matrix + def succeeds_on_deep_less_or_equal_8_levels_types(): + # e.g., fully non-null 4D matrix sdl = dedent( """ type Query { - foo: [[[String!]!]!]! + foo: [[[[String!]!]!]!]! 
} """ ) @@ -1054,7 +1059,7 @@ def recursive_interfaces(): schema = build_schema(sdl, assume_valid=True) introspection = introspection_from_schema(schema) foo_introspection = cast( - IntrospectionObjectType, + "IntrospectionObjectType", next( type_ for type_ in introspection["__schema"]["types"] diff --git a/tests/utilities/test_coerce_input_value.py b/tests/utilities/test_coerce_input_value.py index 61b1feab..90af6cb9 100644 --- a/tests/utilities/test_coerce_input_value.py +++ b/tests/utilities/test_coerce_input_value.py @@ -4,6 +4,7 @@ from typing import Any, NamedTuple import pytest + from graphql.error import GraphQLError from graphql.pyutils import Undefined from graphql.type import ( @@ -250,6 +251,99 @@ def transforms_values_with_out_type(): result = _coerce_value({"real": 1, "imag": 2}, ComplexInputObject) assert expect_value(result) == 1 + 2j + def describe_for_graphql_input_object_that_is_one_of(): + TestInputObject = GraphQLInputObjectType( + "TestInputObject", + { + "foo": GraphQLInputField(GraphQLInt), + "bar": GraphQLInputField(GraphQLInt), + }, + is_one_of=True, + ) + + def returns_no_error_for_a_valid_input(): + result = _coerce_value({"foo": 123}, TestInputObject) + assert expect_value(result) == {"foo": 123} + + def returns_an_error_if_more_than_one_field_is_specified(): + result = _coerce_value({"foo": 123, "bar": None}, TestInputObject) + assert expect_errors(result) == [ + ( + "Exactly one key must be specified" + " for OneOf type 'TestInputObject'.", + [], + {"foo": 123, "bar": None}, + ) + ] + + def returns_an_error_if_the_one_field_is_null(): + result = _coerce_value({"bar": None}, TestInputObject) + assert expect_errors(result) == [ + ( + "Field 'bar' must be non-null.", + ["bar"], + None, + ) + ] + + def returns_an_error_for_an_invalid_field(): + result = _coerce_value({"foo": nan}, TestInputObject) + assert expect_errors(result) == [ + ( + "Int cannot represent non-integer value: nan", + ["foo"], + nan, + ) + ] + + def returns_multiple_errors_for_multiple_invalid_fields(): + result = _coerce_value({"foo": "abc", "bar": "def"}, TestInputObject) + assert expect_errors(result) == [ + ( + "Int cannot represent non-integer value: 'abc'", + ["foo"], + "abc", + ), + ( + "Int cannot represent non-integer value: 'def'", + ["bar"], + "def", + ), + ( + "Exactly one key must be specified" + " for OneOf type 'TestInputObject'.", + [], + {"foo": "abc", "bar": "def"}, + ), + ] + + def returns_an_error_for_an_unknown_field(): + result = _coerce_value({"foo": 123, "unknownField": 123}, TestInputObject) + assert expect_errors(result) == [ + ( + "Field 'unknownField' is not defined by type 'TestInputObject'.", + [], + {"foo": 123, "unknownField": 123}, + ) + ] + + def returns_an_error_for_a_misspelled_field(): + result = _coerce_value({"bart": 123}, TestInputObject) + assert expect_errors(result) == [ + ( + "Field 'bart' is not defined by type 'TestInputObject'." 
+ " Did you mean 'bar'?", + [], + {"bart": 123}, + ), + ( + "Exactly one key must be specified" + " for OneOf type 'TestInputObject'.", + [], + {"bart": 123}, + ), + ] + def describe_for_graphql_input_object_with_default_value(): def _get_test_input_object(default_value): return GraphQLInputObjectType( diff --git a/tests/utilities/test_extend_schema.py b/tests/utilities/test_extend_schema.py index 75c70efd..1eb98d38 100644 --- a/tests/utilities/test_extend_schema.py +++ b/tests/utilities/test_extend_schema.py @@ -3,6 +3,7 @@ from typing import Union import pytest + from graphql import graphql_sync from graphql.language import parse, print_ast from graphql.type import ( @@ -1362,8 +1363,7 @@ def does_not_allow_replacing_a_default_directive(): with pytest.raises(TypeError) as exc_info: extend_schema(schema, extend_ast) assert str(exc_info.value).startswith( - "Directive '@include' already exists in the schema." - " It cannot be redefined." + "Directive '@include' already exists in the schema. It cannot be redefined." ) def does_not_allow_replacing_an_existing_enum_value(): diff --git a/tests/utilities/test_find_breaking_changes.py b/tests/utilities/test_find_breaking_changes.py index c9003a6c..bfcc7e72 100644 --- a/tests/utilities/test_find_breaking_changes.py +++ b/tests/utilities/test_find_breaking_changes.py @@ -1,6 +1,7 @@ from graphql.type import ( GraphQLDeprecatedDirective, GraphQLIncludeDirective, + GraphQLOneOfDirective, GraphQLSchema, GraphQLSkipDirective, GraphQLSpecifiedByDirective, @@ -754,8 +755,7 @@ def should_detect_all_breaking_changes(): ), ( BreakingChangeType.TYPE_CHANGED_KIND, - "TypeThatChangesType changed from an Object type to an" - " Interface type.", + "TypeThatChangesType changed from an Object type to an Interface type.", ), ( BreakingChangeType.FIELD_REMOVED, @@ -817,6 +817,7 @@ def should_detect_if_a_directive_was_implicitly_removed(): GraphQLSkipDirective, GraphQLIncludeDirective, GraphQLSpecifiedByDirective, + GraphQLOneOfDirective, ] ) diff --git a/tests/utilities/test_introspection_from_schema.py b/tests/utilities/test_introspection_from_schema.py index 895ade9a..1c9dbd52 100644 --- a/tests/utilities/test_introspection_from_schema.py +++ b/tests/utilities/test_introspection_from_schema.py @@ -3,6 +3,7 @@ from copy import deepcopy import pytest + from graphql.type import GraphQLField, GraphQLObjectType, GraphQLSchema, GraphQLString from graphql.utilities import ( IntrospectionQuery, @@ -105,7 +106,7 @@ def can_deep_copy_pickled_schema(): # check that introspecting the copied schema gives the same result assert introspection_from_schema(copied) == introspected_schema - @pytest.mark.slow() + @pytest.mark.slow def describe_deepcopy_and_pickle_big(): # pragma: no cover @pytest.mark.timeout(20) def can_deep_copy_big_schema(big_schema_sdl): # noqa: F811 diff --git a/tests/utilities/test_print_schema.py b/tests/utilities/test_print_schema.py index 1939ed59..ab997610 100644 --- a/tests/utilities/test_print_schema.py +++ b/tests/utilities/test_print_schema.py @@ -555,7 +555,7 @@ def prints_enum(): def prints_empty_types(): schema = GraphQLSchema( types=[ - GraphQLEnumType("SomeEnum", cast(Dict[str, Any], {})), + GraphQLEnumType("SomeEnum", cast("Dict[str, Any]", {})), GraphQLInputObjectType("SomeInputObject", {}), GraphQLInterfaceType("SomeInterface", {}), GraphQLObjectType("SomeObject", {}), @@ -765,12 +765,17 @@ def prints_introspection_schema(): reason: String = "No longer supported" ) on FIELD_DEFINITION | ARGUMENT_DEFINITION | INPUT_FIELD_DEFINITION | ENUM_VALUE 
- """Exposes a URL that specifies the behaviour of this scalar.""" + """Exposes a URL that specifies the behavior of this scalar.""" directive @specifiedBy( - """The URL that specifies the behaviour of this scalar.""" + """The URL that specifies the behavior of this scalar.""" url: String! ) on SCALAR + """ + Indicates exactly one field must be supplied and this field must not be `null`. + """ + directive @oneOf on INPUT_OBJECT + """ A GraphQL Schema defines the capabilities of a GraphQL server. It exposes all available types and directives on the server, as well as the entry points for query, mutation, and subscription operations. """ @@ -813,6 +818,7 @@ def prints_introspection_schema(): enumValues(includeDeprecated: Boolean = false): [__EnumValue!] inputFields(includeDeprecated: Boolean = false): [__InputValue!] ofType: __Type + isOneOf: Boolean } """An enum describing what kind of type a given `__Type` is.""" diff --git a/tests/utilities/test_strip_ignored_characters.py b/tests/utilities/test_strip_ignored_characters.py index d708bfdb..cdc6062d 100644 --- a/tests/utilities/test_strip_ignored_characters.py +++ b/tests/utilities/test_strip_ignored_characters.py @@ -1,6 +1,7 @@ from __future__ import annotations import pytest + from graphql.error import GraphQLSyntaxError from graphql.language import Lexer, Source, TokenKind, parse from graphql.utilities import strip_ignored_characters diff --git a/tests/utilities/test_strip_ignored_characters_fuzz.py b/tests/utilities/test_strip_ignored_characters_fuzz.py index 85c43aec..4c276e07 100644 --- a/tests/utilities/test_strip_ignored_characters_fuzz.py +++ b/tests/utilities/test_strip_ignored_characters_fuzz.py @@ -3,6 +3,7 @@ from json import dumps import pytest + from graphql.error import GraphQLSyntaxError from graphql.language import Lexer, Source, TokenKind from graphql.utilities import strip_ignored_characters @@ -74,7 +75,7 @@ def lex_value(s: str) -> str | None: def describe_strip_ignored_characters(): - @pytest.mark.slow() + @pytest.mark.slow @pytest.mark.timeout(10) def strips_documents_with_random_combination_of_ignored_characters(): for ignored in ignored_tokens: @@ -85,7 +86,7 @@ def strips_documents_with_random_combination_of_ignored_characters(): ExpectStripped("".join(ignored_tokens)).to_equal("") - @pytest.mark.slow() + @pytest.mark.slow @pytest.mark.timeout(10) def strips_random_leading_and_trailing_ignored_tokens(): for token in punctuator_tokens + non_punctuator_tokens: @@ -100,7 +101,7 @@ def strips_random_leading_and_trailing_ignored_tokens(): ExpectStripped("".join(ignored_tokens) + token).to_equal(token) ExpectStripped(token + "".join(ignored_tokens)).to_equal(token) - @pytest.mark.slow() + @pytest.mark.slow @pytest.mark.timeout(10) def strips_random_ignored_tokens_between_punctuator_tokens(): for left in punctuator_tokens: @@ -117,7 +118,7 @@ def strips_random_ignored_tokens_between_punctuator_tokens(): left + right ) - @pytest.mark.slow() + @pytest.mark.slow @pytest.mark.timeout(10) def strips_random_ignored_tokens_between_punctuator_and_non_punctuator_tokens(): for non_punctuator in non_punctuator_tokens: @@ -136,7 +137,7 @@ def strips_random_ignored_tokens_between_punctuator_and_non_punctuator_tokens(): punctuator + "".join(ignored_tokens) + non_punctuator ).to_equal(punctuator + non_punctuator) - @pytest.mark.slow() + @pytest.mark.slow @pytest.mark.timeout(10) def strips_random_ignored_tokens_between_non_punctuator_and_punctuator_tokens(): for non_punctuator in non_punctuator_tokens: @@ -159,7 +160,7 @@ def 
strips_random_ignored_tokens_between_non_punctuator_and_punctuator_tokens(): non_punctuator + "".join(ignored_tokens) + punctuator ).to_equal(non_punctuator + punctuator) - @pytest.mark.slow() + @pytest.mark.slow @pytest.mark.timeout(10) def replace_random_ignored_tokens_between_non_punctuator_and_spread_with_space(): for non_punctuator in non_punctuator_tokens: @@ -177,7 +178,7 @@ def replace_random_ignored_tokens_between_non_punctuator_and_spread_with_space() non_punctuator + " ..." ) - @pytest.mark.slow() + @pytest.mark.slow @pytest.mark.timeout(10) def replace_random_ignored_tokens_between_non_punctuator_tokens_with_space(): for left in non_punctuator_tokens: @@ -194,7 +195,7 @@ def replace_random_ignored_tokens_between_non_punctuator_tokens_with_space(): left + " " + right ) - @pytest.mark.slow() + @pytest.mark.slow @pytest.mark.timeout(10) def does_not_strip_random_ignored_tokens_embedded_in_the_string(): for ignored in ignored_tokens: @@ -205,7 +206,7 @@ def does_not_strip_random_ignored_tokens_embedded_in_the_string(): ExpectStripped(dumps("".join(ignored_tokens))).to_stay_the_same() - @pytest.mark.slow() + @pytest.mark.slow @pytest.mark.timeout(10) def does_not_strip_random_ignored_tokens_embedded_in_the_block_string(): ignored_tokens_without_formatting = [ @@ -226,7 +227,7 @@ def does_not_strip_random_ignored_tokens_embedded_in_the_block_string(): '"""|' + "".join(ignored_tokens_without_formatting) + '|"""' ).to_stay_the_same() - @pytest.mark.slow() + @pytest.mark.slow @pytest.mark.timeout(80) def strips_ignored_characters_inside_random_block_strings(): # Testing with length >7 is taking exponentially more time. However it is diff --git a/tests/utilities/test_type_from_ast.py b/tests/utilities/test_type_from_ast.py index 282c8f50..fa75a9f9 100644 --- a/tests/utilities/test_type_from_ast.py +++ b/tests/utilities/test_type_from_ast.py @@ -1,4 +1,5 @@ import pytest + from graphql.language import TypeNode, parse_type from graphql.type import GraphQLList, GraphQLNonNull, GraphQLObjectType from graphql.utilities import type_from_ast diff --git a/tests/utilities/test_type_info.py b/tests/utilities/test_type_info.py index d23b878b..01f7e464 100644 --- a/tests/utilities/test_type_info.py +++ b/tests/utilities/test_type_info.py @@ -375,8 +375,7 @@ def leave(*args): assert print_ast(edited_ast) == print_ast( parse( - "{ human(id: 4) { name, pets { __typename } }," - " alien { __typename } }" + "{ human(id: 4) { name, pets { __typename } }, alien { __typename } }" ) ) diff --git a/tests/utilities/test_value_from_ast.py b/tests/utilities/test_value_from_ast.py index f21abcc2..6622b4dc 100644 --- a/tests/utilities/test_value_from_ast.py +++ b/tests/utilities/test_value_from_ast.py @@ -174,6 +174,15 @@ def coerces_non_null_lists_of_non_null_values(): }, ) + test_one_of_input_obj = GraphQLInputObjectType( + "TestOneOfInput", + { + "a": GraphQLInputField(GraphQLString), + "b": GraphQLInputField(GraphQLString), + }, + is_one_of=True, + ) + def coerces_input_objects_according_to_input_coercion_rules(): assert _value_from("null", test_input_obj) is None assert _value_from("[]", test_input_obj) is Undefined @@ -193,6 +202,14 @@ def coerces_input_objects_according_to_input_coercion_rules(): ) assert _value_from("{ requiredBool: null }", test_input_obj) is Undefined assert _value_from("{ bool: true }", test_input_obj) is Undefined + assert _value_from('{ a: "abc" }', test_one_of_input_obj) == {"a": "abc"} + assert _value_from('{ b: "def" }', test_one_of_input_obj) == {"b": "def"} + assert 
_value_from('{ a: "abc", b: null }', test_one_of_input_obj) is Undefined + assert _value_from("{ a: null }", test_one_of_input_obj) is Undefined + assert _value_from("{ a: 1 }", test_one_of_input_obj) is Undefined + assert _value_from('{ a: "abc", b: "def" }', test_one_of_input_obj) is Undefined + assert _value_from("{}", test_one_of_input_obj) is Undefined + assert _value_from('{ c: "abc" }', test_one_of_input_obj) is Undefined def accepts_variable_values_assuming_already_coerced(): assert _value_from("$var", GraphQLBoolean, {}) is Undefined diff --git a/tests/utils/__init__.py b/tests/utils/__init__.py index 6ae4a6e5..ea374993 100644 --- a/tests/utils/__init__.py +++ b/tests/utils/__init__.py @@ -8,8 +8,8 @@ from .viral_sdl import viral_sdl __all__ = [ - "assert_matching_values", "assert_equal_awaitables_or_values", + "assert_matching_values", "dedent", "gen_fuzz_strings", "viral_schema", diff --git a/tests/utils/assert_equal_awaitables_or_values.py b/tests/utils/assert_equal_awaitables_or_values.py index 8ed8d175..964db1a8 100644 --- a/tests/utils/assert_equal_awaitables_or_values.py +++ b/tests/utils/assert_equal_awaitables_or_values.py @@ -15,7 +15,7 @@ def assert_equal_awaitables_or_values(*items: T) -> T: """Check whether the items are the same and either all awaitables or all values.""" if all(is_awaitable(item) for item in items): - awaitable_items = cast(Tuple[Awaitable], items) + awaitable_items = cast("Tuple[Awaitable]", items) async def assert_matching_awaitables(): return assert_matching_values(*(await asyncio.gather(*awaitable_items))) diff --git a/tests/utils/test_assert_equal_awaitables_or_values.py b/tests/utils/test_assert_equal_awaitables_or_values.py index 214acfea..3e60fbcb 100644 --- a/tests/utils/test_assert_equal_awaitables_or_values.py +++ b/tests/utils/test_assert_equal_awaitables_or_values.py @@ -15,7 +15,7 @@ def does_not_throw_when_given_equal_values(): == test_value ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def does_not_throw_when_given_equal_awaitables(): async def test_value(): return {"test": "test"} @@ -27,7 +27,7 @@ async def test_value(): == await test_value() ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def throws_when_given_unequal_awaitables(): async def test_value(value): return value @@ -37,7 +37,7 @@ async def test_value(value): test_value({}), test_value({}), test_value({"test": "test"}) ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def throws_when_given_mixture_of_equal_values_and_awaitables(): async def test_value(): return {"test": "test"} diff --git a/tests/validation/harness.py b/tests/validation/harness.py index 1189e922..737fb2df 100644 --- a/tests/validation/harness.py +++ b/tests/validation/harness.py @@ -12,9 +12,9 @@ from graphql.validation import ASTValidationRule __all__ = [ - "test_schema", - "assert_validation_errors", "assert_sdl_validation_errors", + "assert_validation_errors", + "test_schema", ] test_schema = build_schema( """ @@ -86,6 +86,11 @@ stringListField: [String] } + input OneOfInput @oneOf { + stringField: String + intField: Int + } + type ComplicatedArgs { # TODO List # TODO Coercion @@ -100,6 +105,7 @@ stringListArgField(stringListArg: [String]): String stringListNonNullArgField(stringListNonNullArg: [String!]): String complexArgField(complexArg: ComplexInput): String + oneOfArgField(oneOfArg: OneOfInput): String multipleReqs(req1: Int!, req2: Int!): String nonNullFieldWithDefault(arg: Int!
diff --git a/tests/validation/test_validation.py b/tests/validation/test_validation.py
index 37d57e9b..78efbce9 100644
--- a/tests/validation/test_validation.py
+++ b/tests/validation/test_validation.py
@@ -1,4 +1,5 @@
 import pytest
+
 from graphql.error import GraphQLError
 from graphql.language import parse
 from graphql.utilities import TypeInfo, build_schema
@@ -70,8 +71,7 @@ def deprecated_validates_using_a_custom_type_info():
             "Cannot query field 'human' on type 'QueryRoot'. Did you mean 'human'?",
             "Cannot query field 'meowsVolume' on type 'Cat'."
             " Did you mean 'meowsVolume'?",
-            "Cannot query field 'barkVolume' on type 'Dog'."
-            " Did you mean 'barkVolume'?",
+            "Cannot query field 'barkVolume' on type 'Dog'. Did you mean 'barkVolume'?",
         ]
 
     def validates_using_a_custom_rule():
diff --git a/tests/validation/test_values_of_correct_type.py b/tests/validation/test_values_of_correct_type.py
index e19228aa..7cf20648 100644
--- a/tests/validation/test_values_of_correct_type.py
+++ b/tests/validation/test_values_of_correct_type.py
@@ -931,6 +931,29 @@ def full_object_with_fields_in_different_order():
             """
         )
 
+    def describe_valid_one_of_input_object_value():
+        def exactly_one_field():
+            assert_valid(
+                """
+                {
+                  complicatedArgs {
+                    oneOfArgField(oneOfArg: { stringField: "abc" })
+                  }
+                }
+                """
+            )
+
+        def exactly_one_non_nullable_variable():
+            assert_valid(
+                """
+                query ($string: String!) {
+                  complicatedArgs {
+                    oneOfArgField(oneOfArg: { stringField: $string })
+                  }
+                }
+                """
+            )
+
     def describe_invalid_input_object_value():
         def partial_object_missing_required():
             assert_errors(
@@ -1097,6 +1120,77 @@ def allows_custom_scalar_to_accept_complex_literals():
                 schema=schema,
             )
 
+    def describe_invalid_one_of_input_object_value():
+        def invalid_field_type():
+            assert_errors(
+                """
+                {
+                  complicatedArgs {
+                    oneOfArgField(oneOfArg: { stringField: 2 })
+                  }
+                }
+                """,
+                [
+                    {
+                        "message": "String cannot represent a non string value: 2",
+                        "locations": [(4, 60)],
+                    },
+                ],
+            )
+
+        def exactly_one_null_field():
+            assert_errors(
+                """
+                {
+                  complicatedArgs {
+                    oneOfArgField(oneOfArg: { stringField: null })
+                  }
+                }
+                """,
+                [
+                    {
+                        "message": "Field 'OneOfInput.stringField' must be non-null.",
+                        "locations": [(4, 45)],
+                    },
+                ],
+            )
+
+        def exactly_one_nullable_variable():
+            assert_errors(
+                """
+                query ($string: String) {
+                  complicatedArgs {
+                    oneOfArgField(oneOfArg: { stringField: $string })
+                  }
+                }
+                """,
+                [
+                    {
+                        "message": "Variable 'string' must be non-nullable to be used"
+                        " for OneOf Input Object 'OneOfInput'.",
+                        "locations": [(4, 45)],
+                    },
+                ],
+            )
+
+        def more_than_one_field():
+            assert_errors(
+                """
+                {
+                  complicatedArgs {
+                    oneOfArgField(oneOfArg: { stringField: "abc", intField: 123 })
+                  }
+                }
+                """,
+                [
+                    {
+                        "message": "OneOf Input Object 'OneOfInput'"
+                        " must specify exactly one key.",
+                        "locations": [(4, 45)],
+                    },
+                ],
+            )
+
     def describe_directive_arguments():
         def with_directives_of_valid_types():
             assert_valid(
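Together the cases above cover the whole OneOf validation surface: wrong field type, explicit null, nullable variable, and more than one key. A rough standalone sketch of the same behaviour outside the test harness, assuming graphql-core 3.3.0a7 where build_schema and validate already understand the @oneOf directive (error wording taken from the tests above):

# Standalone sketch of OneOf validation; not part of this diff.
from graphql import build_schema, parse, validate

schema = build_schema(
    """
    input OneOfInput @oneOf {
      stringField: String
      intField: Int
    }

    type Query {
      oneOfArgField(oneOfArg: OneOfInput): String
    }
    """
)

# Exactly one key passes validation.
document = parse('{ oneOfArgField(oneOfArg: { stringField: "abc" }) }')
assert not validate(schema, document)

# More than one key is rejected by ValuesOfCorrectTypeRule.
document = parse('{ oneOfArgField(oneOfArg: { stringField: "abc", intField: 123 }) }')
errors = validate(schema, document)
assert any("must specify exactly one key" in error.message for error in errors)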
diff --git a/tox.ini b/tox.ini
index f32bcfff..d7dc47bc 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,23 +1,24 @@
 [tox]
-envlist = py3{7,8,9,10,11,12}, pypy3{9,10}, ruff, mypy, docs
+envlist = py3{7,8,9,10,11,12,13}, pypy3{9,10}, ruff, mypy, docs
 isolated_build = true
 
 [gh-actions]
 python =
-    3: py311
+    3: py313
     3.7: py37
     3.8: py38
     3.9: py39
     3.10: py310
     3.11: py311
     3.12: py312
-    pypy3: pypy9
+    3.13: py313
+    pypy3: pypy39
     pypy3.9: pypy39
     pypy3.10: pypy310
 
 [testenv:ruff]
 basepython = python3.12
-deps = ruff>=0.5.7,<0.6
+deps = ruff>=0.11,<0.12
 commands =
     ruff check src tests
     ruff format --check src tests
@@ -25,7 +26,7 @@ commands =
 [testenv:mypy]
 basepython = python3.12
 deps =
-    mypy>=1.11,<2
+    mypy>=1.15,<2
     pytest>=8.3,<9
 commands =
     mypy src tests
@@ -33,8 +34,8 @@ commands =
 [testenv:docs]
 basepython = python3.12
 deps =
-    sphinx>=7,<8
-    sphinx_rtd_theme>=2.0,<3
+    sphinx>=8,<9
+    sphinx_rtd_theme>=3,<4
 commands =
     sphinx-build -b html -nEW docs docs/_build/html
 
@@ -42,13 +43,13 @@ commands =
 deps =
     pytest>=7.4,<9
     pytest-asyncio>=0.21.1,<1
-    pytest-benchmark>=4,<5
-    pytest-cov>=4.1,<6
+    pytest-benchmark>=4,<6
+    pytest-cov>=4.1,<7
     pytest-describe>=2.2,<3
     pytest-timeout>=2.3,<3
-    py37,py38,py39,pypy39: typing-extensions>=4.7.1,<5
+    py3{7,8,9},pypy39: typing-extensions>=4.7.1,<5
 commands =
-    # to also run the time-consuming tests: tox -e py311 -- --run-slow
-    # to run the benchmarks: tox -e py311 -- -k benchmarks --benchmark-enable
-    py37,py38,py39,py310,py311,pypy39,pypy310: pytest tests {posargs}
+    # to also run the time-consuming tests: tox -e py312 -- --run-slow
+    # to run the benchmarks: tox -e py312 -- -k benchmarks --benchmark-enable
+    py3{7,8,9,10,11,13},pypy3{9,10}: pytest tests {posargs}
     py312: pytest tests {posargs: --cov-report=term-missing --cov=graphql --cov=tests --cov-fail-under=100}