diff --git a/.bumpversion.cfg b/.bumpversion.cfg index e2aa0e98..9c2cb695 100644 --- a/.bumpversion.cfg +++ b/.bumpversion.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 3.3.0a6 +current_version = 3.3.0a9 commit = False tag = False diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml new file mode 100644 index 00000000..fce1037f --- /dev/null +++ b/.github/workflows/benchmark.yml @@ -0,0 +1,35 @@ +name: Performance + +on: + push: + branches: + - "main" + pull_request: + workflow_dispatch: + +jobs: + benchmarks: + name: 📈 Benchmarks + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python 3.12 + uses: actions/setup-python@v5 + id: setup-python + with: + python-version: "3.12" + architecture: x64 + + - name: Install with poetry + run: | + pipx install poetry + poetry env use 3.12 + poetry install --with test + + - name: Run benchmarks with CodSpeed + uses: CodSpeedHQ/action@v3 + with: + token: ${{ secrets.CODSPEED_TOKEN }} + run: poetry run pytest tests --benchmark-enable --codspeed diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 74f14604..703a56aa 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -4,6 +4,7 @@ on: [push, pull_request] jobs: lint: + name: 🧹 Lint runs-on: ubuntu-latest steps: diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 561b3028..8bd8c296 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -7,6 +7,7 @@ on: jobs: build: + name: 🏗️ Build runs-on: ubuntu-latest steps: diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index e99059b8..581528cc 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -4,17 +4,42 @@ on: [push, pull_request] jobs: tests: + name: 🧪 Tests runs-on: ubuntu-latest strategy: matrix: - python-version: ['3.7', '3.8', '3.9', '3.10', '3.11', '3.12', 'pypy3.9', 'pypy3.10'] + python-version: ['3.9', '3.10', '3.11', '3.12', '3.13', 'pypy3.9', 'pypy3.10'] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install "tox>=4.24,<5" "tox-gh-actions>=3.2,<4" + + - name: Run unit tests with tox + run: tox + + tests-old: + name: 🧪 Tests (older Python versions) + runs-on: ubuntu-22.04 + + strategy: + matrix: + python-version: ['3.7', '3.8'] + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} diff --git a/README.md b/README.md index 7a0a1e7a..63bcb3a5 100644 --- a/README.md +++ b/README.md @@ -6,19 +6,20 @@ a query language for APIs created by Facebook. 
[![PyPI version](https://badge.fury.io/py/graphql-core.svg)](https://badge.fury.io/py/graphql-core) [![Documentation Status](https://readthedocs.org/projects/graphql-core-3/badge/)](https://graphql-core-3.readthedocs.io) -![Test Status](https://github.com/graphql-python/graphql-core/actions/workflows/test.yml/badge.svg) -![Lint Status](https://github.com/graphql-python/graphql-core/actions/workflows/lint.yml/badge.svg) -[![Code Style](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/ambv/black) +[![Test Status](https://github.com/graphql-python/graphql-core/actions/workflows/test.yml/badge.svg)](https://github.com/graphql-python/graphql-core/actions/workflows/test.yml) +[![Lint Status](https://github.com/graphql-python/graphql-core/actions/workflows/lint.yml/badge.svg)](https://github.com/graphql-python/graphql-core/actions/workflows/lint.yml) +[![CodSpeed](https://img.shields.io/endpoint?url=https://codspeed.io/badge.json)](https://codspeed.io/graphql-python/graphql-core) +[![Code style](https://img.shields.io/badge/code%20style-ruff-000000.svg)](https://github.com/astral-sh/ruff) -An extensive test suite with over 2300 unit tests and 100% coverage comprises a -replication of the complete test suite of GraphQL.js, making sure this port is -reliable and compatible with GraphQL.js. +An extensive test suite with over 2500 unit tests and 100% coverage replicates the +complete test suite of GraphQL.js, ensuring that this port is reliable and compatible +with GraphQL.js. -The current stable version 3.2.3 of GraphQL-core is up-to-date with GraphQL.js -version 16.6.0 and supports Python version 3.7 and newer. +The current stable version 3.2.6 of GraphQL-core is up-to-date with GraphQL.js +version 16.8.2 and supports Python versions 3.6 to 3.13. -You can also try out the latest alpha version 3.3.0a6 of GraphQL-core -which is up-to-date with GraphQL.js version 17.0.0a2. +You can also try out the latest alpha version 3.3.0a9 of GraphQL-core, +which is up-to-date with GraphQL.js version 17.0.0a3. Please note that this new minor version of GraphQL-core does not support Python 3.6 anymore. @@ -26,13 +27,12 @@ Note that for various reasons, GraphQL-core does not use SemVer like GraphQL.js. Changes in the major version of GraphQL.js are reflected in the minor version of GraphQL-core instead. This means there can be breaking changes in the API when the minor version changes, and only patch releases are fully backward compatible. -Therefore, we recommend something like `=~ 3.2.0` as version specifier +Therefore, we recommend using something like `~= 3.2.0` as the version specifier when including GraphQL-core as a dependency. - ## Documentation -A more detailed documentation for GraphQL-core 3 can be found at +More detailed documentation for GraphQL-core 3 can be found at [graphql-core-3.readthedocs.io](https://graphql-core-3.readthedocs.io/). The documentation for GraphQL.js can be found at [graphql.org/graphql-js/](https://graphql.org/graphql-js/). @@ -47,10 +47,10 @@ examples. A general overview of GraphQL is available in the [README](https://github.com/graphql/graphql-spec/blob/main/README.md) for the -[Specification for GraphQL](https://github.com/graphql/graphql-spec). That overview -describes a simple set of GraphQL examples that exist as [tests](tests) in this -repository. A good way to get started with this repository is to walk through that -README and the corresponding tests in parallel. +[Specification for GraphQL](https://github.com/graphql/graphql-spec). 
This overview +includes a simple set of GraphQL examples that are also available as [tests](tests) +in this repository. A good way to get started with this repository is to walk through +that README and the corresponding tests in parallel. ## Installation @@ -174,17 +174,17 @@ asyncio.run(main()) ## Goals and restrictions -GraphQL-core tries to reproduce the code of the reference implementation GraphQL.js -in Python as closely as possible and to stay up-to-date with the latest development of -GraphQL.js. +GraphQL-core aims to reproduce the code of the reference implementation GraphQL.js +in Python as closely as possible while staying up-to-date with the latest development +of GraphQL.js. -GraphQL-core 3 (formerly known as GraphQL-core-next) has been created as a modern +GraphQL-core 3 (formerly known as GraphQL-core-next) was created as a modern alternative to [GraphQL-core 2](https://github.com/graphql-python/graphql-core-legacy), -a prior work by Syrus Akbary, based on an older version of GraphQL.js and also -targeting older Python versions. Some parts of GraphQL-core 3 have been inspired by -GraphQL-core 2 or directly taken over with only slight modifications, but most of the -code has been re-implemented from scratch, replicating the latest code in GraphQL.js -very closely and adding type hints for Python. +a prior work by Syrus Akbary based on an older version of GraphQL.js that still +supported legacy Python versions. While some parts of GraphQL-core 3 were inspired by +GraphQL-core 2 or directly taken over with slight modifications, most of the code has +been re-implemented from scratch. This re-implementation closely replicates the latest +code in GraphQL.js and adds type hints for Python. Design goals for the GraphQL-core 3 library were: @@ -208,6 +208,10 @@ Some restrictions (mostly in line with the design goals): * supports asynchronous operations only via async.io (does not support the additional executors in GraphQL-core) +Note that we now use the amazing [ruff](https://docs.astral.sh/ruff/) tool +to both format and check the code of GraphQL-core 3, +in addition to using [mypy](https://mypy-lang.org/) as a type checker. + ## Integration with other libraries and roadmap @@ -217,19 +221,19 @@ Some restrictions (mostly in line with the design goals): also been created by Syrus Akbary, who meanwhile has handed over the maintenance and future development to members of the GraphQL-Python community. - The current version 2 of Graphene is using Graphql-core 2 as core library for much of - the heavy lifting. Note that Graphene 2 is not compatible with GraphQL-core 3. - The new version 3 of Graphene will use GraphQL-core 3 instead of GraphQL-core 2. + Graphene 3 is now using GraphQL-core 3 as its core library for much of the heavy lifting. * [Ariadne](https://github.com/mirumee/ariadne) is a Python library for implementing GraphQL servers using schema-first approach created by Mirumee Software. - Ariadne is already using GraphQL-core 3 as its GraphQL implementation. + Ariadne is also using GraphQL-core 3 as its GraphQL implementation. * [Strawberry](https://github.com/strawberry-graphql/strawberry), created by Patrick Arminio, is a new GraphQL library for Python 3, inspired by dataclasses, that is also using GraphQL-core 3 as underpinning. +* [Typed GraphQL](https://github.com/willemt/typed-graphql), a thin layer over GraphQL-core that uses native Python types for creating GraphQL schemas. 
+ ## Changelog @@ -240,6 +244,7 @@ Changes are tracked as ## Credits and history The GraphQL-core 3 library + * has been created and is maintained by Christoph Zwerschke * uses ideas and code from GraphQL-core 2, a prior work by Syrus Akbary * is a Python port of GraphQL.js which has been developed by Lee Byron and others diff --git a/docs/conf.py b/docs/conf.py index bd53efa0..6d2d8429 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -50,7 +50,7 @@ # General information about the project. project = "GraphQL-core 3" -copyright = "2024, Christoph Zwerschke" +copyright = "2025, Christoph Zwerschke" author = "Christoph Zwerschke" # The version info for the project you're documenting, acts as replacement for @@ -60,7 +60,7 @@ # The short X.Y version. # version = '3.3' # The full version, including alpha/beta/rc tags. -version = release = "3.3.0a6" +version = release = "3.3.0a9" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -138,50 +138,78 @@ } # ignore the following undocumented or internal references: -ignore_references = set( - """ -GNT GT KT T VT -TContext -enum.Enum -traceback -types.TracebackType -TypeMap -AwaitableOrValue -EnterLeaveVisitor -ExperimentalIncrementalExecutionResults -FieldGroup -FormattedSourceLocation -GraphQLAbstractType -GraphQLCompositeType -GraphQLErrorExtensions -GraphQLFieldResolver -GraphQLInputType -GraphQLNullableType -GraphQLOutputType -GraphQLTypeResolver -GroupedFieldSet -IncrementalDataRecord -Middleware -asyncio.events.AbstractEventLoop -graphql.execution.collect_fields.FieldsAndPatches -graphql.execution.map_async_iterable.map_async_iterable -graphql.execution.Middleware -graphql.execution.execute.ExperimentalIncrementalExecutionResults -graphql.execution.execute.StreamArguments -graphql.execution.incremental_publisher.IncrementalPublisher -graphql.execution.incremental_publisher.StreamItemsRecord -graphql.execution.incremental_publisher.DeferredFragmentRecord -graphql.language.lexer.EscapeSequence -graphql.language.visitor.EnterLeaveVisitor -graphql.type.definition.GT_co -graphql.type.definition.GNT_co -graphql.type.definition.TContext -graphql.type.schema.InterfaceImplementations -graphql.validation.validation_context.VariableUsage -graphql.validation.rules.known_argument_names.KnownArgumentNamesOnDirectivesRule -graphql.validation.rules.provided_required_arguments.ProvidedRequiredArgumentsOnDirectivesRule -""".split() -) +ignore_references = { + "GNT", + "GT", + "KT", + "T", + "VT", + "TContext", + "Enum", + "traceback", + "types.TracebackType", + "TypeMap", + "AwaitableOrValue", + "DeferredFragmentRecord", + "DeferUsage", + "EnterLeaveVisitor", + "ExperimentalIncrementalExecutionResults", + "FieldGroup", + "FormattedIncrementalResult", + "FormattedPendingResult", + "FormattedSourceLocation", + "GraphQLAbstractType", + "GraphQLCompositeType", + "GraphQLEnumValueMap", + "GraphQLErrorExtensions", + "GraphQLFieldResolver", + "GraphQLInputType", + "GraphQLNullableType", + "GraphQLOutputType", + "GraphQLTypeResolver", + "GroupedFieldSet", + "IncrementalDataRecord", + "IncrementalResult", + "InitialResultRecord", + "Middleware", + "PendingResult", + "StreamItemsRecord", + "StreamRecord", + "SubsequentDataRecord", + "asyncio.events.AbstractEventLoop", + "collections.abc.MutableMapping", + "collections.abc.MutableSet", + "enum.Enum", + "graphql.execution.build_field_plan.FieldGroup", + "graphql.execution.build_field_plan.FieldPlan", + "graphql.execution.collect_fields.DeferUsage", + 
"graphql.execution.execute.StreamArguments", + "graphql.execution.execute.SubFieldPlan", + "graphql.execution.execute.StreamUsage", + "graphql.execution.map_async_iterable.map_async_iterable", + "graphql.execution.incremental_publisher.CompletedResult", + "graphql.execution.incremental_publisher.DeferredFragmentRecord", + "graphql.execution.incremental_publisher.DeferredGroupedFieldSetRecord", + "graphql.execution.incremental_publisher.FormattedCompletedResult", + "graphql.execution.incremental_publisher.FormattedPendingResult", + "graphql.execution.incremental_publisher.IncrementalPublisher", + "graphql.execution.incremental_publisher.InitialResultRecord", + "graphql.execution.incremental_publisher.PendingResult", + "graphql.execution.incremental_publisher.StreamItemsRecord", + "graphql.execution.incremental_publisher.StreamRecord", + "graphql.execution.Middleware", + "graphql.language.lexer.EscapeSequence", + "graphql.language.visitor.EnterLeaveVisitor", + "graphql.pyutils.ref_map.K", + "graphql.pyutils.ref_map.V", + "graphql.type.definition.GT_co", + "graphql.type.definition.GNT_co", + "graphql.type.definition.TContext", + "graphql.type.schema.InterfaceImplementations", + "graphql.validation.validation_context.VariableUsage", + "graphql.validation.rules.known_argument_names.KnownArgumentNamesOnDirectivesRule", + "graphql.validation.rules.provided_required_arguments.ProvidedRequiredArgumentsOnDirectivesRule", +} ignore_references.update(__builtins__.keys()) @@ -199,10 +227,12 @@ def on_missing_reference(app, env, node, contnode): name = target.rsplit(".", 1)[-1] if name in ("GT", "GNT", "KT", "T", "VT"): return contnode - if typ == "obj": - if target.startswith("typing."): - if name in ("Any", "Optional", "Union"): - return contnode + if ( + typ == "obj" + and target.startswith("typing.") + and name in ("Any", "Optional", "Union") + ): + return contnode if typ != "class": return None if "." in target: # maybe too specific diff --git a/docs/modules/pyutils.rst b/docs/modules/pyutils.rst index cd178d65..e33b5d1f 100644 --- a/docs/modules/pyutils.rst +++ b/docs/modules/pyutils.rst @@ -30,3 +30,7 @@ PyUtils .. autoclass:: SimplePubSub .. autoclass:: SimplePubSubIterator .. autodata:: Undefined +.. autoclass:: RefMap + :no-inherited-members: +.. autoclass:: RefSet + :no-inherited-members: diff --git a/docs/requirements.txt b/docs/requirements.txt index f52741c8..9652132e 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,2 +1,2 @@ -sphinx>=7.3.7,<8 -sphinx_rtd_theme>=2.0.0,<3 +sphinx>=7,<8 +sphinx_rtd_theme>=2,<3 diff --git a/poetry.lock b/poetry.lock index 1d4f8e60..1b0fabbd 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. 
[[package]] name = "alabaster" @@ -6,17 +6,47 @@ version = "0.7.13" description = "A configurable sidebar-enabled Sphinx theme" optional = false python-versions = ">=3.6" +groups = ["doc"] +markers = "python_version < \"3.9\"" files = [ {file = "alabaster-0.7.13-py3-none-any.whl", hash = "sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3"}, {file = "alabaster-0.7.13.tar.gz", hash = "sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2"}, ] +[[package]] +name = "alabaster" +version = "0.7.16" +description = "A light, configurable Sphinx theme" +optional = false +python-versions = ">=3.9" +groups = ["doc"] +markers = "python_version == \"3.9\"" +files = [ + {file = "alabaster-0.7.16-py3-none-any.whl", hash = "sha256:b46733c07dce03ae4e150330b975c75737fa60f0a7c591b6c8bf4928a28e2c92"}, + {file = "alabaster-0.7.16.tar.gz", hash = "sha256:75a8b99c28a5dad50dd7f8ccdd447a121ddb3892da9e53d1ca5cca3106d58d65"}, +] + +[[package]] +name = "alabaster" +version = "1.0.0" +description = "A light, configurable Sphinx theme" +optional = false +python-versions = ">=3.10" +groups = ["doc"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "alabaster-1.0.0-py3-none-any.whl", hash = "sha256:fc6786402dc3fcb2de3cabd5fe455a2db534b371124f1f21de8731783dec828b"}, + {file = "alabaster-1.0.0.tar.gz", hash = "sha256:c00dca57bca26fa62a6d7d0a9fcce65f3e026e9bfe33e9c538fd3fbb2144fd9e"}, +] + [[package]] name = "babel" version = "2.14.0" description = "Internationalization utilities" optional = false python-versions = ">=3.7" +groups = ["doc"] +markers = "python_version == \"3.7\"" files = [ {file = "Babel-2.14.0-py3-none-any.whl", hash = "sha256:efb1a25b7118e67ce3a259bed20545c29cb68be8ad2c784c83689981b7a57287"}, {file = "Babel-2.14.0.tar.gz", hash = "sha256:6919867db036398ba21eb5c7a0f6b28ab8cbc3ae7a73a44ebe34ae74a4e7d363"}, @@ -30,20 +60,22 @@ dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"] [[package]] name = "babel" -version = "2.16.0" +version = "2.17.0" description = "Internationalization utilities" optional = false python-versions = ">=3.8" +groups = ["doc"] +markers = "python_version >= \"3.8\"" files = [ - {file = "babel-2.16.0-py3-none-any.whl", hash = "sha256:368b5b98b37c06b7daf6696391c3240c938b37767d4584413e8438c5c435fa8b"}, - {file = "babel-2.16.0.tar.gz", hash = "sha256:d1f3554ca26605fe173f3de0c65f750f5a42f924499bf134de6423582298e316"}, + {file = "babel-2.17.0-py3-none-any.whl", hash = "sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2"}, + {file = "babel-2.17.0.tar.gz", hash = "sha256:0c54cffb19f690cdcc52a3b50bcbf71e07a808d1c80d549f2459b9d2cf0afb9d"}, ] [package.dependencies] pytz = {version = ">=2015.7", markers = "python_version < \"3.9\""} [package.extras] -dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"] +dev = ["backports.zoneinfo ; python_version < \"3.9\"", "freezegun (>=1.0,<2.0)", "jinja2 (>=3.0)", "pytest (>=6.0)", "pytest-cov", "pytz", "setuptools", "tzdata ; sys_platform == \"win32\""] [[package]] name = "bump2version" @@ -51,6 +83,7 @@ version = "1.0.1" description = "Version-bump your software with a single command!" 
optional = false python-versions = ">=3.5" +groups = ["lint"] files = [ {file = "bump2version-1.0.1-py2.py3-none-any.whl", hash = "sha256:37f927ea17cde7ae2d7baf832f8e80ce3777624554a653006c9144f8017fe410"}, {file = "bump2version-1.0.1.tar.gz", hash = "sha256:762cb2bfad61f4ec8e2bdf452c7c267416f8c70dd9ecb1653fd0bbb01fa936e6"}, @@ -58,32 +91,209 @@ files = [ [[package]] name = "cachetools" -version = "5.4.0" +version = "5.5.2" description = "Extensible memoizing collections and decorators" optional = false python-versions = ">=3.7" +groups = ["test"] +markers = "python_version == \"3.8\"" files = [ - {file = "cachetools-5.4.0-py3-none-any.whl", hash = "sha256:3ae3b49a3d5e28a77a0be2b37dbcb89005058959cb2323858c2657c4a8cab474"}, - {file = "cachetools-5.4.0.tar.gz", hash = "sha256:b8adc2e7c07f105ced7bc56dbb6dfbe7c4a00acce20e2227b3f355be89bc6827"}, + {file = "cachetools-5.5.2-py3-none-any.whl", hash = "sha256:d26a22bcc62eb95c3beabd9f1ee5e820d3d2704fe2967cbe350e20c8ffcd3f0a"}, + {file = "cachetools-5.5.2.tar.gz", hash = "sha256:1a661caa9175d26759571b2e19580f9d6393969e5dfca11fdb1f947a23e640d4"}, +] + +[[package]] +name = "cachetools" +version = "6.1.0" +description = "Extensible memoizing collections and decorators" +optional = false +python-versions = ">=3.9" +groups = ["test"] +markers = "python_version >= \"3.9\"" +files = [ + {file = "cachetools-6.1.0-py3-none-any.whl", hash = "sha256:1c7bb3cf9193deaf3508b7c5f2a79986c13ea38965c5adcff1f84519cf39163e"}, + {file = "cachetools-6.1.0.tar.gz", hash = "sha256:b4c4f404392848db3ce7aac34950d17be4d864da4b8b66911008e430bc544587"}, ] [[package]] name = "certifi" -version = "2024.7.4" +version = "2025.6.15" description = "Python package for providing Mozilla's CA Bundle." optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" +groups = ["doc"] files = [ - {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"}, - {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"}, + {file = "certifi-2025.6.15-py3-none-any.whl", hash = "sha256:2e0c7ce7cb5d8f8634ca55d2ba7e6ec2689a2fd6537d8dec1296a477a4910057"}, + {file = "certifi-2025.6.15.tar.gz", hash = "sha256:d747aa5a8b9bbbb1bb8c22bb13e22bd1f18e9796defa16bab421f7f7a317323b"}, +] + +[[package]] +name = "cffi" +version = "1.15.1" +description = "Foreign Function Interface for Python calling C code." 
+optional = false +python-versions = "*" +groups = ["test"] +markers = "python_version == \"3.7\"" +files = [ + {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"}, + {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"}, + {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"}, + {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"}, + {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"}, + {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"}, + {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"}, + {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"}, + {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"}, + {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"}, + {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"}, + {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"}, + {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"}, + {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"}, + {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"}, + {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"}, + {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"}, + {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"}, + {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"}, + {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"}, + {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"}, + {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"}, + {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"}, +] + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "cffi" +version = "1.17.1" +description = "Foreign Function Interface for Python calling C code." 
+optional = false +python-versions = ">=3.8" +groups = ["test"] +markers = "python_version >= \"3.9\"" +files = [ + {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, + {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"}, + {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"}, + {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"}, + {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"}, + {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"}, + {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"}, + {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"}, + {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = 
"sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"}, + {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"}, + {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"}, + {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"}, + {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"}, + {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"}, + {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, + {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, ] +[package.dependencies] +pycparser = "*" + [[package]] name = "chardet" version = "5.2.0" description = "Universal encoding detector 
for Python 3" optional = false python-versions = ">=3.7" +groups = ["test"] +markers = "python_version >= \"3.8\"" files = [ {file = "chardet-5.2.0-py3-none-any.whl", hash = "sha256:e1cf59446890a00105fe7b7912492ea04b6e6f06d4b742b2c788469e34c82970"}, {file = "chardet-5.2.0.tar.gz", hash = "sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7"}, @@ -91,101 +301,104 @@ files = [ [[package]] name = "charset-normalizer" -version = "3.3.2" +version = "3.4.2" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." optional = false -python-versions = ">=3.7.0" -files = [ - {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, - {file = 
"charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = 
"sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, - 
{file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, - {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, +python-versions = ">=3.7" +groups = ["doc"] +files = [ + {file = "charset_normalizer-3.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9cbfacf36cb0ec2897ce0ebc5d08ca44213af24265bd56eca54bee7923c48fd6"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18dd2e350387c87dabe711b86f83c9c78af772c748904d372ade190b5c7c9d4d"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8075c35cd58273fee266c58c0c9b670947c19df5fb98e7b66710e04ad4e9ff86"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5bf4545e3b962767e5c06fe1738f951f77d27967cb2caa64c28be7c4563e162c"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7a6ab32f7210554a96cd9e33abe3ddd86732beeafc7a28e9955cdf22ffadbab0"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b33de11b92e9f75a2b545d6e9b6f37e398d86c3e9e9653c4864eb7e89c5773ef"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8755483f3c00d6c9a77f490c17e6ab0c8729e39e6390328e42521ef175380ae6"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:68a328e5f55ec37c57f19ebb1fdc56a248db2e3e9ad769919a58672958e8f366"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:21b2899062867b0e1fde9b724f8aecb1af14f2778d69aacd1a5a1853a597a5db"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-win32.whl", hash = "sha256:e8082b26888e2f8b36a042a58307d5b917ef2b1cacab921ad3323ef91901c71a"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-win_amd64.whl", 
hash = "sha256:f69a27e45c43520f5487f27627059b64aaf160415589230992cec34c5e18a509"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:be1e352acbe3c78727a16a455126d9ff83ea2dfdcbc83148d2982305a04714c2"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa88ca0b1932e93f2d961bf3addbb2db902198dca337d88c89e1559e066e7645"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d524ba3f1581b35c03cb42beebab4a13e6cdad7b36246bd22541fa585a56cccd"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28a1005facc94196e1fb3e82a3d442a9d9110b8434fc1ded7a24a2983c9888d8"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdb20a30fe1175ecabed17cbf7812f7b804b8a315a25f24678bcdf120a90077f"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f5d9ed7f254402c9e7d35d2f5972c9bbea9040e99cd2861bd77dc68263277c7"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:efd387a49825780ff861998cd959767800d54f8308936b21025326de4b5a42b9"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f0aa37f3c979cf2546b73e8222bbfa3dc07a641585340179d768068e3455e544"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e70e990b2137b29dc5564715de1e12701815dacc1d056308e2b17e9095372a82"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:0c8c57f84ccfc871a48a47321cfa49ae1df56cd1d965a09abe84066f6853b9c0"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6b66f92b17849b85cad91259efc341dce9c1af48e2173bf38a85c6329f1033e5"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-win32.whl", hash = "sha256:daac4765328a919a805fa5e2720f3e94767abd632ae410a9062dff5412bae65a"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:e53efc7c7cee4c1e70661e2e112ca46a575f90ed9ae3fef200f2a25e954f4b28"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0c29de6a1a95f24b9a1aa7aefd27d2487263f00dfd55a77719b530788f75cff7"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cddf7bd982eaa998934a91f69d182aec997c6c468898efe6679af88283b498d3"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcbe676a55d7445b22c10967bceaaf0ee69407fbe0ece4d032b6eb8d4565982a"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d41c4d287cfc69060fa91cae9683eacffad989f1a10811995fa309df656ec214"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e594135de17ab3866138f496755f302b72157d115086d100c3f19370839dd3a"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf713fe9a71ef6fd5adf7a79670135081cd4431c2943864757f0fa3a65b1fafd"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a370b3e078e418187da8c3674eddb9d983ec09445c99a3a263c2011993522981"}, + {file = 
"charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a955b438e62efdf7e0b7b52a64dc5c3396e2634baa62471768a64bc2adb73d5c"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7222ffd5e4de8e57e03ce2cef95a4c43c98fcb72ad86909abdfc2c17d227fc1b"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:bee093bf902e1d8fc0ac143c88902c3dfc8941f7ea1d6a8dd2bcb786d33db03d"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dedb8adb91d11846ee08bec4c8236c8549ac721c245678282dcb06b221aab59f"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-win32.whl", hash = "sha256:db4c7bf0e07fc3b7d89ac2a5880a6a8062056801b83ff56d8464b70f65482b6c"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:5a9979887252a82fefd3d3ed2a8e3b937a7a809f65dcb1e068b090e165bbe99e"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-win32.whl", hash = "sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cad5f45b3146325bb38d6855642f6fd609c3f7cad4dbaf75549bf3b904d3184"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2680962a4848b3c4f155dc2ee64505a9c57186d0d56b43123b17ca3de18f0fa"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:36b31da18b8890a76ec181c3cf44326bf2c48e36d393ca1b72b3f484113ea344"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f4074c5a429281bf056ddd4c5d3b740ebca4d43ffffe2ef4bf4d2d05114299da"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9e36a97bee9b86ef9a1cf7bb96747eb7a15c2f22bdb5b516434b00f2a599f02"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:1b1bde144d98e446b056ef98e59c256e9294f6b74d7af6846bf5ffdafd687a7d"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:915f3849a011c1f593ab99092f3cecfcb4d65d8feb4a64cf1bf2d22074dc0ec4"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:fb707f3e15060adf5b7ada797624a6c6e0138e2a26baa089df64c68ee98e040f"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:25a23ea5c7edc53e0f29bae2c44fcb5a1aa10591aae107f2a2b2583a9c5cbc64"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:770cab594ecf99ae64c236bc9ee3439c3f46be49796e265ce0cc8bc17b10294f"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-win32.whl", hash = "sha256:6a0289e4589e8bdfef02a80478f1dfcb14f0ab696b5a00e1f4b8a14a307a3c58"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6fc1f5b51fa4cecaa18f2bd7a003f3dd039dd615cd69a2afd6d3b19aed6775f2"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:76af085e67e56c8816c3ccf256ebd136def2ed9654525348cfa744b6802b69eb"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e45ba65510e2647721e35323d6ef54c7974959f6081b58d4ef5d87c60c84919a"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:046595208aae0120559a67693ecc65dd75d46f7bf687f159127046628178dc45"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75d10d37a47afee94919c4fab4c22b9bc2a8bf7d4f46f87363bcf0573f3ff4f5"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6333b3aa5a12c26b2a4d4e7335a28f1475e0e5e17d69d55141ee3cab736f66d1"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e8323a9b031aa0393768b87f04b4164a40037fb2a3c11ac06a03ffecd3618027"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:24498ba8ed6c2e0b56d4acbf83f2d989720a93b41d712ebd4f4979660db4417b"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:844da2b5728b5ce0e32d863af26f32b5ce61bc4273a9c720a9f3aa9df73b1455"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:65c981bdbd3f57670af8b59777cbfae75364b483fa8a9f420f08094531d54a01"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:3c21d4fca343c805a52c0c78edc01e3477f6dd1ad7c47653241cf2a206d4fc58"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:dc7039885fa1baf9be153a0626e337aa7ec8bf96b0128605fb0d77788ddc1681"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-win32.whl", hash = "sha256:8272b73e1c5603666618805fe821edba66892e2870058c94c53147602eab29c7"}, + {file = 
"charset_normalizer-3.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:70f7172939fdf8790425ba31915bfbe8335030f05b9913d7ae00a87d4395620a"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:005fa3432484527f9732ebd315da8da8001593e2cf46a3d817669f062c3d9ed4"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e92fca20c46e9f5e1bb485887d074918b13543b1c2a1185e69bb8d17ab6236a7"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50bf98d5e563b83cc29471fa114366e6806bc06bc7a25fd59641e41445327836"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:721c76e84fe669be19c5791da68232ca2e05ba5185575086e384352e2c309597"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82d8fd25b7f4675d0c47cf95b594d4e7b158aca33b76aa63d07186e13c0e0ab7"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3daeac64d5b371dea99714f08ffc2c208522ec6b06fbc7866a450dd446f5c0f"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dccab8d5fa1ef9bfba0590ecf4d46df048d18ffe3eec01eeb73a42e0d9e7a8ba"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:aaf27faa992bfee0264dc1f03f4c75e9fcdda66a519db6b957a3f826e285cf12"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:eb30abc20df9ab0814b5a2524f23d75dcf83cde762c161917a2b4b7b55b1e518"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:c72fbbe68c6f32f251bdc08b8611c7b3060612236e960ef848e0a517ddbe76c5"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:982bb1e8b4ffda883b3d0a521e23abcd6fd17418f6d2c4118d257a10199c0ce3"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-win32.whl", hash = "sha256:43e0933a0eff183ee85833f341ec567c0980dae57c464d8a508e1b2ceb336471"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:d11b54acf878eef558599658b0ffca78138c8c3655cf4f3a4a673c437e67732e"}, + {file = "charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0"}, + {file = "charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63"}, ] [[package]] @@ -194,10 +407,12 @@ version = "0.4.6" description = "Cross-platform colored terminal text." 
optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["doc", "test"] files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] +markers = {doc = "sys_platform == \"win32\"", test = "python_version >= \"3.8\" or platform_system == \"Windows\" or sys_platform == \"win32\""} [[package]] name = "coverage" @@ -205,6 +420,8 @@ version = "7.2.7" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.7" +groups = ["test"] +markers = "python_version == \"3.7\"" files = [ {file = "coverage-7.2.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d39b5b4f2a66ccae8b7263ac3c8170994b65266797fb96cbbfd3fb5b23921db8"}, {file = "coverage-7.2.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6d040ef7c9859bb11dfeb056ff5b3872436e3b5e401817d87a31e1750b9ae2fb"}, @@ -272,7 +489,7 @@ files = [ tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} [package.extras] -toml = ["tomli"] +toml = ["tomli ; python_full_version <= \"3.11.0a6\""] [[package]] name = "coverage" @@ -280,6 +497,8 @@ version = "7.6.1" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.8" +groups = ["test"] +markers = "python_version == \"3.8\"" files = [ {file = "coverage-7.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b06079abebbc0e89e6163b8e8f0e16270124c154dc6e4a47b413dd538859af16"}, {file = "coverage-7.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cf4b19715bccd7ee27b6b120e7e9dd56037b9c0681dcc1adc9ba9db3d417fa36"}, @@ -359,17 +578,102 @@ files = [ tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} [package.extras] -toml = ["tomli"] +toml = ["tomli ; python_full_version <= \"3.11.0a6\""] + +[[package]] +name = "coverage" +version = "7.9.1" +description = "Code coverage measurement for Python" +optional = false +python-versions = ">=3.9" +groups = ["test"] +markers = "python_version >= \"3.9\"" +files = [ + {file = "coverage-7.9.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cc94d7c5e8423920787c33d811c0be67b7be83c705f001f7180c7b186dcf10ca"}, + {file = "coverage-7.9.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:16aa0830d0c08a2c40c264cef801db8bc4fc0e1892782e45bcacbd5889270509"}, + {file = "coverage-7.9.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf95981b126f23db63e9dbe4cf65bd71f9a6305696fa5e2262693bc4e2183f5b"}, + {file = "coverage-7.9.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f05031cf21699785cd47cb7485f67df619e7bcdae38e0fde40d23d3d0210d3c3"}, + {file = "coverage-7.9.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb4fbcab8764dc072cb651a4bcda4d11fb5658a1d8d68842a862a6610bd8cfa3"}, + {file = "coverage-7.9.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0f16649a7330ec307942ed27d06ee7e7a38417144620bb3d6e9a18ded8a2d3e5"}, + {file = "coverage-7.9.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:cea0a27a89e6432705fffc178064503508e3c0184b4f061700e771a09de58187"}, + {file = "coverage-7.9.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:e980b53a959fa53b6f05343afbd1e6f44a23ed6c23c4b4c56c6662bbb40c82ce"}, + {file = "coverage-7.9.1-cp310-cp310-win32.whl", hash = "sha256:70760b4c5560be6ca70d11f8988ee6542b003f982b32f83d5ac0b72476607b70"}, + {file = "coverage-7.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:a66e8f628b71f78c0e0342003d53b53101ba4e00ea8dabb799d9dba0abbbcebe"}, + {file = "coverage-7.9.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:95c765060e65c692da2d2f51a9499c5e9f5cf5453aeaf1420e3fc847cc060582"}, + {file = "coverage-7.9.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ba383dc6afd5ec5b7a0d0c23d38895db0e15bcba7fb0fa8901f245267ac30d86"}, + {file = "coverage-7.9.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37ae0383f13cbdcf1e5e7014489b0d71cc0106458878ccde52e8a12ced4298ed"}, + {file = "coverage-7.9.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:69aa417a030bf11ec46149636314c24c8d60fadb12fc0ee8f10fda0d918c879d"}, + {file = "coverage-7.9.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a4be2a28656afe279b34d4f91c3e26eccf2f85500d4a4ff0b1f8b54bf807338"}, + {file = "coverage-7.9.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:382e7ddd5289f140259b610e5f5c58f713d025cb2f66d0eb17e68d0a94278875"}, + {file = "coverage-7.9.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e5532482344186c543c37bfad0ee6069e8ae4fc38d073b8bc836fc8f03c9e250"}, + {file = "coverage-7.9.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a39d18b3f50cc121d0ce3838d32d58bd1d15dab89c910358ebefc3665712256c"}, + {file = "coverage-7.9.1-cp311-cp311-win32.whl", hash = "sha256:dd24bd8d77c98557880def750782df77ab2b6885a18483dc8588792247174b32"}, + {file = "coverage-7.9.1-cp311-cp311-win_amd64.whl", hash = "sha256:6b55ad10a35a21b8015eabddc9ba31eb590f54adc9cd39bcf09ff5349fd52125"}, + {file = "coverage-7.9.1-cp311-cp311-win_arm64.whl", hash = "sha256:6ad935f0016be24c0e97fc8c40c465f9c4b85cbbe6eac48934c0dc4d2568321e"}, + {file = "coverage-7.9.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a8de12b4b87c20de895f10567639c0797b621b22897b0af3ce4b4e204a743626"}, + {file = "coverage-7.9.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5add197315a054e92cee1b5f686a2bcba60c4c3e66ee3de77ace6c867bdee7cb"}, + {file = "coverage-7.9.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:600a1d4106fe66f41e5d0136dfbc68fe7200a5cbe85610ddf094f8f22e1b0300"}, + {file = "coverage-7.9.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a876e4c3e5a2a1715a6608906aa5a2e0475b9c0f68343c2ada98110512ab1d8"}, + {file = "coverage-7.9.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81f34346dd63010453922c8e628a52ea2d2ccd73cb2487f7700ac531b247c8a5"}, + {file = "coverage-7.9.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:888f8eee13f2377ce86d44f338968eedec3291876b0b8a7289247ba52cb984cd"}, + {file = "coverage-7.9.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9969ef1e69b8c8e1e70d591f91bbc37fc9a3621e447525d1602801a24ceda898"}, + {file = "coverage-7.9.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:60c458224331ee3f1a5b472773e4a085cc27a86a0b48205409d364272d67140d"}, + {file = "coverage-7.9.1-cp312-cp312-win32.whl", hash = "sha256:5f646a99a8c2b3ff4c6a6e081f78fad0dde275cd59f8f49dc4eab2e394332e74"}, + {file = 
"coverage-7.9.1-cp312-cp312-win_amd64.whl", hash = "sha256:30f445f85c353090b83e552dcbbdad3ec84c7967e108c3ae54556ca69955563e"}, + {file = "coverage-7.9.1-cp312-cp312-win_arm64.whl", hash = "sha256:af41da5dca398d3474129c58cb2b106a5d93bbb196be0d307ac82311ca234342"}, + {file = "coverage-7.9.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:31324f18d5969feef7344a932c32428a2d1a3e50b15a6404e97cba1cc9b2c631"}, + {file = "coverage-7.9.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0c804506d624e8a20fb3108764c52e0eef664e29d21692afa375e0dd98dc384f"}, + {file = "coverage-7.9.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef64c27bc40189f36fcc50c3fb8f16ccda73b6a0b80d9bd6e6ce4cffcd810bbd"}, + {file = "coverage-7.9.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d4fe2348cc6ec372e25adec0219ee2334a68d2f5222e0cba9c0d613394e12d86"}, + {file = "coverage-7.9.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:34ed2186fe52fcc24d4561041979a0dec69adae7bce2ae8d1c49eace13e55c43"}, + {file = "coverage-7.9.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:25308bd3d00d5eedd5ae7d4357161f4df743e3c0240fa773ee1b0f75e6c7c0f1"}, + {file = "coverage-7.9.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:73e9439310f65d55a5a1e0564b48e34f5369bee943d72c88378f2d576f5a5751"}, + {file = "coverage-7.9.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:37ab6be0859141b53aa89412a82454b482c81cf750de4f29223d52268a86de67"}, + {file = "coverage-7.9.1-cp313-cp313-win32.whl", hash = "sha256:64bdd969456e2d02a8b08aa047a92d269c7ac1f47e0c977675d550c9a0863643"}, + {file = "coverage-7.9.1-cp313-cp313-win_amd64.whl", hash = "sha256:be9e3f68ca9edb897c2184ad0eee815c635565dbe7a0e7e814dc1f7cbab92c0a"}, + {file = "coverage-7.9.1-cp313-cp313-win_arm64.whl", hash = "sha256:1c503289ffef1d5105d91bbb4d62cbe4b14bec4d13ca225f9c73cde9bb46207d"}, + {file = "coverage-7.9.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0b3496922cb5f4215bf5caaef4cf12364a26b0be82e9ed6d050f3352cf2d7ef0"}, + {file = "coverage-7.9.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:9565c3ab1c93310569ec0d86b017f128f027cab0b622b7af288696d7ed43a16d"}, + {file = "coverage-7.9.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2241ad5dbf79ae1d9c08fe52b36d03ca122fb9ac6bca0f34439e99f8327ac89f"}, + {file = "coverage-7.9.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3bb5838701ca68b10ebc0937dbd0eb81974bac54447c55cd58dea5bca8451029"}, + {file = "coverage-7.9.1-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b30a25f814591a8c0c5372c11ac8967f669b97444c47fd794926e175c4047ece"}, + {file = "coverage-7.9.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:2d04b16a6062516df97969f1ae7efd0de9c31eb6ebdceaa0d213b21c0ca1a683"}, + {file = "coverage-7.9.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:7931b9e249edefb07cd6ae10c702788546341d5fe44db5b6108a25da4dca513f"}, + {file = "coverage-7.9.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:52e92b01041151bf607ee858e5a56c62d4b70f4dac85b8c8cb7fb8a351ab2c10"}, + {file = "coverage-7.9.1-cp313-cp313t-win32.whl", hash = "sha256:684e2110ed84fd1ca5f40e89aa44adf1729dc85444004111aa01866507adf363"}, + {file = "coverage-7.9.1-cp313-cp313t-win_amd64.whl", hash = 
"sha256:437c576979e4db840539674e68c84b3cda82bc824dd138d56bead1435f1cb5d7"}, + {file = "coverage-7.9.1-cp313-cp313t-win_arm64.whl", hash = "sha256:18a0912944d70aaf5f399e350445738a1a20b50fbea788f640751c2ed9208b6c"}, + {file = "coverage-7.9.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6f424507f57878e424d9a95dc4ead3fbdd72fd201e404e861e465f28ea469951"}, + {file = "coverage-7.9.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:535fde4001b2783ac80865d90e7cc7798b6b126f4cd8a8c54acfe76804e54e58"}, + {file = "coverage-7.9.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02532fd3290bb8fa6bec876520842428e2a6ed6c27014eca81b031c2d30e3f71"}, + {file = "coverage-7.9.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:56f5eb308b17bca3bbff810f55ee26d51926d9f89ba92707ee41d3c061257e55"}, + {file = "coverage-7.9.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfa447506c1a52271f1b0de3f42ea0fa14676052549095e378d5bff1c505ff7b"}, + {file = "coverage-7.9.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9ca8e220006966b4a7b68e8984a6aee645a0384b0769e829ba60281fe61ec4f7"}, + {file = "coverage-7.9.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:49f1d0788ba5b7ba65933f3a18864117c6506619f5ca80326b478f72acf3f385"}, + {file = "coverage-7.9.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:68cd53aec6f45b8e4724c0950ce86eacb775c6be01ce6e3669fe4f3a21e768ed"}, + {file = "coverage-7.9.1-cp39-cp39-win32.whl", hash = "sha256:95335095b6c7b1cc14c3f3f17d5452ce677e8490d101698562b2ffcacc304c8d"}, + {file = "coverage-7.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:e1b5191d1648acc439b24721caab2fd0c86679d8549ed2c84d5a7ec1bedcc244"}, + {file = "coverage-7.9.1-pp39.pp310.pp311-none-any.whl", hash = "sha256:db0f04118d1db74db6c9e1cb1898532c7dcc220f1d2718f058601f7c3f499514"}, + {file = "coverage-7.9.1-py3-none-any.whl", hash = "sha256:66b974b145aa189516b6bf2d8423e888b742517d37872f6ee4c5be0073bd9a3c"}, + {file = "coverage-7.9.1.tar.gz", hash = "sha256:6cf43c78c4282708a28e466316935ec7489a9c487518a77fa68f716c67909cec"}, +] + +[package.dependencies] +tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} + +[package.extras] +toml = ["tomli ; python_full_version <= \"3.11.0a6\""] [[package]] name = "distlib" -version = "0.3.8" +version = "0.3.9" description = "Distribution utilities" optional = false python-versions = "*" +groups = ["test"] files = [ - {file = "distlib-0.3.8-py2.py3-none-any.whl", hash = "sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784"}, - {file = "distlib-0.3.8.tar.gz", hash = "sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64"}, + {file = "distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87"}, + {file = "distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403"}, ] [[package]] @@ -378,6 +682,8 @@ version = "0.19" description = "Docutils -- Python Documentation Utilities" optional = false python-versions = ">=3.7" +groups = ["doc"] +markers = "python_version == \"3.7\"" files = [ {file = "docutils-0.19-py3-none-any.whl", hash = "sha256:5e1de4d849fee02c63b040a4a3fd567f4ab104defd8a5511fbbc24a8a017efbc"}, {file = "docutils-0.19.tar.gz", hash = "sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6"}, @@ -389,22 +695,42 @@ version = 
"0.20.1" description = "Docutils -- Python Documentation Utilities" optional = false python-versions = ">=3.7" +groups = ["doc"] +markers = "python_version == \"3.8\"" files = [ {file = "docutils-0.20.1-py3-none-any.whl", hash = "sha256:96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6"}, {file = "docutils-0.20.1.tar.gz", hash = "sha256:f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b"}, ] +[[package]] +name = "docutils" +version = "0.21.2" +description = "Docutils -- Python Documentation Utilities" +optional = false +python-versions = ">=3.9" +groups = ["doc"] +markers = "python_version >= \"3.9\"" +files = [ + {file = "docutils-0.21.2-py3-none-any.whl", hash = "sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2"}, + {file = "docutils-0.21.2.tar.gz", hash = "sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f"}, +] + [[package]] name = "exceptiongroup" -version = "1.2.2" +version = "1.3.0" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" +groups = ["test"] +markers = "python_version <= \"3.10\"" files = [ - {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, - {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, + {file = "exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10"}, + {file = "exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88"}, ] +[package.dependencies] +typing-extensions = {version = ">=4.6.0", markers = "python_version < \"3.13\""} + [package.extras] test = ["pytest (>=6)"] @@ -414,6 +740,8 @@ version = "3.12.2" description = "A platform independent file lock." optional = false python-versions = ">=3.7" +groups = ["test"] +markers = "python_version == \"3.7\"" files = [ {file = "filelock-3.12.2-py3-none-any.whl", hash = "sha256:cbb791cdea2a72f23da6ac5b5269ab0a0d161e9ef0100e653b69049a7706d1ec"}, {file = "filelock-3.12.2.tar.gz", hash = "sha256:002740518d8aa59a26b0c76e10fb8c6e15eae825d34b6fdf670333fd7b938d81"}, @@ -425,37 +753,62 @@ testing = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "diff-cover (>=7.5)", "p [[package]] name = "filelock" -version = "3.15.4" +version = "3.16.1" description = "A platform independent file lock." optional = false python-versions = ">=3.8" +groups = ["test"] +markers = "python_version == \"3.8\"" +files = [ + {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"}, + {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4.1)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.2)", "pytest (>=8.3.3)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.4)"] +typing = ["typing-extensions (>=4.12.2) ; python_version < \"3.11\""] + +[[package]] +name = "filelock" +version = "3.18.0" +description = "A platform independent file lock." 
+optional = false +python-versions = ">=3.9" +groups = ["test"] +markers = "python_version >= \"3.9\"" files = [ - {file = "filelock-3.15.4-py3-none-any.whl", hash = "sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7"}, - {file = "filelock-3.15.4.tar.gz", hash = "sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb"}, + {file = "filelock-3.18.0-py3-none-any.whl", hash = "sha256:c401f4f8377c4464e6db25fff06205fd89bdd83b65eb0488ed1b160f780e21de"}, + {file = "filelock-3.18.0.tar.gz", hash = "sha256:adbc88eabb99d2fec8c9c1b229b171f18afa655400173ddc653d5d01501fb9f2"}, ] [package.extras] -docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] -testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-asyncio (>=0.21)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)", "virtualenv (>=20.26.2)"] -typing = ["typing-extensions (>=4.8)"] +docs = ["furo (>=2024.8.6)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.10)", "diff-cover (>=9.2.1)", "pytest (>=8.3.4)", "pytest-asyncio (>=0.25.2)", "pytest-cov (>=6)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.28.1)"] +typing = ["typing-extensions (>=4.12.2) ; python_version < \"3.11\""] [[package]] name = "idna" -version = "3.7" +version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false -python-versions = ">=3.5" +python-versions = ">=3.6" +groups = ["doc"] files = [ - {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, - {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, ] +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + [[package]] name = "imagesize" version = "1.4.1" description = "Getting image size from png/jpeg/jpeg2000/gif file" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +groups = ["doc"] files = [ {file = "imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b"}, {file = "imagesize-1.4.1.tar.gz", hash = "sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a"}, @@ -467,6 +820,8 @@ version = "6.7.0" description = "Read metadata from Python packages" optional = false python-versions = ">=3.7" +groups = ["doc", "test"] +markers = "python_version == \"3.7\"" files = [ {file = "importlib_metadata-6.7.0-py3-none-any.whl", hash = "sha256:cb52082e659e97afc5dac71e79de97d8681de3aa07ff18578330904a9d18e5b5"}, {file = "importlib_metadata-6.7.0.tar.gz", hash = "sha256:1aaf550d4f73e5d6783e7acb77aec43d49da8017410afae93822cc9cca98c4d4"}, @@ -479,26 +834,57 @@ zipp = ">=0.5" [package.extras] docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] perf = ["ipython"] -testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-mypy (>=0.9.1)", "pytest-perf 
(>=0.9.2)", "pytest-ruff"] +testing = ["flufl.flake8", "importlib-resources (>=1.3) ; python_version < \"3.9\"", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7) ; platform_python_implementation != \"PyPy\"", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-mypy (>=0.9.1) ; platform_python_implementation != \"PyPy\"", "pytest-perf (>=0.9.2)", "pytest-ruff"] [[package]] name = "importlib-metadata" -version = "8.2.0" +version = "8.5.0" description = "Read metadata from Python packages" optional = false python-versions = ">=3.8" +groups = ["doc"] +markers = "python_version == \"3.8\"" files = [ - {file = "importlib_metadata-8.2.0-py3-none-any.whl", hash = "sha256:11901fa0c2f97919b288679932bb64febaeacf289d18ac84dd68cb2e74213369"}, - {file = "importlib_metadata-8.2.0.tar.gz", hash = "sha256:72e8d4399996132204f9a16dcc751af254a48f8d1b20b9ff0f98d4a8f901e73d"}, + {file = "importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b"}, + {file = "importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7"}, ] [package.dependencies] -zipp = ">=0.5" +zipp = ">=3.20" + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +perf = ["ipython"] +test = ["flufl.flake8", "importlib-resources (>=1.3) ; python_version < \"3.9\"", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] +type = ["pytest-mypy"] + +[[package]] +name = "importlib-metadata" +version = "8.7.0" +description = "Read metadata from Python packages" +optional = false +python-versions = ">=3.9" +groups = ["doc", "test"] +markers = "python_version == \"3.9\"" +files = [ + {file = "importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd"}, + {file = "importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000"}, +] + +[package.dependencies] +zipp = ">=3.20" [package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] perf = ["ipython"] -test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"] +test = ["flufl.flake8", "importlib_resources (>=1.3) ; python_version < \"3.9\"", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] +type = ["pytest-mypy"] [[package]] name = "iniconfig" @@ -506,20 +892,36 @@ version = "2.0.0" description = "brain-dead simple config-ini parsing" optional = false python-versions = ">=3.7" +groups = ["test"] +markers = "python_version == \"3.7\"" files = [ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, {file = "iniconfig-2.0.0.tar.gz", hash = 
"sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] +[[package]] +name = "iniconfig" +version = "2.1.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.8" +groups = ["test"] +markers = "python_version >= \"3.8\"" +files = [ + {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"}, + {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"}, +] + [[package]] name = "jinja2" -version = "3.1.4" +version = "3.1.6" description = "A very fast and expressive template engine." optional = false python-versions = ">=3.7" +groups = ["doc"] files = [ - {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"}, - {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"}, + {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"}, + {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"}, ] [package.dependencies] @@ -528,12 +930,40 @@ MarkupSafe = ">=2.0" [package.extras] i18n = ["Babel (>=2.7)"] +[[package]] +name = "markdown-it-py" +version = "3.0.0" +description = "Python port of markdown-it. Markdown parsing, done right!" +optional = false +python-versions = ">=3.8" +groups = ["test"] +markers = "python_version >= \"3.9\"" +files = [ + {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, + {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, +] + +[package.dependencies] +mdurl = ">=0.1,<1.0" + +[package.extras] +benchmarking = ["psutil", "pytest", "pytest-benchmark"] +code-style = ["pre-commit (>=3.0,<4.0)"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] +linkify = ["linkify-it-py (>=1,<3)"] +plugins = ["mdit-py-plugins"] +profiling = ["gprof2dot"] +rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] + [[package]] name = "markupsafe" version = "2.1.5" description = "Safely add untrusted strings to HTML/XML markup." optional = false python-versions = ">=3.7" +groups = ["doc"] +markers = "python_version < \"3.9\"" files = [ {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, @@ -597,12 +1027,99 @@ files = [ {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, ] +[[package]] +name = "markupsafe" +version = "3.0.2" +description = "Safely add untrusted strings to HTML/XML markup." 
+optional = false +python-versions = ">=3.9" +groups = ["doc"] +markers = "python_version >= \"3.9\"" +files = [ + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225"}, 
+ {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d"}, + {file = 
"MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a"}, + {file = "markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0"}, +] + +[[package]] +name = "mdurl" +version = "0.1.2" +description = "Markdown URL utilities" +optional = false +python-versions = ">=3.7" +groups = ["test"] +markers = "python_version >= \"3.9\"" +files = [ + {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, + {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, +] + [[package]] name = "mypy" version = "1.4.1" description = "Optional static typing for Python" optional = false python-versions = ">=3.7" +groups = ["lint"] +markers = "python_version == \"3.7\"" files = [ {file = "mypy-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:566e72b0cd6598503e48ea610e0052d1b8168e60a46e0bfd34b3acf2d57f96a8"}, {file = "mypy-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ca637024ca67ab24a7fd6f65d280572c3794665eaf5edcc7e90a866544076878"}, @@ -646,47 +1163,117 @@ 
reports = ["lxml"] [[package]] name = "mypy" -version = "1.11.1" +version = "1.14.1" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" -files = [ - {file = "mypy-1.11.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a32fc80b63de4b5b3e65f4be82b4cfa362a46702672aa6a0f443b4689af7008c"}, - {file = "mypy-1.11.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c1952f5ea8a5a959b05ed5f16452fddadbaae48b5d39235ab4c3fc444d5fd411"}, - {file = "mypy-1.11.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e1e30dc3bfa4e157e53c1d17a0dad20f89dc433393e7702b813c10e200843b03"}, - {file = "mypy-1.11.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2c63350af88f43a66d3dfeeeb8d77af34a4f07d760b9eb3a8697f0386c7590b4"}, - {file = "mypy-1.11.1-cp310-cp310-win_amd64.whl", hash = "sha256:a831671bad47186603872a3abc19634f3011d7f83b083762c942442d51c58d58"}, - {file = "mypy-1.11.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7b6343d338390bb946d449677726edf60102a1c96079b4f002dedff375953fc5"}, - {file = "mypy-1.11.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e4fe9f4e5e521b458d8feb52547f4bade7ef8c93238dfb5bbc790d9ff2d770ca"}, - {file = "mypy-1.11.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:886c9dbecc87b9516eff294541bf7f3655722bf22bb898ee06985cd7269898de"}, - {file = "mypy-1.11.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fca4a60e1dd9fd0193ae0067eaeeb962f2d79e0d9f0f66223a0682f26ffcc809"}, - {file = "mypy-1.11.1-cp311-cp311-win_amd64.whl", hash = "sha256:0bd53faf56de9643336aeea1c925012837432b5faf1701ccca7fde70166ccf72"}, - {file = "mypy-1.11.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f39918a50f74dc5969807dcfaecafa804fa7f90c9d60506835036cc1bc891dc8"}, - {file = "mypy-1.11.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0bc71d1fb27a428139dd78621953effe0d208aed9857cb08d002280b0422003a"}, - {file = "mypy-1.11.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b868d3bcff720dd7217c383474008ddabaf048fad8d78ed948bb4b624870a417"}, - {file = "mypy-1.11.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a707ec1527ffcdd1c784d0924bf5cb15cd7f22683b919668a04d2b9c34549d2e"}, - {file = "mypy-1.11.1-cp312-cp312-win_amd64.whl", hash = "sha256:64f4a90e3ea07f590c5bcf9029035cf0efeae5ba8be511a8caada1a4893f5525"}, - {file = "mypy-1.11.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:749fd3213916f1751fff995fccf20c6195cae941dc968f3aaadf9bb4e430e5a2"}, - {file = "mypy-1.11.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b639dce63a0b19085213ec5fdd8cffd1d81988f47a2dec7100e93564f3e8fb3b"}, - {file = "mypy-1.11.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4c956b49c5d865394d62941b109728c5c596a415e9c5b2be663dd26a1ff07bc0"}, - {file = "mypy-1.11.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:45df906e8b6804ef4b666af29a87ad9f5921aad091c79cc38e12198e220beabd"}, - {file = "mypy-1.11.1-cp38-cp38-win_amd64.whl", hash = "sha256:d44be7551689d9d47b7abc27c71257adfdb53f03880841a5db15ddb22dc63edb"}, - {file = "mypy-1.11.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2684d3f693073ab89d76da8e3921883019ea8a3ec20fa5d8ecca6a2db4c54bbe"}, - {file = "mypy-1.11.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:79c07eb282cb457473add5052b63925e5cc97dfab9812ee65a7c7ab5e3cb551c"}, - {file = 
"mypy-1.11.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11965c2f571ded6239977b14deebd3f4c3abd9a92398712d6da3a772974fad69"}, - {file = "mypy-1.11.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a2b43895a0f8154df6519706d9bca8280cda52d3d9d1514b2d9c3e26792a0b74"}, - {file = "mypy-1.11.1-cp39-cp39-win_amd64.whl", hash = "sha256:1a81cf05975fd61aec5ae16501a091cfb9f605dc3e3c878c0da32f250b74760b"}, - {file = "mypy-1.11.1-py3-none-any.whl", hash = "sha256:0624bdb940255d2dd24e829d99a13cfeb72e4e9031f9492148f410ed30bcab54"}, - {file = "mypy-1.11.1.tar.gz", hash = "sha256:f404a0b069709f18bbdb702eb3dcfe51910602995de00bd39cea3050b5772d08"}, +groups = ["lint"] +markers = "python_version == \"3.8\"" +files = [ + {file = "mypy-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52686e37cf13d559f668aa398dd7ddf1f92c5d613e4f8cb262be2fb4fedb0fcb"}, + {file = "mypy-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1fb545ca340537d4b45d3eecdb3def05e913299ca72c290326be19b3804b39c0"}, + {file = "mypy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90716d8b2d1f4cd503309788e51366f07c56635a3309b0f6a32547eaaa36a64d"}, + {file = "mypy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ae753f5c9fef278bcf12e1a564351764f2a6da579d4a81347e1d5a15819997b"}, + {file = "mypy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e0fe0f5feaafcb04505bcf439e991c6d8f1bf8b15f12b05feeed96e9e7bf1427"}, + {file = "mypy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:7d54bd85b925e501c555a3227f3ec0cfc54ee8b6930bd6141ec872d1c572f81f"}, + {file = "mypy-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f995e511de847791c3b11ed90084a7a0aafdc074ab88c5a9711622fe4751138c"}, + {file = "mypy-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d64169ec3b8461311f8ce2fd2eb5d33e2d0f2c7b49116259c51d0d96edee48d1"}, + {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba24549de7b89b6381b91fbc068d798192b1b5201987070319889e93038967a8"}, + {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:183cf0a45457d28ff9d758730cd0210419ac27d4d3f285beda038c9083363b1f"}, + {file = "mypy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f2a0ecc86378f45347f586e4163d1769dd81c5a223d577fe351f26b179e148b1"}, + {file = "mypy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:ad3301ebebec9e8ee7135d8e3109ca76c23752bac1e717bc84cd3836b4bf3eae"}, + {file = "mypy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:30ff5ef8519bbc2e18b3b54521ec319513a26f1bba19a7582e7b1f58a6e69f14"}, + {file = "mypy-1.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb9f255c18052343c70234907e2e532bc7e55a62565d64536dbc7706a20b78b9"}, + {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b4e3413e0bddea671012b063e27591b953d653209e7a4fa5e48759cda77ca11"}, + {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:553c293b1fbdebb6c3c4030589dab9fafb6dfa768995a453d8a5d3b23784af2e"}, + {file = "mypy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fad79bfe3b65fe6a1efaed97b445c3d37f7be9fdc348bdb2d7cac75579607c89"}, + {file = "mypy-1.14.1-cp312-cp312-win_amd64.whl", hash = 
"sha256:8fa2220e54d2946e94ab6dbb3ba0a992795bd68b16dc852db33028df2b00191b"}, + {file = "mypy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:92c3ed5afb06c3a8e188cb5da4984cab9ec9a77ba956ee419c68a388b4595255"}, + {file = "mypy-1.14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:dbec574648b3e25f43d23577309b16534431db4ddc09fda50841f1e34e64ed34"}, + {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8c6d94b16d62eb3e947281aa7347d78236688e21081f11de976376cf010eb31a"}, + {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d4b19b03fdf54f3c5b2fa474c56b4c13c9dbfb9a2db4370ede7ec11a2c5927d9"}, + {file = "mypy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0c911fde686394753fff899c409fd4e16e9b294c24bfd5e1ea4675deae1ac6fd"}, + {file = "mypy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8b21525cb51671219f5307be85f7e646a153e5acc656e5cebf64bfa076c50107"}, + {file = "mypy-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7084fb8f1128c76cd9cf68fe5971b37072598e7c31b2f9f95586b65c741a9d31"}, + {file = "mypy-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8f845a00b4f420f693f870eaee5f3e2692fa84cc8514496114649cfa8fd5e2c6"}, + {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:44bf464499f0e3a2d14d58b54674dee25c031703b2ffc35064bd0df2e0fac319"}, + {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c99f27732c0b7dc847adb21c9d47ce57eb48fa33a17bc6d7d5c5e9f9e7ae5bac"}, + {file = "mypy-1.14.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:bce23c7377b43602baa0bd22ea3265c49b9ff0b76eb315d6c34721af4cdf1d9b"}, + {file = "mypy-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:8edc07eeade7ebc771ff9cf6b211b9a7d93687ff892150cb5692e4f4272b0837"}, + {file = "mypy-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3888a1816d69f7ab92092f785a462944b3ca16d7c470d564165fe703b0970c35"}, + {file = "mypy-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:46c756a444117c43ee984bd055db99e498bc613a70bbbc120272bd13ca579fbc"}, + {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:27fc248022907e72abfd8e22ab1f10e903915ff69961174784a3900a8cba9ad9"}, + {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:499d6a72fb7e5de92218db961f1a66d5f11783f9ae549d214617edab5d4dbdbb"}, + {file = "mypy-1.14.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57961db9795eb566dc1d1b4e9139ebc4c6b0cb6e7254ecde69d1552bf7613f60"}, + {file = "mypy-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:07ba89fdcc9451f2ebb02853deb6aaaa3d2239a236669a63ab3801bbf923ef5c"}, + {file = "mypy-1.14.1-py3-none-any.whl", hash = "sha256:b66a60cc4073aeb8ae00057f9c1f64d49e90f918fbcef9a977eb121da8b8f1d1"}, + {file = "mypy-1.14.1.tar.gz", hash = "sha256:7ec88144fe9b510e8475ec2f5f251992690fcf89ccb4500b214b4226abcd32d6"}, ] [package.dependencies] -mypy-extensions = ">=1.0.0" +mypy_extensions = ">=1.0.0" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = ">=4.6.0" +typing_extensions = ">=4.6.0" [package.extras] dmypy = ["psutil (>=4.0)"] +faster-cache = ["orjson"] +install-types = ["pip"] +mypyc = ["setuptools (>=50)"] +reports = ["lxml"] + +[[package]] +name = "mypy" +version = "1.16.1" +description = 
"Optional static typing for Python" +optional = false +python-versions = ">=3.9" +groups = ["lint"] +markers = "python_version >= \"3.9\"" +files = [ + {file = "mypy-1.16.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b4f0fed1022a63c6fec38f28b7fc77fca47fd490445c69d0a66266c59dd0b88a"}, + {file = "mypy-1.16.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:86042bbf9f5a05ea000d3203cf87aa9d0ccf9a01f73f71c58979eb9249f46d72"}, + {file = "mypy-1.16.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ea7469ee5902c95542bea7ee545f7006508c65c8c54b06dc2c92676ce526f3ea"}, + {file = "mypy-1.16.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:352025753ef6a83cb9e7f2427319bb7875d1fdda8439d1e23de12ab164179574"}, + {file = "mypy-1.16.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ff9fa5b16e4c1364eb89a4d16bcda9987f05d39604e1e6c35378a2987c1aac2d"}, + {file = "mypy-1.16.1-cp310-cp310-win_amd64.whl", hash = "sha256:1256688e284632382f8f3b9e2123df7d279f603c561f099758e66dd6ed4e8bd6"}, + {file = "mypy-1.16.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:472e4e4c100062488ec643f6162dd0d5208e33e2f34544e1fc931372e806c0cc"}, + {file = "mypy-1.16.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea16e2a7d2714277e349e24d19a782a663a34ed60864006e8585db08f8ad1782"}, + {file = "mypy-1.16.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:08e850ea22adc4d8a4014651575567b0318ede51e8e9fe7a68f25391af699507"}, + {file = "mypy-1.16.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22d76a63a42619bfb90122889b903519149879ddbf2ba4251834727944c8baca"}, + {file = "mypy-1.16.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:2c7ce0662b6b9dc8f4ed86eb7a5d505ee3298c04b40ec13b30e572c0e5ae17c4"}, + {file = "mypy-1.16.1-cp311-cp311-win_amd64.whl", hash = "sha256:211287e98e05352a2e1d4e8759c5490925a7c784ddc84207f4714822f8cf99b6"}, + {file = "mypy-1.16.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:af4792433f09575d9eeca5c63d7d90ca4aeceda9d8355e136f80f8967639183d"}, + {file = "mypy-1.16.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:66df38405fd8466ce3517eda1f6640611a0b8e70895e2a9462d1d4323c5eb4b9"}, + {file = "mypy-1.16.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:44e7acddb3c48bd2713994d098729494117803616e116032af192871aed80b79"}, + {file = "mypy-1.16.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0ab5eca37b50188163fa7c1b73c685ac66c4e9bdee4a85c9adac0e91d8895e15"}, + {file = "mypy-1.16.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dedb6229b2c9086247e21a83c309754b9058b438704ad2f6807f0d8227f6ebdd"}, + {file = "mypy-1.16.1-cp312-cp312-win_amd64.whl", hash = "sha256:1f0435cf920e287ff68af3d10a118a73f212deb2ce087619eb4e648116d1fe9b"}, + {file = "mypy-1.16.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ddc91eb318c8751c69ddb200a5937f1232ee8efb4e64e9f4bc475a33719de438"}, + {file = "mypy-1.16.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:87ff2c13d58bdc4bbe7dc0dedfe622c0f04e2cb2a492269f3b418df2de05c536"}, + {file = "mypy-1.16.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a7cfb0fe29fe5a9841b7c8ee6dffb52382c45acdf68f032145b75620acfbd6f"}, + {file = 
"mypy-1.16.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:051e1677689c9d9578b9c7f4d206d763f9bbd95723cd1416fad50db49d52f359"}, + {file = "mypy-1.16.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d5d2309511cc56c021b4b4e462907c2b12f669b2dbeb68300110ec27723971be"}, + {file = "mypy-1.16.1-cp313-cp313-win_amd64.whl", hash = "sha256:4f58ac32771341e38a853c5d0ec0dfe27e18e27da9cdb8bbc882d2249c71a3ee"}, + {file = "mypy-1.16.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7fc688329af6a287567f45cc1cefb9db662defeb14625213a5b7da6e692e2069"}, + {file = "mypy-1.16.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5e198ab3f55924c03ead626ff424cad1732d0d391478dfbf7bb97b34602395da"}, + {file = "mypy-1.16.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:09aa4f91ada245f0a45dbc47e548fd94e0dd5a8433e0114917dc3b526912a30c"}, + {file = "mypy-1.16.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13c7cd5b1cb2909aa318a90fd1b7e31f17c50b242953e7dd58345b2a814f6383"}, + {file = "mypy-1.16.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:58e07fb958bc5d752a280da0e890c538f1515b79a65757bbdc54252ba82e0b40"}, + {file = "mypy-1.16.1-cp39-cp39-win_amd64.whl", hash = "sha256:f895078594d918f93337a505f8add9bd654d1a24962b4c6ed9390e12531eb31b"}, + {file = "mypy-1.16.1-py3-none-any.whl", hash = "sha256:5fc2ac4027d0ef28d6ba69a0343737a23c4d1b83672bf38d1fe237bdc0643b37"}, + {file = "mypy-1.16.1.tar.gz", hash = "sha256:6bd00a0a2094841c5e47e7374bb42b83d64c527a502e3334e1173a0c24437bab"}, +] + +[package.dependencies] +mypy_extensions = ">=1.0.0" +pathspec = ">=0.9.0" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing_extensions = ">=4.6.0" + +[package.extras] +dmypy = ["psutil (>=4.0)"] +faster-cache = ["orjson"] install-types = ["pip"] mypyc = ["setuptools (>=50)"] reports = ["lxml"] @@ -697,17 +1284,34 @@ version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." optional = false python-versions = ">=3.5" +groups = ["lint"] +markers = "python_version == \"3.7\"" files = [ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, ] +[[package]] +name = "mypy-extensions" +version = "1.1.0" +description = "Type system extensions for programs checked with the mypy type checker." 
+optional = false +python-versions = ">=3.8" +groups = ["lint"] +markers = "python_version >= \"3.8\"" +files = [ + {file = "mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505"}, + {file = "mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558"}, +] + [[package]] name = "packaging" version = "24.0" description = "Core utilities for Python packages" optional = false python-versions = ">=3.7" +groups = ["doc", "test"] +markers = "python_version == \"3.7\"" files = [ {file = "packaging-24.0-py3-none-any.whl", hash = "sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5"}, {file = "packaging-24.0.tar.gz", hash = "sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9"}, @@ -715,13 +1319,28 @@ files = [ [[package]] name = "packaging" -version = "24.1" +version = "25.0" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" +groups = ["doc", "test"] +markers = "python_version >= \"3.8\"" +files = [ + {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, + {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, +] + +[[package]] +name = "pathspec" +version = "0.12.1" +description = "Utility library for gitignore style pattern matching of file paths." +optional = false +python-versions = ">=3.8" +groups = ["lint"] +markers = "python_version >= \"3.9\"" files = [ - {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, - {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, + {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, + {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, ] [[package]] @@ -730,6 +1349,8 @@ version = "4.0.0" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." optional = false python-versions = ">=3.7" +groups = ["test"] +markers = "python_version == \"3.7\"" files = [ {file = "platformdirs-4.0.0-py3-none-any.whl", hash = "sha256:118c954d7e949b35437270383a3f2531e99dd93cf7ce4dc8340d3356d30f173b"}, {file = "platformdirs-4.0.0.tar.gz", hash = "sha256:cb633b2bcf10c51af60beb0ab06d2f1d69064b43abf4c185ca6b28865f3f9731"}, @@ -744,19 +1365,39 @@ test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-co [[package]] name = "platformdirs" -version = "4.2.2" +version = "4.3.6" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
optional = false python-versions = ">=3.8" +groups = ["test"] +markers = "python_version == \"3.8\"" files = [ - {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, - {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, + {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, + {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, ] [package.extras] -docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] -type = ["mypy (>=1.8)"] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.11.2)"] + +[[package]] +name = "platformdirs" +version = "4.3.8" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." +optional = false +python-versions = ">=3.9" +groups = ["test"] +markers = "python_version >= \"3.9\"" +files = [ + {file = "platformdirs-4.3.8-py3-none-any.whl", hash = "sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4"}, + {file = "platformdirs-4.3.8.tar.gz", hash = "sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.4)", "pytest-cov (>=6)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.14.1)"] [[package]] name = "pluggy" @@ -764,6 +1405,8 @@ version = "1.2.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.7" +groups = ["test"] +markers = "python_version == \"3.7\"" files = [ {file = "pluggy-1.2.0-py3-none-any.whl", hash = "sha256:c2fd55a7d7a3863cba1a013e4e2414658b1d07b6bc57b3919e0c63c9abb99849"}, {file = "pluggy-1.2.0.tar.gz", hash = "sha256:d12f0c4b579b15f5e054301bb226ee85eeeba08ffec228092f8defbaa3a4c4b3"}, @@ -782,6 +1425,8 @@ version = "1.5.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" +groups = ["test"] +markers = "python_version == \"3.8\"" files = [ {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, @@ -791,12 +1436,31 @@ files = [ dev = ["pre-commit", "tox"] testing = ["pytest", "pytest-benchmark"] +[[package]] +name = "pluggy" +version = "1.6.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.9" +groups = ["test"] +markers = "python_version >= \"3.9\"" +files = [ + {file = "pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"}, + {file = "pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["coverage", "pytest", 
"pytest-benchmark"] + [[package]] name = "py" version = "1.11.0" description = "library with cross-python path, ini-parsing, io, code, log facilities" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +groups = ["test"] +markers = "python_version == \"3.7\"" files = [ {file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"}, {file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"}, @@ -808,49 +1472,82 @@ version = "9.0.0" description = "Get CPU info with pure Python" optional = false python-versions = "*" +groups = ["test"] files = [ {file = "py-cpuinfo-9.0.0.tar.gz", hash = "sha256:3cdbbf3fac90dc6f118bfd64384f309edeadd902d7c8fb17f02ffa1fc3f49690"}, {file = "py_cpuinfo-9.0.0-py3-none-any.whl", hash = "sha256:859625bc251f64e21f077d099d4162689c762b5d6a4c3c97553d56241c9674d5"}, ] +[[package]] +name = "pycparser" +version = "2.21" +description = "C parser in Python" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +groups = ["test"] +markers = "python_version == \"3.7\"" +files = [ + {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, + {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, +] + +[[package]] +name = "pycparser" +version = "2.22" +description = "C parser in Python" +optional = false +python-versions = ">=3.8" +groups = ["test"] +markers = "python_version >= \"3.9\"" +files = [ + {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, + {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, +] + [[package]] name = "pygments" version = "2.17.2" description = "Pygments is a syntax highlighting package written in Python." optional = false python-versions = ">=3.7" +groups = ["doc"] +markers = "python_version == \"3.7\"" files = [ {file = "pygments-2.17.2-py3-none-any.whl", hash = "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c"}, {file = "pygments-2.17.2.tar.gz", hash = "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367"}, ] [package.extras] -plugins = ["importlib-metadata"] +plugins = ["importlib-metadata ; python_version < \"3.8\""] windows-terminal = ["colorama (>=0.4.6)"] [[package]] name = "pygments" -version = "2.18.0" +version = "2.19.1" description = "Pygments is a syntax highlighting package written in Python." 
optional = false python-versions = ">=3.8" +groups = ["doc", "test"] files = [ - {file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"}, - {file = "pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199"}, + {file = "pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c"}, + {file = "pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f"}, ] +markers = {doc = "python_version >= \"3.8\"", test = "python_version >= \"3.9\""} [package.extras] windows-terminal = ["colorama (>=0.4.6)"] [[package]] name = "pyproject-api" -version = "1.7.1" +version = "1.8.0" description = "API to interact with the python pyproject.toml based projects" optional = false python-versions = ">=3.8" +groups = ["test"] +markers = "python_version == \"3.8\"" files = [ - {file = "pyproject_api-1.7.1-py3-none-any.whl", hash = "sha256:2dc1654062c2b27733d8fd4cdda672b22fe8741ef1dde8e3a998a9547b071eeb"}, - {file = "pyproject_api-1.7.1.tar.gz", hash = "sha256:7ebc6cd10710f89f4cf2a2731710a98abce37ebff19427116ff2174c9236a827"}, + {file = "pyproject_api-1.8.0-py3-none-any.whl", hash = "sha256:3d7d347a047afe796fd5d1885b1e391ba29be7169bd2f102fcd378f04273d228"}, + {file = "pyproject_api-1.8.0.tar.gz", hash = "sha256:77b8049f2feb5d33eefcc21b57f1e279636277a8ac8ad6b5871037b243778496"}, ] [package.dependencies] @@ -858,8 +1555,29 @@ packaging = ">=24.1" tomli = {version = ">=2.0.1", markers = "python_version < \"3.11\""} [package.extras] -docs = ["furo (>=2024.5.6)", "sphinx-autodoc-typehints (>=2.2.1)"] -testing = ["covdefaults (>=2.3)", "pytest (>=8.2.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "setuptools (>=70.1)"] +docs = ["furo (>=2024.8.6)", "sphinx-autodoc-typehints (>=2.4.1)"] +testing = ["covdefaults (>=2.3)", "pytest (>=8.3.3)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "setuptools (>=75.1)"] + +[[package]] +name = "pyproject-api" +version = "1.9.1" +description = "API to interact with the python pyproject.toml based projects" +optional = false +python-versions = ">=3.9" +groups = ["test"] +markers = "python_version >= \"3.9\"" +files = [ + {file = "pyproject_api-1.9.1-py3-none-any.whl", hash = "sha256:7d6238d92f8962773dd75b5f0c4a6a27cce092a14b623b811dba656f3b628948"}, + {file = "pyproject_api-1.9.1.tar.gz", hash = "sha256:43c9918f49daab37e302038fc1aed54a8c7a91a9fa935d00b9a485f37e0f5335"}, +] + +[package.dependencies] +packaging = ">=25" +tomli = {version = ">=2.2.1", markers = "python_version < \"3.11\""} + +[package.extras] +docs = ["furo (>=2024.8.6)", "sphinx-autodoc-typehints (>=3.2)"] +testing = ["covdefaults (>=2.3)", "pytest (>=8.3.5)", "pytest-cov (>=6.1.1)", "pytest-mock (>=3.14)", "setuptools (>=80.3.1)"] [[package]] name = "pytest" @@ -867,6 +1585,8 @@ version = "7.4.4" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.7" +groups = ["test"] +markers = "python_version == \"3.7\"" files = [ {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"}, {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"}, @@ -886,13 +1606,15 @@ testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "no [[package]] name = "pytest" -version = "8.3.2" +version = "8.3.5" description = "pytest: simple powerful 
testing with Python" optional = false python-versions = ">=3.8" +groups = ["test"] +markers = "python_version == \"3.8\"" files = [ - {file = "pytest-8.3.2-py3-none-any.whl", hash = "sha256:4ba08f9ae7dcf84ded419494d229b48d0903ea6407b030eaec46df5e6a73bba5"}, - {file = "pytest-8.3.2.tar.gz", hash = "sha256:c132345d12ce551242c87269de812483f5bcc87cdbb4722e48487ba194f9fdce"}, + {file = "pytest-8.3.5-py3-none-any.whl", hash = "sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820"}, + {file = "pytest-8.3.5.tar.gz", hash = "sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845"}, ] [package.dependencies] @@ -906,12 +1628,39 @@ tomli = {version = ">=1", markers = "python_version < \"3.11\""} [package.extras] dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] +[[package]] +name = "pytest" +version = "8.4.1" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.9" +groups = ["test"] +markers = "python_version >= \"3.9\"" +files = [ + {file = "pytest-8.4.1-py3-none-any.whl", hash = "sha256:539c70ba6fcead8e78eebbf1115e8b589e7565830d7d006a8723f19ac8a0afb7"}, + {file = "pytest-8.4.1.tar.gz", hash = "sha256:7c67fd69174877359ed9371ec3af8a3d2b04741818c51e5e99cc1742251fa93c"}, +] + +[package.dependencies] +colorama = {version = ">=0.4", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1", markers = "python_version < \"3.11\""} +iniconfig = ">=1" +packaging = ">=20" +pluggy = ">=1.5,<2" +pygments = ">=2.7.2" +tomli = {version = ">=1", markers = "python_version < \"3.11\""} + +[package.extras] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "requests", "setuptools", "xmlschema"] + [[package]] name = "pytest-asyncio" version = "0.21.2" description = "Pytest support for asyncio" optional = false python-versions = ">=3.7" +groups = ["test"] +markers = "python_version == \"3.7\"" files = [ {file = "pytest_asyncio-0.21.2-py3-none-any.whl", hash = "sha256:ab664c88bb7998f711d8039cacd4884da6430886ae8bbd4eded552ed2004f16b"}, {file = "pytest_asyncio-0.21.2.tar.gz", hash = "sha256:d67738fc232b94b326b9d060750beb16e0074210b98dd8b58a5239fa2a154f45"}, @@ -927,28 +1676,52 @@ testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy [[package]] name = "pytest-asyncio" -version = "0.23.8" +version = "0.24.0" description = "Pytest support for asyncio" optional = false python-versions = ">=3.8" +groups = ["test"] +markers = "python_version == \"3.8\"" files = [ - {file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"}, - {file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"}, + {file = "pytest_asyncio-0.24.0-py3-none-any.whl", hash = "sha256:a811296ed596b69bf0b6f3dc40f83bcaf341b155a269052d82efa2b25ac7037b"}, + {file = "pytest_asyncio-0.24.0.tar.gz", hash = "sha256:d081d828e576d85f875399194281e92bf8a68d60d72d1a2faf2feddb6c46b276"}, ] [package.dependencies] -pytest = ">=7.0.0,<9" +pytest = ">=8.2,<9" [package.extras] docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] +[[package]] +name = "pytest-asyncio" +version = "0.25.3" +description = "Pytest support for asyncio" +optional = false +python-versions = ">=3.9" +groups = ["test"] +markers = "python_version >= \"3.9\"" +files = [ + {file = 
"pytest_asyncio-0.25.3-py3-none-any.whl", hash = "sha256:9e89518e0f9bd08928f97a3482fdc4e244df17529460bc038291ccaf8f85c7c3"}, + {file = "pytest_asyncio-0.25.3.tar.gz", hash = "sha256:fc1da2cf9f125ada7e710b4ddad05518d4cee187ae9412e9ac9271003497f07a"}, +] + +[package.dependencies] +pytest = ">=8.2,<9" + +[package.extras] +docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1)"] +testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] + [[package]] name = "pytest-benchmark" version = "4.0.0" description = "A ``pytest`` fixture for benchmarking code. It will group the tests into rounds that are calibrated to the chosen timer." optional = false python-versions = ">=3.7" +groups = ["test"] +markers = "python_version < \"3.9\"" files = [ {file = "pytest-benchmark-4.0.0.tar.gz", hash = "sha256:fb0785b83efe599a6a956361c0691ae1dbb5318018561af10f3e915caa0048d1"}, {file = "pytest_benchmark-4.0.0-py3-none-any.whl", hash = "sha256:fdb7db64e31c8b277dff9850d2a2556d8b60bcb0ea6524e36e28ffd7c87f71d6"}, @@ -963,12 +1736,93 @@ aspect = ["aspectlib"] elasticsearch = ["elasticsearch"] histogram = ["pygal", "pygaljs"] +[[package]] +name = "pytest-benchmark" +version = "5.1.0" +description = "A ``pytest`` fixture for benchmarking code. It will group the tests into rounds that are calibrated to the chosen timer." +optional = false +python-versions = ">=3.9" +groups = ["test"] +markers = "python_version >= \"3.9\"" +files = [ + {file = "pytest-benchmark-5.1.0.tar.gz", hash = "sha256:9ea661cdc292e8231f7cd4c10b0319e56a2118e2c09d9f50e1b3d150d2aca105"}, + {file = "pytest_benchmark-5.1.0-py3-none-any.whl", hash = "sha256:922de2dfa3033c227c96da942d1878191afa135a29485fb942e85dff1c592c89"}, +] + +[package.dependencies] +py-cpuinfo = "*" +pytest = ">=8.1" + +[package.extras] +aspect = ["aspectlib"] +elasticsearch = ["elasticsearch"] +histogram = ["pygal", "pygaljs", "setuptools"] + +[[package]] +name = "pytest-codspeed" +version = "2.2.1" +description = "Pytest plugin to create CodSpeed benchmarks" +optional = false +python-versions = ">=3.7" +groups = ["test"] +markers = "python_version == \"3.7\"" +files = [ + {file = "pytest_codspeed-2.2.1-py3-none-any.whl", hash = "sha256:aad08033015f3e6c8c14c8bf0eca475921a9b088e92c98b626bf8af8f516471e"}, + {file = "pytest_codspeed-2.2.1.tar.gz", hash = "sha256:0adc24baf01c64a6ca0a0b83b3cd704351708997e09ec086b7776c32227d4e0a"}, +] + +[package.dependencies] +cffi = ">=1.15.1" +filelock = ">=3.12.2" +pytest = ">=3.8" + +[package.extras] +compat = ["pytest-benchmark (>=4.0.0,<4.1.0)", "pytest-xdist (>=2.0.0,<2.1.0)"] +lint = ["mypy (>=1.3.0,<1.4.0)", "ruff (>=0.3.3,<0.4.0)"] +test = ["pytest (>=7.0,<8.0)", "pytest-cov (>=4.0.0,<4.1.0)"] + +[[package]] +name = "pytest-codspeed" +version = "3.2.0" +description = "Pytest plugin to create CodSpeed benchmarks" +optional = false +python-versions = ">=3.9" +groups = ["test"] +markers = "python_version >= \"3.9\"" +files = [ + {file = "pytest_codspeed-3.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c5165774424c7ab8db7e7acdb539763a0e5657996effefdf0664d7fd95158d34"}, + {file = "pytest_codspeed-3.2.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9bd55f92d772592c04a55209950c50880413ae46876e66bd349ef157075ca26c"}, + {file = "pytest_codspeed-3.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4cf6f56067538f4892baa8d7ab5ef4e45bb59033be1ef18759a2c7fc55b32035"}, + {file = 
"pytest_codspeed-3.2.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:39a687b05c3d145642061b45ea78e47e12f13ce510104d1a2cda00eee0e36f58"}, + {file = "pytest_codspeed-3.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:46a1afaaa1ac4c2ca5b0700d31ac46d80a27612961d031067d73c6ccbd8d3c2b"}, + {file = "pytest_codspeed-3.2.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c48ce3af3dfa78413ed3d69d1924043aa1519048dbff46edccf8f35a25dab3c2"}, + {file = "pytest_codspeed-3.2.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:66692506d33453df48b36a84703448cb8b22953eea51f03fbb2eb758dc2bdc4f"}, + {file = "pytest_codspeed-3.2.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:479774f80d0bdfafa16112700df4dbd31bf2a6757fac74795fd79c0a7b3c389b"}, + {file = "pytest_codspeed-3.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:109f9f4dd1088019c3b3f887d003b7d65f98a7736ca1d457884f5aa293e8e81c"}, + {file = "pytest_codspeed-3.2.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e2f69a03b52c9bb041aec1b8ee54b7b6c37a6d0a948786effa4c71157765b6da"}, + {file = "pytest_codspeed-3.2.0-py3-none-any.whl", hash = "sha256:54b5c2e986d6a28e7b0af11d610ea57bd5531cec8326abe486f1b55b09d91c39"}, + {file = "pytest_codspeed-3.2.0.tar.gz", hash = "sha256:f9d1b1a3b2c69cdc0490a1e8b1ced44bffbd0e8e21d81a7160cfdd923f6e8155"}, +] + +[package.dependencies] +cffi = ">=1.17.1" +importlib-metadata = {version = ">=8.5.0", markers = "python_version < \"3.10\""} +pytest = ">=3.8" +rich = ">=13.8.1" + +[package.extras] +compat = ["pytest-benchmark (>=5.0.0,<5.1.0)", "pytest-xdist (>=3.6.1,<3.7.0)"] +lint = ["mypy (>=1.11.2,<1.12.0)", "ruff (>=0.6.5,<0.7.0)"] +test = ["pytest (>=7.0,<8.0)", "pytest-cov (>=4.0.0,<4.1.0)"] + [[package]] name = "pytest-cov" version = "4.1.0" description = "Pytest plugin for measuring coverage." optional = false python-versions = ">=3.7" +groups = ["test"] +markers = "python_version == \"3.7\"" files = [ {file = "pytest-cov-4.1.0.tar.gz", hash = "sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6"}, {file = "pytest_cov-4.1.0-py3-none-any.whl", hash = "sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a"}, @@ -987,6 +1841,8 @@ version = "5.0.0" description = "Pytest plugin for measuring coverage." optional = false python-versions = ">=3.8" +groups = ["test"] +markers = "python_version == \"3.8\"" files = [ {file = "pytest-cov-5.0.0.tar.gz", hash = "sha256:5837b58e9f6ebd335b0f8060eecce69b662415b16dc503883a02f45dfeb14857"}, {file = "pytest_cov-5.0.0-py3-none-any.whl", hash = "sha256:4f0764a1219df53214206bf1feea4633c3b558a2925c8b59f144f682861ce652"}, @@ -999,12 +1855,34 @@ pytest = ">=4.6" [package.extras] testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"] +[[package]] +name = "pytest-cov" +version = "6.2.1" +description = "Pytest plugin for measuring coverage." 
+optional = false +python-versions = ">=3.9" +groups = ["test"] +markers = "python_version >= \"3.9\"" +files = [ + {file = "pytest_cov-6.2.1-py3-none-any.whl", hash = "sha256:f5bc4c23f42f1cdd23c70b1dab1bbaef4fc505ba950d53e0081d0730dd7e86d5"}, + {file = "pytest_cov-6.2.1.tar.gz", hash = "sha256:25cc6cc0a5358204b8108ecedc51a9b57b34cc6b8c967cc2c01a4e00d8a67da2"}, +] + +[package.dependencies] +coverage = {version = ">=7.5", extras = ["toml"]} +pluggy = ">=1.2" +pytest = ">=6.2.5" + +[package.extras] +testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"] + [[package]] name = "pytest-describe" version = "2.2.0" description = "Describe-style plugin for pytest" optional = false python-versions = ">=3.7" +groups = ["test"] files = [ {file = "pytest-describe-2.2.0.tar.gz", hash = "sha256:39bb05eb90f2497d9ca342ef9a0b7fa5bada7e58505aec33f66d661d631955b7"}, {file = "pytest_describe-2.2.0-py3-none-any.whl", hash = "sha256:bd9e2c73acb4b9522a8400823d98f5b6a081667d3bfd7243a8598336896b544d"}, @@ -1015,13 +1893,14 @@ pytest = ">=4.6,<9" [[package]] name = "pytest-timeout" -version = "2.3.1" +version = "2.4.0" description = "pytest plugin to abort hanging tests" optional = false python-versions = ">=3.7" +groups = ["test"] files = [ - {file = "pytest-timeout-2.3.1.tar.gz", hash = "sha256:12397729125c6ecbdaca01035b9e5239d4db97352320af155b3f5de1ba5165d9"}, - {file = "pytest_timeout-2.3.1-py3-none-any.whl", hash = "sha256:68188cb703edfc6a18fad98dc25a3c61e9f24d644b0b70f33af545219fc7813e"}, + {file = "pytest_timeout-2.4.0-py3-none-any.whl", hash = "sha256:c42667e5cdadb151aeb5b26d114aff6bdf5a907f176a007a30b940d3d865b5c2"}, + {file = "pytest_timeout-2.4.0.tar.gz", hash = "sha256:7e68e90b01f9eff71332b25001f85c75495fc4e3a836701876183c4bcfd0540a"}, ] [package.dependencies] @@ -1029,13 +1908,15 @@ pytest = ">=7.0.0" [[package]] name = "pytz" -version = "2024.1" +version = "2025.2" description = "World timezone definitions, modern and historical" optional = false python-versions = "*" +groups = ["doc"] +markers = "python_version < \"3.9\"" files = [ - {file = "pytz-2024.1-py2.py3-none-any.whl", hash = "sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319"}, - {file = "pytz-2024.1.tar.gz", hash = "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812"}, + {file = "pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00"}, + {file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"}, ] [[package]] @@ -1044,6 +1925,8 @@ version = "2.31.0" description = "Python HTTP for Humans." optional = false python-versions = ">=3.7" +groups = ["doc"] +markers = "python_version == \"3.7\"" files = [ {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, @@ -1061,18 +1944,20 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "requests" -version = "2.32.3" +version = "2.32.4" description = "Python HTTP for Humans." 
optional = false python-versions = ">=3.8" +groups = ["doc"] +markers = "python_version >= \"3.8\"" files = [ - {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, - {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, + {file = "requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c"}, + {file = "requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422"}, ] [package.dependencies] certifi = ">=2017.4.17" -charset-normalizer = ">=2,<4" +charset_normalizer = ">=2,<4" idna = ">=2.5,<4" urllib3 = ">=1.21.1,<3" @@ -1080,53 +1965,78 @@ urllib3 = ">=1.21.1,<3" socks = ["PySocks (>=1.5.6,!=1.5.7)"] use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] +[[package]] +name = "rich" +version = "14.0.0" +description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" +optional = false +python-versions = ">=3.8.0" +groups = ["test"] +markers = "python_version >= \"3.9\"" +files = [ + {file = "rich-14.0.0-py3-none-any.whl", hash = "sha256:1c9491e1951aac09caffd42f448ee3d04e58923ffe14993f6e83068dc395d7e0"}, + {file = "rich-14.0.0.tar.gz", hash = "sha256:82f1bc23a6a21ebca4ae0c45af9bdbc492ed20231dcb63f297d6d1021a9d5725"}, +] + +[package.dependencies] +markdown-it-py = ">=2.2.0" +pygments = ">=2.13.0,<3.0.0" +typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.11\""} + +[package.extras] +jupyter = ["ipywidgets (>=7.5.1,<9)"] + [[package]] name = "ruff" -version = "0.5.7" +version = "0.12.0" description = "An extremely fast Python linter and code formatter, written in Rust." 
optional = false python-versions = ">=3.7" -files = [ - {file = "ruff-0.5.7-py3-none-linux_armv6l.whl", hash = "sha256:548992d342fc404ee2e15a242cdbea4f8e39a52f2e7752d0e4cbe88d2d2f416a"}, - {file = "ruff-0.5.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:00cc8872331055ee017c4f1071a8a31ca0809ccc0657da1d154a1d2abac5c0be"}, - {file = "ruff-0.5.7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:eaf3d86a1fdac1aec8a3417a63587d93f906c678bb9ed0b796da7b59c1114a1e"}, - {file = "ruff-0.5.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a01c34400097b06cf8a6e61b35d6d456d5bd1ae6961542de18ec81eaf33b4cb8"}, - {file = "ruff-0.5.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fcc8054f1a717e2213500edaddcf1dbb0abad40d98e1bd9d0ad364f75c763eea"}, - {file = "ruff-0.5.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7f70284e73f36558ef51602254451e50dd6cc479f8b6f8413a95fcb5db4a55fc"}, - {file = "ruff-0.5.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:a78ad870ae3c460394fc95437d43deb5c04b5c29297815a2a1de028903f19692"}, - {file = "ruff-0.5.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9ccd078c66a8e419475174bfe60a69adb36ce04f8d4e91b006f1329d5cd44bcf"}, - {file = "ruff-0.5.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e31c9bad4ebf8fdb77b59cae75814440731060a09a0e0077d559a556453acbb"}, - {file = "ruff-0.5.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d796327eed8e168164346b769dd9a27a70e0298d667b4ecee6877ce8095ec8e"}, - {file = "ruff-0.5.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:4a09ea2c3f7778cc635e7f6edf57d566a8ee8f485f3c4454db7771efb692c499"}, - {file = "ruff-0.5.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:a36d8dcf55b3a3bc353270d544fb170d75d2dff41eba5df57b4e0b67a95bb64e"}, - {file = "ruff-0.5.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:9369c218f789eefbd1b8d82a8cf25017b523ac47d96b2f531eba73770971c9e5"}, - {file = "ruff-0.5.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:b88ca3db7eb377eb24fb7c82840546fb7acef75af4a74bd36e9ceb37a890257e"}, - {file = "ruff-0.5.7-py3-none-win32.whl", hash = "sha256:33d61fc0e902198a3e55719f4be6b375b28f860b09c281e4bdbf783c0566576a"}, - {file = "ruff-0.5.7-py3-none-win_amd64.whl", hash = "sha256:083bbcbe6fadb93cd86709037acc510f86eed5a314203079df174c40bbbca6b3"}, - {file = "ruff-0.5.7-py3-none-win_arm64.whl", hash = "sha256:2dca26154ff9571995107221d0aeaad0e75a77b5a682d6236cf89a58c70b76f4"}, - {file = "ruff-0.5.7.tar.gz", hash = "sha256:8dfc0a458797f5d9fb622dd0efc52d796f23f0a1493a9527f4e49a550ae9a7e5"}, +groups = ["lint"] +files = [ + {file = "ruff-0.12.0-py3-none-linux_armv6l.whl", hash = "sha256:5652a9ecdb308a1754d96a68827755f28d5dfb416b06f60fd9e13f26191a8848"}, + {file = "ruff-0.12.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:05ed0c914fabc602fc1f3b42c53aa219e5736cb030cdd85640c32dbc73da74a6"}, + {file = "ruff-0.12.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:07a7aa9b69ac3fcfda3c507916d5d1bca10821fe3797d46bad10f2c6de1edda0"}, + {file = "ruff-0.12.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e7731c3eec50af71597243bace7ec6104616ca56dda2b99c89935fe926bdcd48"}, + {file = "ruff-0.12.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:952d0630eae628250ab1c70a7fffb641b03e6b4a2d3f3ec6c1d19b4ab6c6c807"}, + {file = "ruff-0.12.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:c021f04ea06966b02614d442e94071781c424ab8e02ec7af2f037b4c1e01cc82"}, + {file = "ruff-0.12.0-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:7d235618283718ee2fe14db07f954f9b2423700919dc688eacf3f8797a11315c"}, + {file = "ruff-0.12.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0c0758038f81beec8cc52ca22de9685b8ae7f7cc18c013ec2050012862cc9165"}, + {file = "ruff-0.12.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:139b3d28027987b78fc8d6cfb61165447bdf3740e650b7c480744873688808c2"}, + {file = "ruff-0.12.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68853e8517b17bba004152aebd9dd77d5213e503a5f2789395b25f26acac0da4"}, + {file = "ruff-0.12.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3a9512af224b9ac4757f7010843771da6b2b0935a9e5e76bb407caa901a1a514"}, + {file = "ruff-0.12.0-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:b08df3d96db798e5beb488d4df03011874aff919a97dcc2dd8539bb2be5d6a88"}, + {file = "ruff-0.12.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:6a315992297a7435a66259073681bb0d8647a826b7a6de45c6934b2ca3a9ed51"}, + {file = "ruff-0.12.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:1e55e44e770e061f55a7dbc6e9aed47feea07731d809a3710feda2262d2d4d8a"}, + {file = "ruff-0.12.0-py3-none-win32.whl", hash = "sha256:7162a4c816f8d1555eb195c46ae0bd819834d2a3f18f98cc63819a7b46f474fb"}, + {file = "ruff-0.12.0-py3-none-win_amd64.whl", hash = "sha256:d00b7a157b8fb6d3827b49d3324da34a1e3f93492c1f97b08e222ad7e9b291e0"}, + {file = "ruff-0.12.0-py3-none-win_arm64.whl", hash = "sha256:8cd24580405ad8c1cc64d61725bca091d6b6da7eb3d36f72cc605467069d7e8b"}, + {file = "ruff-0.12.0.tar.gz", hash = "sha256:4d047db3662418d4a848a3fdbfaf17488b34b62f527ed6f10cb8afd78135bc5c"}, ] [[package]] name = "six" -version = "1.16.0" +version = "1.17.0" description = "Python 2 and 3 compatibility utilities" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["test"] +markers = "python_version == \"3.7\"" files = [ - {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, - {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, + {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, + {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, ] [[package]] name = "snowballstemmer" -version = "2.2.0" -description = "This package provides 29 stemmers for 28 languages generated from Snowball algorithms." +version = "3.0.1" +description = "This package provides 32 stemmers for 30 languages generated from Snowball algorithms." 
optional = false -python-versions = "*" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*" +groups = ["doc"] files = [ - {file = "snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a"}, - {file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"}, + {file = "snowballstemmer-3.0.1-py3-none-any.whl", hash = "sha256:6cd7b3897da8d6c9ffb968a6781fa6532dce9c3618a4b127d920dab764a19064"}, + {file = "snowballstemmer-3.0.1.tar.gz", hash = "sha256:6d5eeeec8e9f84d4d56b847692bacf79bc2c8e90c7f80ca4444ff8b6f2e52895"}, ] [[package]] @@ -1135,6 +2045,8 @@ version = "5.3.0" description = "Python documentation generator" optional = false python-versions = ">=3.6" +groups = ["doc"] +markers = "python_version == \"3.7\"" files = [ {file = "Sphinx-5.3.0.tar.gz", hash = "sha256:51026de0a9ff9fc13c05d74913ad66047e104f56a129ff73e174eb5c3ee794b5"}, {file = "sphinx-5.3.0-py3-none-any.whl", hash = "sha256:060ca5c9f7ba57a08a1219e547b269fadf125ae25b06b9fa7f66768efb652d6d"}, @@ -1162,7 +2074,7 @@ sphinxcontrib-serializinghtml = ">=1.1.5" [package.extras] docs = ["sphinxcontrib-websupport"] lint = ["docutils-stubs", "flake8 (>=3.5.0)", "flake8-bugbear", "flake8-comprehensions", "flake8-simplify", "isort", "mypy (>=0.981)", "sphinx-lint", "types-requests", "types-typed-ast"] -test = ["cython", "html5lib", "pytest (>=4.6)", "typed_ast"] +test = ["cython", "html5lib", "pytest (>=4.6)", "typed_ast ; python_version < \"3.8\""] [[package]] name = "sphinx" @@ -1170,6 +2082,8 @@ version = "7.1.2" description = "Python documentation generator" optional = false python-versions = ">=3.8" +groups = ["doc"] +markers = "python_version == \"3.8\"" files = [ {file = "sphinx-7.1.2-py3-none-any.whl", hash = "sha256:d170a81825b2fcacb6dfd5a0d7f578a053e45d3f2b153fecc948c37344eb4cbe"}, {file = "sphinx-7.1.2.tar.gz", hash = "sha256:780f4d32f1d7d1126576e0e5ecc19dc32ab76cd24e950228dcf7b1f6d3d9e22f"}, @@ -1199,12 +2113,89 @@ docs = ["sphinxcontrib-websupport"] lint = ["docutils-stubs", "flake8 (>=3.5.0)", "flake8-simplify", "isort", "mypy (>=0.990)", "ruff", "sphinx-lint", "types-requests"] test = ["cython", "filelock", "html5lib", "pytest (>=4.6)"] +[[package]] +name = "sphinx" +version = "7.4.7" +description = "Python documentation generator" +optional = false +python-versions = ">=3.9" +groups = ["doc"] +markers = "python_version == \"3.9\"" +files = [ + {file = "sphinx-7.4.7-py3-none-any.whl", hash = "sha256:c2419e2135d11f1951cd994d6eb18a1835bd8fdd8429f9ca375dc1f3281bd239"}, + {file = "sphinx-7.4.7.tar.gz", hash = "sha256:242f92a7ea7e6c5b406fdc2615413890ba9f699114a9c09192d7dfead2ee9cfe"}, +] + +[package.dependencies] +alabaster = ">=0.7.14,<0.8.0" +babel = ">=2.13" +colorama = {version = ">=0.4.6", markers = "sys_platform == \"win32\""} +docutils = ">=0.20,<0.22" +imagesize = ">=1.3" +importlib-metadata = {version = ">=6.0", markers = "python_version < \"3.10\""} +Jinja2 = ">=3.1" +packaging = ">=23.0" +Pygments = ">=2.17" +requests = ">=2.30.0" +snowballstemmer = ">=2.2" +sphinxcontrib-applehelp = "*" +sphinxcontrib-devhelp = "*" +sphinxcontrib-htmlhelp = ">=2.0.0" +sphinxcontrib-jsmath = "*" +sphinxcontrib-qthelp = "*" +sphinxcontrib-serializinghtml = ">=1.1.9" +tomli = {version = ">=2", markers = "python_version < \"3.11\""} + +[package.extras] +docs = ["sphinxcontrib-websupport"] +lint = ["flake8 (>=6.0)", "importlib-metadata (>=6.0)", "mypy (==1.10.1)", "pytest (>=6.0)", "ruff (==0.5.2)", "sphinx-lint (>=0.9)", 
"tomli (>=2)", "types-docutils (==0.21.0.20240711)", "types-requests (>=2.30.0)"] +test = ["cython (>=3.0)", "defusedxml (>=0.7.1)", "pytest (>=8.0)", "setuptools (>=70.0)", "typing_extensions (>=4.9)"] + +[[package]] +name = "sphinx" +version = "8.1.3" +description = "Python documentation generator" +optional = false +python-versions = ">=3.10" +groups = ["doc"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "sphinx-8.1.3-py3-none-any.whl", hash = "sha256:09719015511837b76bf6e03e42eb7595ac8c2e41eeb9c29c5b755c6b677992a2"}, + {file = "sphinx-8.1.3.tar.gz", hash = "sha256:43c1911eecb0d3e161ad78611bc905d1ad0e523e4ddc202a58a821773dc4c927"}, +] + +[package.dependencies] +alabaster = ">=0.7.14" +babel = ">=2.13" +colorama = {version = ">=0.4.6", markers = "sys_platform == \"win32\""} +docutils = ">=0.20,<0.22" +imagesize = ">=1.3" +Jinja2 = ">=3.1" +packaging = ">=23.0" +Pygments = ">=2.17" +requests = ">=2.30.0" +snowballstemmer = ">=2.2" +sphinxcontrib-applehelp = ">=1.0.7" +sphinxcontrib-devhelp = ">=1.0.6" +sphinxcontrib-htmlhelp = ">=2.0.6" +sphinxcontrib-jsmath = ">=1.0.1" +sphinxcontrib-qthelp = ">=1.0.6" +sphinxcontrib-serializinghtml = ">=1.1.9" +tomli = {version = ">=2", markers = "python_version < \"3.11\""} + +[package.extras] +docs = ["sphinxcontrib-websupport"] +lint = ["flake8 (>=6.0)", "mypy (==1.11.1)", "pyright (==1.1.384)", "pytest (>=6.0)", "ruff (==0.6.9)", "sphinx-lint (>=0.9)", "tomli (>=2)", "types-Pillow (==10.2.0.20240822)", "types-Pygments (==2.18.0.20240506)", "types-colorama (==0.4.15.20240311)", "types-defusedxml (==0.7.0.20240218)", "types-docutils (==0.21.0.20241005)", "types-requests (==2.32.0.20240914)", "types-urllib3 (==1.26.25.14)"] +test = ["cython (>=3.0)", "defusedxml (>=0.7.1)", "pytest (>=8.0)", "setuptools (>=70.0)", "typing_extensions (>=4.9)"] + [[package]] name = "sphinx-rtd-theme" version = "2.0.0" description = "Read the Docs theme for Sphinx" optional = false python-versions = ">=3.6" +groups = ["doc"] +markers = "python_version == \"3.7\"" files = [ {file = "sphinx_rtd_theme-2.0.0-py2.py3-none-any.whl", hash = "sha256:ec93d0856dc280cf3aee9a4c9807c60e027c7f7b461b77aeffed682e68f0e586"}, {file = "sphinx_rtd_theme-2.0.0.tar.gz", hash = "sha256:bd5d7b80622406762073a04ef8fadc5f9151261563d47027de09910ce03afe6b"}, @@ -1218,12 +2209,35 @@ sphinxcontrib-jquery = ">=4,<5" [package.extras] dev = ["bump2version", "sphinxcontrib-httpdomain", "transifex-client", "wheel"] +[[package]] +name = "sphinx-rtd-theme" +version = "3.0.2" +description = "Read the Docs theme for Sphinx" +optional = false +python-versions = ">=3.8" +groups = ["doc"] +markers = "python_version >= \"3.8\"" +files = [ + {file = "sphinx_rtd_theme-3.0.2-py2.py3-none-any.whl", hash = "sha256:422ccc750c3a3a311de4ae327e82affdaf59eb695ba4936538552f3b00f4ee13"}, + {file = "sphinx_rtd_theme-3.0.2.tar.gz", hash = "sha256:b7457bc25dda723b20b086a670b9953c859eab60a2a03ee8eb2bb23e176e5f85"}, +] + +[package.dependencies] +docutils = ">0.18,<0.22" +sphinx = ">=6,<9" +sphinxcontrib-jquery = ">=4,<5" + +[package.extras] +dev = ["bump2version", "transifex-client", "twine", "wheel"] + [[package]] name = "sphinxcontrib-applehelp" version = "1.0.2" description = "sphinxcontrib-applehelp is a sphinx extension which outputs Apple help books" optional = false python-versions = ">=3.5" +groups = ["doc"] +markers = "python_version == \"3.7\"" files = [ {file = "sphinxcontrib-applehelp-1.0.2.tar.gz", hash = "sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58"}, {file = 
"sphinxcontrib_applehelp-1.0.2-py2.py3-none-any.whl", hash = "sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a"}, @@ -1239,6 +2253,8 @@ version = "1.0.4" description = "sphinxcontrib-applehelp is a Sphinx extension which outputs Apple help books" optional = false python-versions = ">=3.8" +groups = ["doc"] +markers = "python_version == \"3.8\"" files = [ {file = "sphinxcontrib-applehelp-1.0.4.tar.gz", hash = "sha256:828f867945bbe39817c210a1abfd1bc4895c8b73fcaade56d45357a348a07d7e"}, {file = "sphinxcontrib_applehelp-1.0.4-py3-none-any.whl", hash = "sha256:29d341f67fb0f6f586b23ad80e072c8e6ad0b48417db2bde114a4c9746feb228"}, @@ -1248,12 +2264,32 @@ files = [ lint = ["docutils-stubs", "flake8", "mypy"] test = ["pytest"] +[[package]] +name = "sphinxcontrib-applehelp" +version = "2.0.0" +description = "sphinxcontrib-applehelp is a Sphinx extension which outputs Apple help books" +optional = false +python-versions = ">=3.9" +groups = ["doc"] +markers = "python_version >= \"3.9\"" +files = [ + {file = "sphinxcontrib_applehelp-2.0.0-py3-none-any.whl", hash = "sha256:4cd3f0ec4ac5dd9c17ec65e9ab272c9b867ea77425228e68ecf08d6b28ddbdb5"}, + {file = "sphinxcontrib_applehelp-2.0.0.tar.gz", hash = "sha256:2f29ef331735ce958efa4734873f084941970894c6090408b079c61b2e1c06d1"}, +] + +[package.extras] +lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] +standalone = ["Sphinx (>=5)"] +test = ["pytest"] + [[package]] name = "sphinxcontrib-devhelp" version = "1.0.2" description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp document." optional = false python-versions = ">=3.5" +groups = ["doc"] +markers = "python_version < \"3.9\"" files = [ {file = "sphinxcontrib-devhelp-1.0.2.tar.gz", hash = "sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4"}, {file = "sphinxcontrib_devhelp-1.0.2-py2.py3-none-any.whl", hash = "sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e"}, @@ -1263,12 +2299,32 @@ files = [ lint = ["docutils-stubs", "flake8", "mypy"] test = ["pytest"] +[[package]] +name = "sphinxcontrib-devhelp" +version = "2.0.0" +description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp documents" +optional = false +python-versions = ">=3.9" +groups = ["doc"] +markers = "python_version >= \"3.9\"" +files = [ + {file = "sphinxcontrib_devhelp-2.0.0-py3-none-any.whl", hash = "sha256:aefb8b83854e4b0998877524d1029fd3e6879210422ee3780459e28a1f03a8a2"}, + {file = "sphinxcontrib_devhelp-2.0.0.tar.gz", hash = "sha256:411f5d96d445d1d73bb5d52133377b4248ec79db5c793ce7dbe59e074b4dd1ad"}, +] + +[package.extras] +lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] +standalone = ["Sphinx (>=5)"] +test = ["pytest"] + [[package]] name = "sphinxcontrib-htmlhelp" version = "2.0.0" description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files" optional = false python-versions = ">=3.6" +groups = ["doc"] +markers = "python_version == \"3.7\"" files = [ {file = "sphinxcontrib-htmlhelp-2.0.0.tar.gz", hash = "sha256:f5f8bb2d0d629f398bf47d0d69c07bc13b65f75a81ad9e2f71a63d4b7a2f6db2"}, {file = "sphinxcontrib_htmlhelp-2.0.0-py2.py3-none-any.whl", hash = "sha256:d412243dfb797ae3ec2b59eca0e52dac12e75a241bf0e4eb861e450d06c6ed07"}, @@ -1284,6 +2340,8 @@ version = "2.0.1" description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files" optional = false python-versions = ">=3.8" +groups = ["doc"] +markers = "python_version == \"3.8\"" files = [ {file = "sphinxcontrib-htmlhelp-2.0.1.tar.gz", hash 
= "sha256:0cbdd302815330058422b98a113195c9249825d681e18f11e8b1f78a2f11efff"}, {file = "sphinxcontrib_htmlhelp-2.0.1-py3-none-any.whl", hash = "sha256:c38cb46dccf316c79de6e5515e1770414b797162b23cd3d06e67020e1d2a6903"}, @@ -1293,12 +2351,31 @@ files = [ lint = ["docutils-stubs", "flake8", "mypy"] test = ["html5lib", "pytest"] +[[package]] +name = "sphinxcontrib-htmlhelp" +version = "2.1.0" +description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files" +optional = false +python-versions = ">=3.9" +groups = ["doc"] +markers = "python_version >= \"3.9\"" +files = [ + {file = "sphinxcontrib_htmlhelp-2.1.0-py3-none-any.whl", hash = "sha256:166759820b47002d22914d64a075ce08f4c46818e17cfc9470a9786b759b19f8"}, + {file = "sphinxcontrib_htmlhelp-2.1.0.tar.gz", hash = "sha256:c9e2916ace8aad64cc13a0d233ee22317f2b9025b9cf3295249fa985cc7082e9"}, +] + +[package.extras] +lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] +standalone = ["Sphinx (>=5)"] +test = ["html5lib", "pytest"] + [[package]] name = "sphinxcontrib-jquery" version = "4.1" description = "Extension to include jQuery on newer Sphinx releases" optional = false python-versions = ">=2.7" +groups = ["doc"] files = [ {file = "sphinxcontrib-jquery-4.1.tar.gz", hash = "sha256:1620739f04e36a2c779f1a131a2dfd49b2fd07351bf1968ced074365933abc7a"}, {file = "sphinxcontrib_jquery-4.1-py2.py3-none-any.whl", hash = "sha256:f936030d7d0147dd026a4f2b5a57343d233f1fc7b363f68b3d4f1cb0993878ae"}, @@ -1313,6 +2390,7 @@ version = "1.0.1" description = "A sphinx extension which renders display math in HTML via JavaScript" optional = false python-versions = ">=3.5" +groups = ["doc"] files = [ {file = "sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8"}, {file = "sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178"}, @@ -1327,6 +2405,8 @@ version = "1.0.3" description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp document." optional = false python-versions = ">=3.5" +groups = ["doc"] +markers = "python_version < \"3.9\"" files = [ {file = "sphinxcontrib-qthelp-1.0.3.tar.gz", hash = "sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72"}, {file = "sphinxcontrib_qthelp-1.0.3-py2.py3-none-any.whl", hash = "sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6"}, @@ -1336,12 +2416,32 @@ files = [ lint = ["docutils-stubs", "flake8", "mypy"] test = ["pytest"] +[[package]] +name = "sphinxcontrib-qthelp" +version = "2.0.0" +description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp documents" +optional = false +python-versions = ">=3.9" +groups = ["doc"] +markers = "python_version >= \"3.9\"" +files = [ + {file = "sphinxcontrib_qthelp-2.0.0-py3-none-any.whl", hash = "sha256:b18a828cdba941ccd6ee8445dbe72ffa3ef8cbe7505d8cd1fa0d42d3f2d5f3eb"}, + {file = "sphinxcontrib_qthelp-2.0.0.tar.gz", hash = "sha256:4fe7d0ac8fc171045be623aba3e2a8f613f8682731f9153bb2e40ece16b9bbab"}, +] + +[package.extras] +lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] +standalone = ["Sphinx (>=5)"] +test = ["defusedxml (>=0.7.1)", "pytest"] + [[package]] name = "sphinxcontrib-serializinghtml" version = "1.1.5" description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)." 
optional = false python-versions = ">=3.5" +groups = ["doc"] +markers = "python_version < \"3.9\"" files = [ {file = "sphinxcontrib-serializinghtml-1.1.5.tar.gz", hash = "sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952"}, {file = "sphinxcontrib_serializinghtml-1.1.5-py2.py3-none-any.whl", hash = "sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd"}, @@ -1351,23 +2451,88 @@ files = [ lint = ["docutils-stubs", "flake8", "mypy"] test = ["pytest"] +[[package]] +name = "sphinxcontrib-serializinghtml" +version = "2.0.0" +description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)" +optional = false +python-versions = ">=3.9" +groups = ["doc"] +markers = "python_version >= \"3.9\"" +files = [ + {file = "sphinxcontrib_serializinghtml-2.0.0-py3-none-any.whl", hash = "sha256:6e2cb0eef194e10c27ec0023bfeb25badbbb5868244cf5bc5bdc04e4464bf331"}, + {file = "sphinxcontrib_serializinghtml-2.0.0.tar.gz", hash = "sha256:e9d912827f872c029017a53f0ef2180b327c3f7fd23c87229f7a8e8b70031d4d"}, +] + +[package.extras] +lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] +standalone = ["Sphinx (>=5)"] +test = ["pytest"] + [[package]] name = "tomli" version = "2.0.1" description = "A lil' TOML parser" optional = false python-versions = ">=3.7" +groups = ["lint", "test"] +markers = "python_version == \"3.7\"" files = [ {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, ] +[[package]] +name = "tomli" +version = "2.2.1" +description = "A lil' TOML parser" +optional = false +python-versions = ">=3.8" +groups = ["doc", "lint", "test"] +files = [ + {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, + {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"}, + {file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"}, + {file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"}, + {file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"}, + {file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", 
hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"}, + {file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"}, + {file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"}, + {file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"}, + {file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"}, + {file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"}, + {file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"}, + {file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"}, + {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"}, +] +markers = {doc = "python_version >= \"3.9\" and python_version < \"3.11\"", lint = "python_version >= \"3.8\" and python_version <= \"3.10\"", test = "python_version >= \"3.8\" and python_full_version <= \"3.11.0a6\""} + [[package]] name = "tox" version = "3.28.0" description = "tox is a generic virtualenv management and test command line tool" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" +groups = ["test"] +markers = "python_version 
== \"3.7\"" files = [ {file = "tox-3.28.0-py2.py3-none-any.whl", hash = "sha256:57b5ab7e8bb3074edc3c0c0b4b192a4f3799d3723b2c5b76f1fa9f2d40316eea"}, {file = "tox-3.28.0.tar.gz", hash = "sha256:d0d28f3fe6d6d7195c27f8b054c3e99d5451952b54abdae673b71609a581f640"}, @@ -1386,34 +2551,65 @@ virtualenv = ">=16.0.0,<20.0.0 || >20.0.0,<20.0.1 || >20.0.1,<20.0.2 || >20.0.2, [package.extras] docs = ["pygments-github-lexers (>=0.0.5)", "sphinx (>=2.0.0)", "sphinxcontrib-autoprogram (>=0.1.5)", "towncrier (>=18.5.0)"] -testing = ["flaky (>=3.4.0)", "freezegun (>=0.3.11)", "pathlib2 (>=2.3.3)", "psutil (>=5.6.1)", "pytest (>=4.0.0)", "pytest-cov (>=2.5.1)", "pytest-mock (>=1.10.0)", "pytest-randomly (>=1.0.0)"] +testing = ["flaky (>=3.4.0)", "freezegun (>=0.3.11)", "pathlib2 (>=2.3.3) ; python_version < \"3.4\"", "psutil (>=5.6.1) ; platform_python_implementation == \"cpython\"", "pytest (>=4.0.0)", "pytest-cov (>=2.5.1)", "pytest-mock (>=1.10.0)", "pytest-randomly (>=1.0.0)"] [[package]] name = "tox" -version = "4.17.1" +version = "4.25.0" description = "tox is a generic virtualenv management and test command line tool" optional = false python-versions = ">=3.8" +groups = ["test"] +markers = "python_version == \"3.8\"" files = [ - {file = "tox-4.17.1-py3-none-any.whl", hash = "sha256:2974597c0353577126ab014f52d1a399fb761049e165ff34427f84e8cfe6c990"}, - {file = "tox-4.17.1.tar.gz", hash = "sha256:2c41565a571e34480bd401d668a4899806169a4633e972ac296c54406d2ded8a"}, + {file = "tox-4.25.0-py3-none-any.whl", hash = "sha256:4dfdc7ba2cc6fdc6688dde1b21e7b46ff6c41795fb54586c91a3533317b5255c"}, + {file = "tox-4.25.0.tar.gz", hash = "sha256:dd67f030317b80722cf52b246ff42aafd3ed27ddf331c415612d084304cf5e52"}, ] [package.dependencies] -cachetools = ">=5.4" +cachetools = ">=5.5.1" chardet = ">=5.2" colorama = ">=0.4.6" -filelock = ">=3.15.4" -packaging = ">=24.1" -platformdirs = ">=4.2.2" +filelock = ">=3.16.1" +packaging = ">=24.2" +platformdirs = ">=4.3.6" pluggy = ">=1.5" -pyproject-api = ">=1.7.1" -tomli = {version = ">=2.0.1", markers = "python_version < \"3.11\""} -virtualenv = ">=20.26.3" +pyproject-api = ">=1.8" +tomli = {version = ">=2.2.1", markers = "python_version < \"3.11\""} +typing-extensions = {version = ">=4.12.2", markers = "python_version < \"3.11\""} +virtualenv = ">=20.29.1" [package.extras] -docs = ["furo (>=2024.7.18)", "sphinx (>=7.4.7)", "sphinx-argparse-cli (>=1.16)", "sphinx-autodoc-typehints (>=2.2.3)", "sphinx-copybutton (>=0.5.2)", "sphinx-inline-tabs (>=2023.4.21)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.11)"] -testing = ["build[virtualenv] (>=1.2.1)", "covdefaults (>=2.3)", "detect-test-pollution (>=1.2)", "devpi-process (>=1)", "diff-cover (>=9.1.1)", "distlib (>=0.3.8)", "flaky (>=3.8.1)", "hatch-vcs (>=0.4)", "hatchling (>=1.25)", "psutil (>=6)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-xdist (>=3.6.1)", "re-assert (>=1.1)", "setuptools (>=70.3)", "time-machine (>=2.14.2)", "wheel (>=0.43)"] +test = ["devpi-process (>=1.0.2)", "pytest (>=8.3.4)", "pytest-mock (>=3.14)"] + +[[package]] +name = "tox" +version = "4.27.0" +description = "tox is a generic virtualenv management and test command line tool" +optional = false +python-versions = ">=3.9" +groups = ["test"] +markers = "python_version >= \"3.9\"" +files = [ + {file = "tox-4.27.0-py3-none-any.whl", hash = "sha256:2b8a7fb986b82aa2c830c0615082a490d134e0626dbc9189986da46a313c4f20"}, + {file = "tox-4.27.0.tar.gz", hash = 
"sha256:b97d5ecc0c0d5755bcc5348387fef793e1bfa68eb33746412f4c60881d7f5f57"}, +] + +[package.dependencies] +cachetools = ">=5.5.1" +chardet = ">=5.2" +colorama = ">=0.4.6" +filelock = ">=3.16.1" +packaging = ">=24.2" +platformdirs = ">=4.3.6" +pluggy = ">=1.5" +pyproject-api = ">=1.8" +tomli = {version = ">=2.2.1", markers = "python_version < \"3.11\""} +typing-extensions = {version = ">=4.12.2", markers = "python_version < \"3.11\""} +virtualenv = ">=20.31" + +[package.extras] +test = ["devpi-process (>=1.0.2)", "pytest (>=8.3.4)", "pytest-mock (>=3.14)"] [[package]] name = "typed-ast" @@ -1421,6 +2617,8 @@ version = "1.5.5" description = "a fork of Python 2 and 3 ast modules with type comment support" optional = false python-versions = ">=3.6" +groups = ["lint"] +markers = "python_version == \"3.7\"" files = [ {file = "typed_ast-1.5.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4bc1efe0ce3ffb74784e06460f01a223ac1f6ab31c6bc0376a21184bf5aabe3b"}, {file = "typed_ast-1.5.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5f7a8c46a8b333f71abd61d7ab9255440d4a588f34a21f126bbfc95f6049e686"}, @@ -1471,6 +2669,8 @@ version = "4.7.1" description = "Backported and Experimental Type Hints for Python 3.7+" optional = false python-versions = ">=3.7" +groups = ["main", "doc", "lint", "test"] +markers = "python_version == \"3.7\"" files = [ {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"}, {file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"}, @@ -1478,58 +2678,98 @@ files = [ [[package]] name = "typing-extensions" -version = "4.12.2" +version = "4.13.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" +groups = ["main", "lint", "test"] +markers = "python_version == \"3.8\"" files = [ - {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, - {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, + {file = "typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c"}, + {file = "typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef"}, ] +[[package]] +name = "typing-extensions" +version = "4.14.0" +description = "Backported and Experimental Type Hints for Python 3.9+" +optional = false +python-versions = ">=3.9" +groups = ["main", "lint", "test"] +files = [ + {file = "typing_extensions-4.14.0-py3-none-any.whl", hash = "sha256:a1514509136dd0b477638fc68d6a91497af5076466ad0fa6c338e44e359944af"}, + {file = "typing_extensions-4.14.0.tar.gz", hash = "sha256:8676b788e32f02ab42d9e7c61324048ae4c6d844a399eebace3d4979d75ceef4"}, +] +markers = {main = "python_version == \"3.9\"", lint = "python_version >= \"3.9\"", test = "python_version >= \"3.9\" and python_version < \"3.11\""} + [[package]] name = "urllib3" version = "2.0.7" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false python-versions = ">=3.7" +groups = ["doc"] +markers = "python_version == \"3.7\"" files = [ {file = "urllib3-2.0.7-py3-none-any.whl", hash = "sha256:fdb6d215c776278489906c2f8916e6e7d4f5a9b602ccbcfdf7f016fc8da0596e"}, {file = "urllib3-2.0.7.tar.gz", hash = "sha256:c97dfde1f7bd43a71c8d2a58e369e9b2bf692d1334ea9f9cae55add7d0dd0f84"}, ] [package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] secure = ["certifi", "cryptography (>=1.9)", "idna (>=2.0.0)", "pyopenssl (>=17.1.0)", "urllib3-secure-extra"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] [[package]] name = "urllib3" -version = "2.2.2" +version = "2.2.3" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=3.8" +groups = ["doc"] +markers = "python_version == \"3.8\"" +files = [ + {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, + {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "urllib3" +version = "2.5.0" +description = "HTTP library with thread-safe connection pooling, file post, and more." +optional = false +python-versions = ">=3.9" +groups = ["doc"] +markers = "python_version >= \"3.9\"" files = [ - {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"}, - {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"}, + {file = "urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc"}, + {file = "urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760"}, ] [package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] [[package]] name = "virtualenv" -version = "20.26.3" +version = "20.26.6" description = "Virtual Python Environment builder" optional = false python-versions = ">=3.7" +groups = ["test"] +markers = "python_version == \"3.7\"" files = [ - {file = "virtualenv-20.26.3-py3-none-any.whl", hash = "sha256:8cc4a31139e796e9a7de2cd5cf2489de1217193116a8fd42328f1bd65f434589"}, - {file = "virtualenv-20.26.3.tar.gz", hash = "sha256:4c43a2a236279d9ea36a0d76f98d84bd6ca94ac4e0f4a3b9d46d05e10fea542a"}, + {file = "virtualenv-20.26.6-py3-none-any.whl", hash = "sha256:7345cc5b25405607a624d8418154577459c3e0277f5466dd79c49d5e492995f2"}, + {file = "virtualenv-20.26.6.tar.gz", hash = "sha256:280aede09a2a5c317e409a00102e7077c6432c5a38f0ef938e643805a7ad2c48"}, ] [package.dependencies] @@ -1540,7 +2780,29 @@ platformdirs = ">=3.9.1,<5" [package.extras] docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", 
"sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] -test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"] +test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8) ; platform_python_implementation == \"PyPy\" or platform_python_implementation == \"CPython\" and sys_platform == \"win32\" and python_version >= \"3.13\"", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10) ; platform_python_implementation == \"CPython\""] + +[[package]] +name = "virtualenv" +version = "20.31.2" +description = "Virtual Python Environment builder" +optional = false +python-versions = ">=3.8" +groups = ["test"] +markers = "python_version >= \"3.8\"" +files = [ + {file = "virtualenv-20.31.2-py3-none-any.whl", hash = "sha256:36efd0d9650ee985f0cad72065001e66d49a6f24eb44d98980f630686243cf11"}, + {file = "virtualenv-20.31.2.tar.gz", hash = "sha256:e10c0a9d02835e592521be48b332b6caee6887f332c111aa79a09b9e79efc2af"}, +] + +[package.dependencies] +distlib = ">=0.3.7,<1" +filelock = ">=3.12.2,<4" +platformdirs = ">=3.9.1,<5" + +[package.extras] +docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] +test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8) ; platform_python_implementation == \"PyPy\" or platform_python_implementation == \"GraalVM\" or platform_python_implementation == \"CPython\" and sys_platform == \"win32\" and python_version >= \"3.13\"", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10) ; platform_python_implementation == \"CPython\""] [[package]] name = "zipp" @@ -1548,6 +2810,8 @@ version = "3.15.0" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.7" +groups = ["doc", "test"] +markers = "python_version == \"3.7\"" files = [ {file = "zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"}, {file = "zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"}, @@ -1555,24 +2819,51 @@ files = [ [package.extras] docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] +testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7) ; platform_python_implementation != \"PyPy\"", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8 ; python_version < \"3.12\"", "pytest-mypy (>=0.9.1) ; platform_python_implementation != \"PyPy\""] 
[[package]] name = "zipp" -version = "3.20.0" +version = "3.20.2" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.8" +groups = ["doc"] +markers = "python_version == \"3.8\"" +files = [ + {file = "zipp-3.20.2-py3-none-any.whl", hash = "sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350"}, + {file = "zipp-3.20.2.tar.gz", hash = "sha256:bc9eb26f4506fda01b81bcde0ca78103b6e62f991b381fec825435c836edbc29"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["big-O", "importlib-resources ; python_version < \"3.9\"", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] +type = ["pytest-mypy"] + +[[package]] +name = "zipp" +version = "3.23.0" +description = "Backport of pathlib-compatible object wrapper for zip files" +optional = false +python-versions = ">=3.9" +groups = ["doc", "test"] +markers = "python_version == \"3.9\"" files = [ - {file = "zipp-3.20.0-py3-none-any.whl", hash = "sha256:58da6168be89f0be59beb194da1250516fdaa062ccebd30127ac65d30045e10d"}, - {file = "zipp-3.20.0.tar.gz", hash = "sha256:0145e43d89664cfe1a2e533adc75adafed82fe2da404b4bbb6b026c0157bdb31"}, + {file = "zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e"}, + {file = "zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166"}, ] [package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more_itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] +type = ["pytest-mypy"] [metadata] -lock-version = "2.0" +lock-version = "2.1" python-versions = "^3.7" -content-hash = "de9ad44d919a23237212508ca6da20b929c8c6cc8aa0da01406ef2f731debe10" +content-hash = "3a799a01d8f5813c295459d99bcfb2bed3ac5a8a0b25f89115755e996bbd219b" diff --git a/pyproject.toml b/pyproject.toml index e149de23..c5c1ca31 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "graphql-core" -version = "3.3.0a6" +version = "3.3.0a9" description = """\ GraphQL-core is a Python port of GraphQL.js,\ the JavaScript reference implementation for GraphQL.""" @@ -22,6 +22,7 @@ classifiers = [ "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13" ] packages = [ { include = "graphql", from = "src" }, @@ -43,7 +44,7 @@ Changelog = "https://github.com/graphql-python/graphql-core/releases" [tool.poetry.dependencies] python = "^3.7" typing-extensions = [ - { version = "^4.12", python = ">=3.8,<3.10" }, + { version = "^4.12.2", python = ">=3.8,<3.10" }, 
{ version = "^4.7.1", python = "<3.8" }, ] @@ -53,21 +54,30 @@ optional = true [tool.poetry.group.test.dependencies] pytest = [ { version = "^8.3", python = ">=3.8" }, - { version = "^7.4", python = "<3.8"} + { version = "^7.4", python = "<3.8" } ] pytest-asyncio = [ - { version = "^0.23.8", python = ">=3.8" }, - { version = "~0.21.1", python = "<3.8"} + { version = "^0.25.2", python = ">=3.9" }, + { version = "~0.24.0", python = ">=3.8,<3.9" }, + { version = "~0.21.1", python = "<3.8" } +] +pytest-benchmark = [ + { version = "^5.1", python = ">=3.9" }, + { version = "^4.0", python = "<3.9" } ] -pytest-benchmark = "^4.0" pytest-cov = [ - { version = "^5.0", python = ">=3.8" }, + { version = "^6.0", python = ">=3.9" }, + { version = "^5.0", python = ">=3.8,<3.9" }, { version = "^4.1", python = "<3.8" }, ] pytest-describe = "^2.2" -pytest-timeout = "^2.3" +pytest-timeout = "^2.4" +pytest-codspeed = [ + { version = "^3.1.2", python = ">=3.9" }, + { version = "^2.2.1", python = "<3.8" } +] tox = [ - { version = "^4.16", python = ">=3.8" }, + { version = "^4.24", python = ">=3.8" }, { version = "^3.28", python = "<3.8" } ] @@ -75,22 +85,23 @@ tox = [ optional = true [tool.poetry.group.lint.dependencies] -ruff = ">=0.5.7,<0.6" +ruff = ">=0.12,<0.13" mypy = [ - { version = "^1.11", python = ">=3.8" }, + { version = "^1.16", python = ">=3.9" }, + { version = "~1.14", python = ">=3.8,<3.9" }, { version = "~1.4", python = "<3.8" } ] -bump2version = ">=1.0,<2" +bump2version = ">=1,<2" [tool.poetry.group.doc] optional = true [tool.poetry.group.doc.dependencies] sphinx = [ - { version = ">=7,<8", python = ">=3.8" }, + { version = ">=7,<9", python = ">=3.8" }, { version = ">=4,<6", python = "<3.8" } ] -sphinx_rtd_theme = "^2.0" +sphinx_rtd_theme = ">=2,<4" [tool.ruff] line-length = 88 @@ -144,7 +155,7 @@ select = [ "YTT", # flake8-2020 ] ignore = [ - "ANN101", "ANN102", # no type annotation for self and cls needed + "A005", # allow using standard-lib module names "ANN401", # allow explicit Any "COM812", # allow trailing commas for auto-formatting "D105", "D107", # no docstring needed for magic methods @@ -154,6 +165,7 @@ ignore = [ "D401", # do not always require imperative mood in first line "FBT001", "FBT002", "FBT003", # allow boolean parameters "ISC001", # allow string literal concatenation for auto-formatting + "PLC0415", # allow run-time imports to avoid circular dependencies "PGH003", # type ignores do not need to be specific "PLR2004", # allow some "magic" values "PYI034", # do not check return value of new method @@ -308,13 +320,17 @@ minversion = "7.4" addopts = "--benchmark-disable" # Deactivate default name pattern for test classes (we use pytest_describe). python_classes = "PyTest*" -# Handle all async fixtures and tests automatically by asyncio +# Handle all async fixtures and tests automatically by asyncio, asyncio_mode = "auto" # Set a timeout in seconds for aborting tests that run too long. timeout = "100" # Ignore config options not (yet) available in older Python versions. filterwarnings = "ignore::pytest.PytestConfigWarning" +# All tests can be found in the tests directory. +testpaths = ["tests"] +# Use the functions scope as the default for asynchronous tests. 
diff --git a/src/graphql/__init__.py b/src/graphql/__init__.py
index e85c51ee..6938435a 100644
--- a/src/graphql/__init__.py
+++ b/src/graphql/__init__.py
@@ -259,6 +259,7 @@
     GraphQLStreamDirective,
     GraphQLDeprecatedDirective,
     GraphQLSpecifiedByDirective,
+    GraphQLOneOfDirective,
     # "Enum" of Type Kinds
     TypeKind,
     # Constant Deprecation Reason
@@ -473,344 +474,345 @@
 __all__ = [
-    "version",
-    "version_info",
-    "version_js",
-    "version_info_js",
-    "graphql",
-    "graphql_sync",
-    "GraphQLSchema",
-    "GraphQLDirective",
-    "GraphQLScalarType",
-    "GraphQLObjectType",
-    "GraphQLInterfaceType",
-    "GraphQLUnionType",
-    "GraphQLEnumType",
-    "GraphQLInputObjectType",
-    "GraphQLList",
-    "GraphQLNonNull",
-    "specified_scalar_types",
-    "GraphQLInt",
-    "GraphQLFloat",
-    "GraphQLString",
-    "GraphQLBoolean",
-    "GraphQLID",
+    "BREAK",
+    "DEFAULT_DEPRECATION_REASON",
     "GRAPHQL_MAX_INT",
     "GRAPHQL_MIN_INT",
-    "specified_directives",
-    "GraphQLIncludeDirective",
-    "GraphQLSkipDirective",
-    "GraphQLDeferDirective",
-    "GraphQLStreamDirective",
-    "GraphQLDeprecatedDirective",
-    "GraphQLSpecifiedByDirective",
-    "TypeKind",
-    "DEFAULT_DEPRECATION_REASON",
-    "introspection_types",
-    "SchemaMetaFieldDef",
-    "TypeMetaFieldDef",
-    "TypeNameMetaFieldDef",
-    "is_schema",
-    "is_directive",
-    "is_type",
-    "is_scalar_type",
-    "is_object_type",
-    "is_interface_type",
-    "is_union_type",
-    "is_enum_type",
-    "is_input_object_type",
-    "is_list_type",
-    "is_non_null_type",
-    "is_input_type",
-    "is_output_type",
-    "is_leaf_type",
-    "is_composite_type",
-    "is_abstract_type",
-    "is_wrapping_type",
-    "is_nullable_type",
-    "is_named_type",
-    "is_required_argument",
-    "is_required_input_field",
-    "is_specified_scalar_type",
-    "is_introspection_type",
-    "is_specified_directive",
-    "assert_schema",
-    "assert_directive",
-    "assert_type",
-    "assert_scalar_type",
-    "assert_object_type",
-    "assert_interface_type",
-    "assert_union_type",
-    "assert_enum_type",
-    "assert_input_object_type",
-    "assert_list_type",
-    "assert_non_null_type",
-    "assert_input_type",
-    "assert_output_type",
-    "assert_leaf_type",
-    "assert_composite_type",
-    "assert_abstract_type",
-    "assert_wrapping_type",
-    "assert_nullable_type",
-    "assert_named_type",
-    "get_nullable_type",
-    "get_named_type",
-    "resolve_thunk",
-    "validate_schema",
-    "assert_valid_schema",
-    "assert_name",
-    "assert_enum_value_name",
-    "GraphQLType",
-    "GraphQLInputType",
-    "GraphQLOutputType",
-    "GraphQLLeafType",
-    "GraphQLCompositeType",
+    "IDLE",
+    "REMOVE",
+    "SKIP",
+    "ASTValidationRule",
+    "ArgumentNode",
+    "BooleanValueNode",
+    "BreakingChange",
+    "BreakingChangeType",
+    "ConstArgumentNode",
+    "ConstDirectiveNode",
+    "ConstListValueNode",
+    "ConstObjectFieldNode",
+    "ConstObjectValueNode",
+    "ConstValueNode",
+    "DangerousChange",
+    "DangerousChangeType",
+    "DefinitionNode",
+    "DirectiveDefinitionNode",
+    "DirectiveLocation",
+    "DirectiveNode",
+    "DocumentNode",
+    "EnumTypeDefinitionNode",
+    "EnumTypeExtensionNode",
+    "EnumValueDefinitionNode",
+    "EnumValueNode",
+    "ErrorBoundaryNode",
+    "ExecutableDefinitionNode",
+    "ExecutableDefinitionsRule",
+    "ExecutionContext",
+    "ExecutionResult",
+    "ExperimentalIncrementalExecutionResults",
+    "FieldDefinitionNode",
+    "FieldNode",
+    "FieldsOnCorrectTypeRule",
+    "FloatValueNode",
+    "FormattedExecutionResult",
+    "FormattedIncrementalDeferResult",
+    "FormattedIncrementalResult",
+    "FormattedIncrementalStreamResult",
+    "FormattedInitialIncrementalExecutionResult",
+    "FormattedSubsequentIncrementalExecutionResult",
+    "FragmentDefinitionNode",
+    "FragmentSpreadNode",
+    "FragmentsOnCompositeTypesRule",
     "GraphQLAbstractType",
-    "GraphQLWrappingType",
-    "GraphQLNullableType",
-    "GraphQLNullableInputType",
-    "GraphQLNullableOutputType",
-    "GraphQLNamedType",
-    "GraphQLNamedInputType",
-    "GraphQLNamedOutputType",
-    "Thunk",
-    "ThunkCollection",
-    "ThunkMapping",
     "GraphQLArgument",
+    "GraphQLArgumentKwargs",
     "GraphQLArgumentMap",
+    "GraphQLBoolean",
+    "GraphQLCompositeType",
+    "GraphQLDeferDirective",
+    "GraphQLDeprecatedDirective",
+    "GraphQLDirective",
+    "GraphQLDirectiveKwargs",
+    "GraphQLEnumType",
+    "GraphQLEnumTypeKwargs",
     "GraphQLEnumValue",
+    "GraphQLEnumValueKwargs",
     "GraphQLEnumValueMap",
+    "GraphQLError",
+    "GraphQLErrorExtensions",
     "GraphQLField",
+    "GraphQLFieldKwargs",
     "GraphQLFieldMap",
     "GraphQLFieldResolver",
+    "GraphQLFloat",
+    "GraphQLFormattedError",
+    "GraphQLID",
+    "GraphQLIncludeDirective",
     "GraphQLInputField",
+    "GraphQLInputFieldKwargs",
     "GraphQLInputFieldMap",
     "GraphQLInputFieldOutType",
-    "GraphQLScalarSerializer",
-    "GraphQLScalarValueParser",
-    "GraphQLScalarLiteralParser",
-    "GraphQLIsTypeOfFn",
-    "GraphQLResolveInfo",
-    "ResponsePath",
-    "GraphQLTypeResolver",
-    "GraphQLArgumentKwargs",
-    "GraphQLDirectiveKwargs",
-    "GraphQLEnumTypeKwargs",
-    "GraphQLEnumValueKwargs",
-    "GraphQLFieldKwargs",
-    "GraphQLInputFieldKwargs",
+    "GraphQLInputObjectType",
     "GraphQLInputObjectTypeKwargs",
+    "GraphQLInputType",
+    "GraphQLInt",
+    "GraphQLInterfaceType",
     "GraphQLInterfaceTypeKwargs",
+    "GraphQLIsTypeOfFn",
+    "GraphQLLeafType",
+    "GraphQLList",
+    "GraphQLNamedInputType",
+    "GraphQLNamedOutputType",
+    "GraphQLNamedType",
     "GraphQLNamedTypeKwargs",
+    "GraphQLNonNull",
+    "GraphQLNullableInputType",
+    "GraphQLNullableOutputType",
+    "GraphQLNullableType",
+    "GraphQLObjectType",
     "GraphQLObjectTypeKwargs",
+    "GraphQLOneOfDirective",
+    "GraphQLOutputType",
+    "GraphQLResolveInfo",
+    "GraphQLScalarLiteralParser",
+    "GraphQLScalarSerializer",
+    "GraphQLScalarType",
     "GraphQLScalarTypeKwargs",
+    "GraphQLScalarValueParser",
+    "GraphQLSchema",
     "GraphQLSchemaKwargs",
-    "GraphQLUnionTypeKwargs",
-    "Source",
-    "get_location",
-    "print_location",
-    "print_source_location",
-    "Lexer",
-    "TokenKind",
-    "parse",
-    "parse_value",
-    "parse_const_value",
-    "parse_type",
-    "print_ast",
-    "visit",
-    "ParallelVisitor",
-    "TypeInfoVisitor",
-    "Visitor",
-    "VisitorAction",
-    "VisitorKeyMap",
-    "BREAK",
-    "SKIP",
-    "REMOVE",
-    "IDLE",
-    "DirectiveLocation",
-    "is_definition_node",
-    "is_executable_definition_node",
-    "is_nullability_assertion_node",
-    "is_selection_node",
-    "is_value_node",
-    "is_const_value_node",
-    "is_type_node",
-    "is_type_system_definition_node",
-    "is_type_definition_node",
-    "is_type_system_extension_node",
-    "is_type_extension_node",
-    "SourceLocation",
-    "Location",
-    "Token",
-    "Node",
-    "NameNode",
-    "DocumentNode",
-    "DefinitionNode",
-    "ExecutableDefinitionNode",
-    "OperationDefinitionNode",
-    "OperationType",
-    "VariableDefinitionNode",
-    "VariableNode",
-    "SelectionSetNode",
-    "SelectionNode",
-    "FieldNode",
-    "ArgumentNode",
-    "NullabilityAssertionNode",
-    "NonNullAssertionNode",
-    "ErrorBoundaryNode",
-    "ListNullabilityOperatorNode",
-    "ConstArgumentNode",
-    "FragmentSpreadNode",
-    "InlineFragmentNode",
-    "FragmentDefinitionNode",
-    "ValueNode",
-    "ConstValueNode",
-    "IntValueNode",
-    "FloatValueNode",
-    "StringValueNode",
-    "BooleanValueNode",
-    "NullValueNode",
-    "EnumValueNode",
-    "ListValueNode",
-    "ConstListValueNode",
-    "ObjectValueNode",
-    "ConstObjectValueNode",
-    "ObjectFieldNode",
-    "ConstObjectFieldNode",
-    "DirectiveNode",
-    "ConstDirectiveNode",
-    "TypeNode",
-    "NamedTypeNode",
-    "ListTypeNode",
-    "NonNullTypeNode",
-    "TypeSystemDefinitionNode",
-    "SchemaDefinitionNode",
-    "OperationTypeDefinitionNode",
-    "TypeDefinitionNode",
-    "ScalarTypeDefinitionNode",
-    "ObjectTypeDefinitionNode",
-    "FieldDefinitionNode",
-    "InputValueDefinitionNode",
-    "InterfaceTypeDefinitionNode",
-    "UnionTypeDefinitionNode",
-    "EnumTypeDefinitionNode",
-    "EnumValueDefinitionNode",
-    "InputObjectTypeDefinitionNode",
-    "DirectiveDefinitionNode",
-    "TypeSystemExtensionNode",
-    "SchemaExtensionNode",
-    "TypeExtensionNode",
-    "ScalarTypeExtensionNode",
-    "ObjectTypeExtensionNode",
-    "InterfaceTypeExtensionNode",
-    "UnionTypeExtensionNode",
-    "EnumTypeExtensionNode",
-    "InputObjectTypeExtensionNode",
-    "execute",
-    "execute_sync",
-    "default_field_resolver",
-    "default_type_resolver",
-    "get_argument_values",
-    "get_directive_values",
-    "get_variable_values",
-    "ExecutionContext",
-    "ExecutionResult",
-    "ExperimentalIncrementalExecutionResults",
-    "InitialIncrementalExecutionResult",
-    "SubsequentIncrementalExecutionResult",
+    "GraphQLSkipDirective",
+    "GraphQLSpecifiedByDirective",
+    "GraphQLStreamDirective",
+    "GraphQLString",
+    "GraphQLSyntaxError",
+    "GraphQLType",
+    "GraphQLTypeResolver",
+    "GraphQLUnionType",
+    "GraphQLUnionTypeKwargs",
+    "GraphQLWrappingType",
     "IncrementalDeferResult",
-    "IncrementalStreamResult",
     "IncrementalResult",
-    "FormattedExecutionResult",
-    "FormattedInitialIncrementalExecutionResult",
-    "FormattedSubsequentIncrementalExecutionResult",
-    "FormattedIncrementalDeferResult",
-    "FormattedIncrementalStreamResult",
-    "FormattedIncrementalResult",
-    "Middleware",
-    "MiddlewareManager",
-    "subscribe",
-    "create_source_event_stream",
-    "map_async_iterable",
-    "validate",
-    "ValidationContext",
-    "ValidationRule",
-    "ASTValidationRule",
-    "SDLValidationRule",
-    "specified_rules",
-    "ExecutableDefinitionsRule",
-    "FieldsOnCorrectTypeRule",
-    "FragmentsOnCompositeTypesRule",
+    "IncrementalStreamResult",
+    "InitialIncrementalExecutionResult",
+    "InlineFragmentNode",
+    "InputObjectTypeDefinitionNode",
+    "InputObjectTypeExtensionNode",
+    "InputValueDefinitionNode",
+    "IntValueNode",
+    "InterfaceTypeDefinitionNode",
+    "InterfaceTypeExtensionNode",
+    "IntrospectionQuery",
     "KnownArgumentNamesRule",
     "KnownDirectivesRule",
     "KnownFragmentNamesRule",
     "KnownTypeNamesRule",
+    "Lexer",
+    "ListNullabilityOperatorNode",
+    "ListTypeNode",
+    "ListValueNode",
+    "Location",
     "LoneAnonymousOperationRule",
+    "LoneSchemaDefinitionRule",
+    "Middleware",
+    "MiddlewareManager",
+    "NameNode",
+    "NamedTypeNode",
+    "NoDeprecatedCustomRule",
     "NoFragmentCyclesRule",
+    "NoSchemaIntrospectionCustomRule",
     "NoUndefinedVariablesRule",
     "NoUnusedFragmentsRule",
     "NoUnusedVariablesRule",
+    "Node",
+    "NonNullAssertionNode",
+    "NonNullTypeNode",
+    "NullValueNode",
+    "NullabilityAssertionNode",
+    "ObjectFieldNode",
+    "ObjectTypeDefinitionNode",
+    "ObjectTypeExtensionNode",
+    "ObjectValueNode",
+    "OperationDefinitionNode",
+    "OperationType",
+    "OperationTypeDefinitionNode",
     "OverlappingFieldsCanBeMergedRule",
+    "ParallelVisitor",
     "PossibleFragmentSpreadsRule",
+    "PossibleTypeExtensionsRule",
     "ProvidedRequiredArgumentsRule",
+    "ResponsePath",
+    "SDLValidationRule",
     "ScalarLeafsRule",
+    "ScalarTypeDefinitionNode",
+    "ScalarTypeExtensionNode",
+    "SchemaDefinitionNode",
+    "SchemaExtensionNode",
+    "SchemaMetaFieldDef",
+    "SelectionNode",
+    "SelectionSetNode",
     "SingleFieldSubscriptionsRule",
+    "Source",
+    "SourceLocation",
+    "StringValueNode",
+    "SubsequentIncrementalExecutionResult",
+    "Thunk",
+    "ThunkCollection",
+    "ThunkMapping",
+    "Token",
+    "TokenKind",
+    "TypeDefinitionNode",
+    "TypeExtensionNode",
+    "TypeInfo",
+    "TypeInfoVisitor",
+    "TypeKind",
+    "TypeMetaFieldDef",
+    "TypeNameMetaFieldDef",
+    "TypeNode",
+    "TypeSystemDefinitionNode",
+    "TypeSystemExtensionNode",
+    "Undefined",
+    "UndefinedType",
+    "UnionTypeDefinitionNode",
+    "UnionTypeExtensionNode",
+    "UniqueArgumentDefinitionNamesRule",
     "UniqueArgumentNamesRule",
+    "UniqueDirectiveNamesRule",
     "UniqueDirectivesPerLocationRule",
+    "UniqueEnumValueNamesRule",
+    "UniqueFieldDefinitionNamesRule",
     "UniqueFragmentNamesRule",
     "UniqueInputFieldNamesRule",
     "UniqueOperationNamesRule",
+    "UniqueOperationTypesRule",
+    "UniqueTypeNamesRule",
     "UniqueVariableNamesRule",
+    "ValidationContext",
+    "ValidationRule",
+    "ValueNode",
     "ValuesOfCorrectTypeRule",
+    "VariableDefinitionNode",
+    "VariableNode",
     "VariablesAreInputTypesRule",
     "VariablesInAllowedPositionRule",
-    "LoneSchemaDefinitionRule",
-    "UniqueOperationTypesRule",
-    "UniqueTypeNamesRule",
-    "UniqueEnumValueNamesRule",
-    "UniqueFieldDefinitionNamesRule",
-    "UniqueArgumentDefinitionNamesRule",
-    "UniqueDirectiveNamesRule",
-    "PossibleTypeExtensionsRule",
-    "NoDeprecatedCustomRule",
-    "NoSchemaIntrospectionCustomRule",
-    "GraphQLError",
-    "GraphQLErrorExtensions",
-    "GraphQLFormattedError",
-    "GraphQLSyntaxError",
-    "located_error",
-    "get_introspection_query",
-    "IntrospectionQuery",
-    "get_operation_ast",
-    "introspection_from_schema",
-    "build_client_schema",
+    "Visitor",
+    "VisitorAction",
+    "VisitorKeyMap",
+    "assert_abstract_type",
+    "assert_composite_type",
+    "assert_directive",
+    "assert_enum_type",
+    "assert_enum_value_name",
+    "assert_input_object_type",
+    "assert_input_type",
+    "assert_interface_type",
+    "assert_leaf_type",
+    "assert_list_type",
+    "assert_name",
+    "assert_named_type",
+    "assert_non_null_type",
+    "assert_nullable_type",
+    "assert_object_type",
+    "assert_output_type",
+    "assert_scalar_type",
+    "assert_schema",
+    "assert_type",
+    "assert_union_type",
+    "assert_valid_schema",
+    "assert_wrapping_type",
+    "ast_from_value",
+    "ast_to_dict",
     "build_ast_schema",
+    "build_client_schema",
     "build_schema",
+    "coerce_input_value",
+    "concat_ast",
+    "create_source_event_stream",
+    "default_field_resolver",
+    "default_type_resolver",
+    "do_types_overlap",
+    "execute",
+    "execute_sync",
     "extend_schema",
+    "find_breaking_changes",
+    "find_dangerous_changes",
+    "get_argument_values",
+    "get_directive_values",
+    "get_introspection_query",
+    "get_location",
+    "get_named_type",
+    "get_nullable_type",
+    "get_operation_ast",
+    "get_variable_values",
+    "graphql",
+    "graphql_sync",
+    "introspection_from_schema",
+    "introspection_types",
+    "is_abstract_type",
+    "is_composite_type",
+    "is_const_value_node",
+    "is_definition_node",
+    "is_directive",
+    "is_enum_type",
+    "is_equal_type",
+    "is_executable_definition_node",
+    "is_input_object_type",
+    "is_input_type",
+    "is_interface_type",
+    "is_introspection_type",
+    "is_leaf_type",
+    "is_list_type",
+    "is_named_type",
+    "is_non_null_type",
+    "is_nullability_assertion_node",
+    "is_nullable_type",
+    "is_object_type",
+    "is_output_type",
+    "is_required_argument",
+    "is_required_input_field",
+    "is_scalar_type",
+    "is_schema",
+    "is_selection_node",
+    "is_specified_directive",
+    "is_specified_scalar_type",
+    "is_type",
+    "is_type_definition_node",
+    "is_type_extension_node",
+    "is_type_node",
+    "is_type_sub_type_of",
+    "is_type_system_definition_node",
+    "is_type_system_extension_node",
+    "is_union_type",
+    "is_value_node",
+    "is_wrapping_type",
     "lexicographic_sort_schema",
-    "print_schema",
-    "print_type",
+    "located_error",
+    "map_async_iterable",
+    "parse",
+    "parse_const_value",
+    "parse_type",
+    "parse_value",
+    "print_ast",
     "print_directive",
     "print_introspection_schema",
+    "print_location",
+    "print_schema",
+    "print_source_location",
+    "print_type",
+    "resolve_thunk",
+    "separate_operations",
+    "specified_directives",
+    "specified_rules",
+    "specified_scalar_types",
+    "strip_ignored_characters",
+    "subscribe",
     "type_from_ast",
+    "validate",
+    "validate_schema",
     "value_from_ast",
     "value_from_ast_untyped",
-    "ast_from_value",
-    "ast_to_dict",
-    "TypeInfo",
-    "coerce_input_value",
-    "concat_ast",
-    "separate_operations",
-    "strip_ignored_characters",
-    "is_equal_type",
-    "is_type_sub_type_of",
-    "do_types_overlap",
-    "find_breaking_changes",
-    "find_dangerous_changes",
-    "BreakingChange",
-    "BreakingChangeType",
-    "DangerousChange",
-    "DangerousChangeType",
-    "Undefined",
-    "UndefinedType",
+    "version",
+    "version_info",
+    "version_info_js",
+    "version_js",
+    "visit",
 ]
diff --git a/src/graphql/error/graphql_error.py b/src/graphql/error/graphql_error.py
index ff128748..8123a713 100644
--- a/src/graphql/error/graphql_error.py
+++ b/src/graphql/error/graphql_error.py
@@ -108,14 +108,14 @@ class GraphQLError(Exception):
     """Extension fields to add to the formatted error"""

     __slots__ = (
+        "extensions",
+        "locations",
         "message",
         "nodes",
-        "source",
-        "positions",
-        "locations",
-        "path",
         "original_error",
-        "extensions",
+        "path",
+        "positions",
+        "source",
     )

     __hash__ = Exception.__hash__
"FormattedInitialIncrementalExecutionResult", + "FormattedSubsequentIncrementalExecutionResult", + "IncrementalDeferResult", + "IncrementalResult", + "IncrementalStreamResult", + "InitialIncrementalExecutionResult", "Middleware", "MiddlewareManager", + "SubsequentIncrementalExecutionResult", + "create_source_event_stream", + "default_field_resolver", + "default_type_resolver", + "execute", + "execute_sync", + "experimental_execute_incrementally", "get_argument_values", "get_directive_values", "get_variable_values", + "map_async_iterable", + "subscribe", ] diff --git a/src/graphql/execution/async_iterables.py b/src/graphql/execution/async_iterables.py index 747a515d..b8faad88 100644 --- a/src/graphql/execution/async_iterables.py +++ b/src/graphql/execution/async_iterables.py @@ -2,7 +2,7 @@ from __future__ import annotations -from contextlib import AbstractAsyncContextManager +from contextlib import AbstractAsyncContextManager, suppress from typing import ( AsyncGenerator, AsyncIterable, @@ -20,6 +20,8 @@ AsyncIterableOrGenerator = Union[AsyncGenerator[T, None], AsyncIterable[T]] +suppress_exceptions = suppress(Exception) + class aclosing(AbstractAsyncContextManager, Generic[T]): # noqa: N801 """Async context manager for safely finalizing an async iterator or generator. @@ -40,7 +42,8 @@ async def __aexit__(self, *_exc_info: object) -> None: except AttributeError: pass # do not complain if the iterator has no aclose() method else: - await aclose() + with suppress_exceptions: # or if the aclose() method fails + await aclose() async def map_async_iterable( diff --git a/src/graphql/execution/build_field_plan.py b/src/graphql/execution/build_field_plan.py new file mode 100644 index 00000000..a8937a0d --- /dev/null +++ b/src/graphql/execution/build_field_plan.py @@ -0,0 +1,135 @@ +"""Build field plan""" + +from __future__ import annotations + +import sys +from typing import TYPE_CHECKING, Dict, NamedTuple + +from ..pyutils import RefMap, RefSet +from .collect_fields import DeferUsage, FieldDetails + +if TYPE_CHECKING: + from ..language import FieldNode + +try: + from typing import TypeAlias +except ImportError: # Python < 3.10 + from typing_extensions import TypeAlias + +__all__ = [ + "DeferUsageSet", + "FieldGroup", + "FieldPlan", + "GroupedFieldSet", + "NewGroupedFieldSetDetails", + "build_field_plan", +] + + +DeferUsageSet: TypeAlias = RefSet[DeferUsage] + + +class FieldGroup(NamedTuple): + """A group of fields with defer usages.""" + + fields: list[FieldDetails] + defer_usages: DeferUsageSet | None = None + + def to_nodes(self) -> list[FieldNode]: + """Return the field nodes in this group.""" + return [field_details.node for field_details in self.fields] + + +if sys.version_info < (3, 9): + GroupedFieldSet: TypeAlias = Dict[str, FieldGroup] +else: # Python >= 3.9 + GroupedFieldSet: TypeAlias = dict[str, FieldGroup] + + +class NewGroupedFieldSetDetails(NamedTuple): + """Details of a new grouped field set.""" + + grouped_field_set: GroupedFieldSet + should_initiate_defer: bool + + +class FieldPlan(NamedTuple): + """A plan for executing fields.""" + + grouped_field_set: GroupedFieldSet + new_grouped_field_set_details_map: RefMap[DeferUsageSet, NewGroupedFieldSetDetails] + + +def build_field_plan( + fields: dict[str, list[FieldDetails]], + parent_defer_usages: DeferUsageSet | None = None, +) -> FieldPlan: + """Build a plan for executing fields.""" + if parent_defer_usages is None: + parent_defer_usages = RefSet() + + grouped_field_set: GroupedFieldSet = {} + + new_grouped_field_set_details_map: 
diff --git a/src/graphql/execution/build_field_plan.py b/src/graphql/execution/build_field_plan.py
new file mode 100644
index 00000000..a8937a0d
--- /dev/null
+++ b/src/graphql/execution/build_field_plan.py
@@ -0,0 +1,135 @@
+"""Build field plan"""
+
+from __future__ import annotations
+
+import sys
+from typing import TYPE_CHECKING, Dict, NamedTuple
+
+from ..pyutils import RefMap, RefSet
+from .collect_fields import DeferUsage, FieldDetails
+
+if TYPE_CHECKING:
+    from ..language import FieldNode
+
+try:
+    from typing import TypeAlias
+except ImportError:  # Python < 3.10
+    from typing_extensions import TypeAlias
+
+__all__ = [
+    "DeferUsageSet",
+    "FieldGroup",
+    "FieldPlan",
+    "GroupedFieldSet",
+    "NewGroupedFieldSetDetails",
+    "build_field_plan",
+]
+
+
+DeferUsageSet: TypeAlias = RefSet[DeferUsage]
+
+
+class FieldGroup(NamedTuple):
+    """A group of fields with defer usages."""
+
+    fields: list[FieldDetails]
+    defer_usages: DeferUsageSet | None = None
+
+    def to_nodes(self) -> list[FieldNode]:
+        """Return the field nodes in this group."""
+        return [field_details.node for field_details in self.fields]
+
+
+if sys.version_info < (3, 9):
+    GroupedFieldSet: TypeAlias = Dict[str, FieldGroup]
+else:  # Python >= 3.9
+    GroupedFieldSet: TypeAlias = dict[str, FieldGroup]
+
+
+class NewGroupedFieldSetDetails(NamedTuple):
+    """Details of a new grouped field set."""
+
+    grouped_field_set: GroupedFieldSet
+    should_initiate_defer: bool
+
+
+class FieldPlan(NamedTuple):
+    """A plan for executing fields."""
+
+    grouped_field_set: GroupedFieldSet
+    new_grouped_field_set_details_map: RefMap[DeferUsageSet, NewGroupedFieldSetDetails]
+
+
+def build_field_plan(
+    fields: dict[str, list[FieldDetails]],
+    parent_defer_usages: DeferUsageSet | None = None,
+) -> FieldPlan:
+    """Build a plan for executing fields."""
+    if parent_defer_usages is None:
+        parent_defer_usages = RefSet()
+
+    grouped_field_set: GroupedFieldSet = {}
+
+    new_grouped_field_set_details_map: RefMap[
+        DeferUsageSet, NewGroupedFieldSetDetails
+    ] = RefMap()
+
+    map_: dict[str, tuple[DeferUsageSet, list[FieldDetails]]] = {}
+
+    for response_key, field_details_list in fields.items():
+        defer_usage_set: RefSet[DeferUsage] = RefSet()
+        in_original_result = False
+        for field_details in field_details_list:
+            defer_usage = field_details.defer_usage
+            if defer_usage is None:
+                in_original_result = True
+                continue
+            defer_usage_set.add(defer_usage)
+        if in_original_result:
+            defer_usage_set.clear()
+        else:
+            defer_usage_set -= {
+                defer_usage
+                for defer_usage in defer_usage_set
+                if any(
+                    ancestor in defer_usage_set for ancestor in defer_usage.ancestors
+                )
+            }
+        map_[response_key] = (defer_usage_set, field_details_list)
+
+    for response_key, [defer_usage_set, field_details_list] in map_.items():
+        if defer_usage_set == parent_defer_usages:
+            field_group = grouped_field_set.get(response_key)
+            if field_group is None:  # pragma: no cover else
+                field_group = FieldGroup([], defer_usage_set)
+                grouped_field_set[response_key] = field_group
+            field_group.fields.extend(field_details_list)
+            continue
+
+        for (
+            new_grouped_field_set_defer_usage_set,
+            new_grouped_field_set_details,
+        ) in new_grouped_field_set_details_map.items():
+            if new_grouped_field_set_defer_usage_set == defer_usage_set:
+                new_grouped_field_set = new_grouped_field_set_details.grouped_field_set
+                break
+        else:
+            new_grouped_field_set = {}
+            new_grouped_field_set_details = NewGroupedFieldSetDetails(
+                new_grouped_field_set,
+                any(
+                    defer_usage not in parent_defer_usages
+                    for defer_usage in defer_usage_set
+                ),
+            )
+            new_grouped_field_set_details_map[defer_usage_set] = (
+                new_grouped_field_set_details
+            )
+
+        field_group = new_grouped_field_set.get(response_key)
+        if field_group is None:  # pragma: no cover else
+            field_group = FieldGroup([], defer_usage_set)
+            new_grouped_field_set[response_key] = field_group
+        field_group.fields.extend(field_details_list)
+
+    return FieldPlan(grouped_field_set, new_grouped_field_set_details_map)
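A rough sketch (internal APIs, illustrative only) of what `build_field_plan` receives and returns for a query without `@defer`; the expected outputs in the comments are assumptions based on the code above:

from graphql import parse
from graphql.execution.build_field_plan import build_field_plan
from graphql.execution.collect_fields import FieldDetails

doc = parse("{ hero { name } }")
field_node = doc.definitions[0].selection_set.selections[0]

plan = build_field_plan({"hero": [FieldDetails(field_node, None)]})
print(list(plan.grouped_field_set))  # expected: ["hero"]
print(plan.grouped_field_set["hero"].to_nodes() == [field_node])  # expected: True
print(len(plan.new_grouped_field_set_details_map))  # expected: 0 - nothing deferred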
diff --git a/src/graphql/execution/collect_fields.py b/src/graphql/execution/collect_fields.py
index 5cb5a723..0c2ae348 100644
--- a/src/graphql/execution/collect_fields.py
+++ b/src/graphql/execution/collect_fields.py
@@ -2,9 +2,8 @@

 from __future__ import annotations

-import sys
 from collections import defaultdict
-from typing import Any, Dict, List, NamedTuple
+from typing import Any, NamedTuple

 from ..language import (
     FieldNode,
@@ -26,40 +25,56 @@
 from ..utilities.type_from_ast import type_from_ast
 from .values import get_directive_values

-try:
-    from typing import TypeAlias
-except ImportError:  # Python < 3.10
-    from typing_extensions import TypeAlias
-
-
 __all__ = [
+    "CollectFieldsContext",
+    "CollectedFields",
+    "DeferUsage",
+    "FieldDetails",
     "collect_fields",
     "collect_subfields",
-    "FieldGroup",
-    "FieldsAndPatches",
-    "GroupedFieldSet",
 ]

-if sys.version_info < (3, 9):
-    FieldGroup: TypeAlias = List[FieldNode]
-    GroupedFieldSet = Dict[str, FieldGroup]
-else:  # Python >= 3.9
-    FieldGroup: TypeAlias = list[FieldNode]
-    GroupedFieldSet = dict[str, FieldGroup]
-

-class PatchFields(NamedTuple):
-    """Optionally labelled set of fields to be used as a patch."""
+class DeferUsage(NamedTuple):
+    """An optionally labelled linked list of defer usages."""

     label: str | None
-    grouped_field_set: GroupedFieldSet
+    parent_defer_usage: DeferUsage | None
+
+    @property
+    def ancestors(self) -> list[DeferUsage]:
+        """Get the ancestors of this defer usage."""
+        ancestors: list[DeferUsage] = []
+        parent_defer_usage = self.parent_defer_usage
+        while parent_defer_usage is not None:
+            ancestors.append(parent_defer_usage)
+            parent_defer_usage = parent_defer_usage.parent_defer_usage
+        return ancestors[::-1]
+
+
+class FieldDetails(NamedTuple):
+    """A field node and its defer usage."""
+
+    node: FieldNode
+    defer_usage: DeferUsage | None
+

-class FieldsAndPatches(NamedTuple):
-    """Tuple of collected fields and patches to be applied."""
-
-    grouped_field_set: GroupedFieldSet
-    patches: list[PatchFields]
+class CollectFieldsContext(NamedTuple):
+    """Context for collecting fields."""
+
+    schema: GraphQLSchema
+    fragments: dict[str, FragmentDefinitionNode]
+    variable_values: dict[str, Any]
+    operation: OperationDefinitionNode
+    runtime_type: GraphQLObjectType
+    visited_fragment_names: set[str]
+
+
+class CollectedFields(NamedTuple):
+    """Collected fields with new defer usages."""
+
+    fields: dict[str, list[FieldDetails]]
+    new_defer_usages: list[DeferUsage]


 def collect_fields(
@@ -68,7 +83,7 @@
     variable_values: dict[str, Any],
     runtime_type: GraphQLObjectType,
     operation: OperationDefinitionNode,
-) -> FieldsAndPatches:
+) -> CollectedFields:
     """Collect fields.

     Given a selection_set, collects all the fields and returns them.
@@ -79,20 +94,21 @@
     For internal use only.
     """
-    grouped_field_set: dict[str, list[FieldNode]] = defaultdict(list)
-    patches: list[PatchFields] = []
-    collect_fields_impl(
+    grouped_field_set: dict[str, list[FieldDetails]] = defaultdict(list)
+    new_defer_usages: list[DeferUsage] = []
+    context = CollectFieldsContext(
         schema,
         fragments,
         variable_values,
         operation,
         runtime_type,
-        operation.selection_set,
-        grouped_field_set,
-        patches,
         set(),
     )
-    return FieldsAndPatches(grouped_field_set, patches)
+
+    collect_fields_impl(
+        context, operation.selection_set, grouped_field_set, new_defer_usages
+    )
+    return CollectedFields(grouped_field_set, new_defer_usages)


 def collect_subfields(
@@ -101,8 +117,8 @@
     variable_values: dict[str, Any],
     operation: OperationDefinitionNode,
     return_type: GraphQLObjectType,
-    field_group: FieldGroup,
-) -> FieldsAndPatches:
+    field_details: list[FieldDetails],
+) -> CollectedFields:
     """Collect subfields.

     Given a list of field nodes, collects all the subfields of the passed in fields,
@@ -114,138 +130,126 @@
     For internal use only.
     """
-    sub_grouped_field_set: dict[str, list[FieldNode]] = defaultdict(list)
-    visited_fragment_names: set[str] = set()
-
-    sub_patches: list[PatchFields] = []
-    sub_fields_and_patches = FieldsAndPatches(sub_grouped_field_set, sub_patches)
+    context = CollectFieldsContext(
+        schema,
+        fragments,
+        variable_values,
+        operation,
+        return_type,
+        set(),
+    )
+    sub_grouped_field_set: dict[str, list[FieldDetails]] = defaultdict(list)
+    new_defer_usages: list[DeferUsage] = []

-    for node in field_group:
+    for field_detail in field_details:
+        node = field_detail.node
         if node.selection_set:
             collect_fields_impl(
-                schema,
-                fragments,
-                variable_values,
-                operation,
-                return_type,
+                context,
                 node.selection_set,
                 sub_grouped_field_set,
-                sub_patches,
-                visited_fragment_names,
+                new_defer_usages,
+                field_detail.defer_usage,
             )
-    return sub_fields_and_patches
+
+    return CollectedFields(sub_grouped_field_set, new_defer_usages)


 def collect_fields_impl(
-    schema: GraphQLSchema,
-    fragments: dict[str, FragmentDefinitionNode],
-    variable_values: dict[str, Any],
-    operation: OperationDefinitionNode,
-    runtime_type: GraphQLObjectType,
+    context: CollectFieldsContext,
     selection_set: SelectionSetNode,
-    grouped_field_set: dict[str, list[FieldNode]],
-    patches: list[PatchFields],
-    visited_fragment_names: set[str],
+    grouped_field_set: dict[str, list[FieldDetails]],
+    new_defer_usages: list[DeferUsage],
+    defer_usage: DeferUsage | None = None,
 ) -> None:
     """Collect fields (internal implementation)."""
-    patch_fields: dict[str, list[FieldNode]]
+    (
+        schema,
+        fragments,
+        variable_values,
+        operation,
+        runtime_type,
+        visited_fragment_names,
+    ) = context

     for selection in selection_set.selections:
         if isinstance(selection, FieldNode):
             if not should_include_node(variable_values, selection):
                 continue
-            grouped_field_set[get_field_entry_key(selection)].append(selection)
+            key = get_field_entry_key(selection)
+            grouped_field_set[key].append(FieldDetails(selection, defer_usage))
         elif isinstance(selection, InlineFragmentNode):
             if not should_include_node(
                 variable_values, selection
             ) or not does_fragment_condition_match(schema, selection, runtime_type):
                 continue

-            defer = get_defer_values(operation, variable_values, selection)
-            if defer:
-                patch_fields = defaultdict(list)
+            new_defer_usage = get_defer_usage(
+                operation, variable_values, selection, defer_usage
+            )
+
+            if new_defer_usage is None:
                 collect_fields_impl(
-                    schema,
-                    fragments,
-                    variable_values,
-                    operation,
-                    runtime_type,
+                    context,
                     selection.selection_set,
-                    patch_fields,
-                    patches,
-                    visited_fragment_names,
+                    grouped_field_set,
+                    new_defer_usages,
+                    defer_usage,
                 )
-                patches.append(PatchFields(defer.label, patch_fields))
             else:
+                new_defer_usages.append(new_defer_usage)
                 collect_fields_impl(
-                    schema,
-                    fragments,
-                    variable_values,
-                    operation,
-                    runtime_type,
+                    context,
                     selection.selection_set,
                     grouped_field_set,
-                    patches,
-                    visited_fragment_names,
+                    new_defer_usages,
+                    new_defer_usage,
                 )
         elif isinstance(selection, FragmentSpreadNode):  # pragma: no cover else
             frag_name = selection.name.value

-            if not should_include_node(variable_values, selection):
-                continue
+            new_defer_usage = get_defer_usage(
+                operation, variable_values, selection, defer_usage
+            )

-            defer = get_defer_values(operation, variable_values, selection)
-            if frag_name in visited_fragment_names and not defer:
+            if new_defer_usage is None and (
+                frag_name in visited_fragment_names
+                or not should_include_node(variable_values, selection)
+            ):
                 continue

             fragment = fragments.get(frag_name)
-            if not fragment or not does_fragment_condition_match(
+            if fragment is None or not does_fragment_condition_match(
                 schema, fragment, runtime_type
             ):
                 continue

-            if not defer:
+            if new_defer_usage is None:
                 visited_fragment_names.add(frag_name)
-
-            if defer:
-                patch_fields = defaultdict(list)
                 collect_fields_impl(
-                    schema,
-                    fragments,
-                    variable_values,
-                    operation,
-                    runtime_type,
+                    context,
                     fragment.selection_set,
-                    patch_fields,
-                    patches,
-                    visited_fragment_names,
+                    grouped_field_set,
+                    new_defer_usages,
+                    defer_usage,
                 )
-                patches.append(PatchFields(defer.label, patch_fields))
             else:
+                new_defer_usages.append(new_defer_usage)
                 collect_fields_impl(
-                    schema,
-                    fragments,
-                    variable_values,
-                    operation,
-                    runtime_type,
+                    context,
                     fragment.selection_set,
                     grouped_field_set,
-                    patches,
-                    visited_fragment_names,
+                    new_defer_usages,
+                    new_defer_usage,
                 )


-class DeferValues(NamedTuple):
-    """Values of an active defer directive."""
-
-    label: str | None
-
-
-def get_defer_values(
+def get_defer_usage(
     operation: OperationDefinitionNode,
     variable_values: dict[str, Any],
     node: FragmentSpreadNode | InlineFragmentNode,
-) -> DeferValues | None:
+    parent_defer_usage: DeferUsage | None,
+) -> DeferUsage | None:
     """Get values of defer directive if active.

     Returns an object containing the `@defer` arguments if a field should be
@@ -264,7 +268,7 @@
         )
         raise TypeError(msg)

-    return DeferValues(defer.get("label"))
+    return DeferUsage(defer.get("label"), parent_defer_usage)
diff --git a/src/graphql/execution/execute.py b/src/graphql/execution/execute.py
index e370bcc1..c8ae17b8 100644
--- a/src/graphql/execution/execute.py
+++ b/src/graphql/execution/execute.py
@@ -2,9 +2,14 @@

 from __future__ import annotations

-from asyncio import ensure_future, gather, shield, wait_for
-from collections.abc import Mapping
+from asyncio import (
+    CancelledError,
+    ensure_future,
+    shield,
+    wait_for,
+)
 from contextlib import suppress
+from copy import copy
 from typing import (
     Any,
     AsyncGenerator,
@@ -13,8 +18,8 @@
     Awaitable,
     Callable,
     Iterable,
-    Iterator,
     List,
+    Mapping,
     NamedTuple,
     Optional,
     Sequence,
@@ -24,22 +29,18 @@
 )

 try:
-    from typing import TypedDict
-except ImportError:  # Python < 3.8
-    from typing_extensions import TypedDict
-try:
-    from typing import TypeAlias, TypeGuard
+    from typing import TypeAlias, TypeGuard  # noqa: F401
 except ImportError:  # Python < 3.10
-    from typing_extensions import TypeAlias, TypeGuard
+    from typing_extensions import TypeAlias
 try:  # only needed for Python < 3.11
-    # noinspection PyCompatibility
-    from asyncio.exceptions import TimeoutError
+    from asyncio.exceptions import TimeoutError  # noqa: A004
 except ImportError:  # Python < 3.7
-    from concurrent.futures import TimeoutError
+    from concurrent.futures import TimeoutError  # noqa: A004

-from ..error import GraphQLError, GraphQLFormattedError, located_error
+from ..error import GraphQLError, located_error
 from ..language import (
     DocumentNode,
+    FieldNode,
     FragmentDefinitionNode,
     OperationDefinitionNode,
     OperationType,
@@ -47,8 +48,10 @@
 from ..pyutils import (
     AwaitableOrValue,
     Path,
+    RefMap,
     Undefined,
     async_reduce,
+    gather_with_cancel,
     inspect,
     is_iterable,
 )
@@ -73,36 +76,42 @@
     is_object_type,
 )
 from .async_iterables import map_async_iterable
-from .collect_fields import (
+from .build_field_plan import (
+    DeferUsageSet,
     FieldGroup,
-    FieldsAndPatches,
     GroupedFieldSet,
-    collect_fields,
-    collect_subfields,
+    NewGroupedFieldSetDetails,
+    build_field_plan,
 )
+from .collect_fields import DeferUsage, FieldDetails, collect_fields, collect_subfields
 from .incremental_publisher import (
     ASYNC_DELAY,
-    FormattedIncrementalResult,
+    DeferredFragmentRecord,
+    DeferredGroupedFieldSetRecord,
+    ExecutionResult,
+    ExperimentalIncrementalExecutionResults,
     IncrementalDataRecord,
     IncrementalPublisher,
-    IncrementalResult,
+    InitialResultRecord,
     StreamItemsRecord,
-    SubsequentIncrementalExecutionResult,
+    StreamRecord,
 )
 from .middleware import MiddlewareManager
 from .values import get_argument_values, get_directive_values, get_variable_values

 try:  # pragma: no cover
-    anext  # noqa: B018
+    anext  # noqa: B018  # pyright: ignore
 except NameError:  # pragma: no cover (Python < 3.10)
     # noinspection PyShadowingBuiltins
-    async def anext(iterator: AsyncIterator) -> Any:  # noqa: A001
+    async def anext(iterator: AsyncIterator) -> Any:
         """Return the next item from an async iterator."""
         return await iterator.__anext__()


 __all__ = [
     "ASYNC_DELAY",
+    "ExecutionContext",
+    "Middleware",
     "create_source_event_stream",
     "default_field_resolver",
     "default_type_resolver",
@@ -110,13 +119,6 @@ async def anext(iterator: AsyncIterator) -> Any:  # noqa: A001
     "execute_sync",
     "experimental_execute_incrementally",
     "subscribe",
-    "ExecutionResult",
-    "ExecutionContext",
-    "ExperimentalIncrementalExecutionResults",
-    "FormattedExecutionResult",
-    "FormattedInitialIncrementalExecutionResult",
-    "InitialIncrementalExecutionResult",
-    "Middleware",
 ]

 suppress_exceptions = suppress(Exception)
@@ -142,198 +144,23 @@
 # 3) inline fragment "spreads" e.g. "...on Type { a }"


-class FormattedExecutionResult(TypedDict, total=False):
-    """Formatted execution result"""
-
-    data: dict[str, Any] | None
-    errors: list[GraphQLFormattedError]
-    extensions: dict[str, Any]
-
-
-class ExecutionResult:
-    """The result of GraphQL execution.
-
-    - ``data`` is the result of a successful execution of the query.
-    - ``errors`` is included when any errors occurred as a non-empty list.
-    - ``extensions`` is reserved for adding non-standard properties.
- """ - - __slots__ = "data", "errors", "extensions" - - data: dict[str, Any] | None - errors: list[GraphQLError] | None - extensions: dict[str, Any] | None - - def __init__( - self, - data: dict[str, Any] | None = None, - errors: list[GraphQLError] | None = None, - extensions: dict[str, Any] | None = None, - ) -> None: - self.data = data - self.errors = errors - self.extensions = extensions - - def __repr__(self) -> str: - name = self.__class__.__name__ - ext = "" if self.extensions is None else f", extensions={self.extensions}" - return f"{name}(data={self.data!r}, errors={self.errors!r}{ext})" - - def __iter__(self) -> Iterator[Any]: - return iter((self.data, self.errors)) - - @property - def formatted(self) -> FormattedExecutionResult: - """Get execution result formatted according to the specification.""" - formatted: FormattedExecutionResult = {"data": self.data} - if self.errors is not None: - formatted["errors"] = [error.formatted for error in self.errors] - if self.extensions is not None: - formatted["extensions"] = self.extensions - return formatted - - def __eq__(self, other: object) -> bool: - if isinstance(other, dict): - if "extensions" not in other: - return other == {"data": self.data, "errors": self.errors} - return other == { - "data": self.data, - "errors": self.errors, - "extensions": self.extensions, - } - if isinstance(other, tuple): - if len(other) == 2: - return other == (self.data, self.errors) - return other == (self.data, self.errors, self.extensions) - return ( - isinstance(other, self.__class__) - and other.data == self.data - and other.errors == self.errors - and other.extensions == self.extensions - ) - - def __ne__(self, other: object) -> bool: - return not self == other - - -class FormattedInitialIncrementalExecutionResult(TypedDict, total=False): - """Formatted initial incremental execution result""" - - data: dict[str, Any] | None - errors: list[GraphQLFormattedError] - hasNext: bool - incremental: list[FormattedIncrementalResult] - extensions: dict[str, Any] - - -class InitialIncrementalExecutionResult: - """Initial incremental execution result. - - - ``has_next`` is True if a future payload is expected. - - ``incremental`` is a list of the results from defer/stream directives. 
- """ - - data: dict[str, Any] | None - errors: list[GraphQLError] | None - incremental: Sequence[IncrementalResult] | None - has_next: bool - extensions: dict[str, Any] | None - - __slots__ = "data", "errors", "has_next", "incremental", "extensions" - - def __init__( - self, - data: dict[str, Any] | None = None, - errors: list[GraphQLError] | None = None, - incremental: Sequence[IncrementalResult] | None = None, - has_next: bool = False, - extensions: dict[str, Any] | None = None, - ) -> None: - self.data = data - self.errors = errors - self.incremental = incremental - self.has_next = has_next - self.extensions = extensions - - def __repr__(self) -> str: - name = self.__class__.__name__ - args: list[str] = [f"data={self.data!r}, errors={self.errors!r}"] - if self.incremental: - args.append(f"incremental[{len(self.incremental)}]") - if self.has_next: - args.append("has_next") - if self.extensions: - args.append(f"extensions={self.extensions}") - return f"{name}({', '.join(args)})" - - @property - def formatted(self) -> FormattedInitialIncrementalExecutionResult: - """Get execution result formatted according to the specification.""" - formatted: FormattedInitialIncrementalExecutionResult = {"data": self.data} - if self.errors is not None: - formatted["errors"] = [error.formatted for error in self.errors] - if self.incremental: - formatted["incremental"] = [result.formatted for result in self.incremental] - formatted["hasNext"] = self.has_next - if self.extensions is not None: - formatted["extensions"] = self.extensions - return formatted - - def __eq__(self, other: object) -> bool: - if isinstance(other, dict): - return ( - other.get("data") == self.data - and other.get("errors") == self.errors - and ( - "incremental" not in other - or other["incremental"] == self.incremental - ) - and ("hasNext" not in other or other["hasNext"] == self.has_next) - and ( - "extensions" not in other or other["extensions"] == self.extensions - ) - ) - if isinstance(other, tuple): - size = len(other) - return ( - 1 < size < 6 - and ( - self.data, - self.errors, - self.incremental, - self.has_next, - self.extensions, - )[:size] - == other - ) - return ( - isinstance(other, self.__class__) - and other.data == self.data - and other.errors == self.errors - and other.incremental == self.incremental - and other.has_next == self.has_next - and other.extensions == self.extensions - ) - - def __ne__(self, other: object) -> bool: - return not self == other +Middleware: TypeAlias = Optional[Union[Tuple, List, MiddlewareManager]] -class StreamArguments(NamedTuple): - """Arguments of the stream directive""" +class StreamUsage(NamedTuple): + """Stream directive usage information""" - initial_count: int label: str | None + initial_count: int + field_group: FieldGroup -class ExperimentalIncrementalExecutionResults(NamedTuple): - """Execution results when retrieved incrementally.""" - - initial_result: InitialIncrementalExecutionResult - subsequent_results: AsyncGenerator[SubsequentIncrementalExecutionResult, None] - +class SubFieldPlan(NamedTuple): + """A plan for executing fields with defer usages.""" -Middleware: TypeAlias = Optional[Union[Tuple, List, MiddlewareManager]] + grouped_field_set: GroupedFieldSet + new_grouped_field_set_details_map: RefMap[DeferUsageSet, NewGroupedFieldSetDetails] + new_defer_usages: list[DeferUsage] class ExecutionContext: @@ -352,13 +179,10 @@ class ExecutionContext: field_resolver: GraphQLFieldResolver type_resolver: GraphQLTypeResolver subscribe_field_resolver: GraphQLFieldResolver - errors: 
list[GraphQLError] incremental_publisher: IncrementalPublisher middleware_manager: MiddlewareManager | None - is_awaitable: Callable[[Any], TypeGuard[Awaitable]] = staticmethod( - default_is_awaitable - ) + is_awaitable: Callable[[Any], bool] = staticmethod(default_is_awaitable) def __init__( self, @@ -371,7 +195,6 @@ def __init__( field_resolver: GraphQLFieldResolver, type_resolver: GraphQLTypeResolver, subscribe_field_resolver: GraphQLFieldResolver, - errors: list[GraphQLError], incremental_publisher: IncrementalPublisher, middleware_manager: MiddlewareManager | None, is_awaitable: Callable[[Any], bool] | None, @@ -385,14 +208,14 @@ def __init__( self.field_resolver = field_resolver self.type_resolver = type_resolver self.subscribe_field_resolver = subscribe_field_resolver - self.errors = errors self.incremental_publisher = incremental_publisher self.middleware_manager = middleware_manager if is_awaitable: self.is_awaitable = is_awaitable self._canceled_iterators: set[AsyncIterator] = set() - self._subfields_cache: dict[tuple, FieldsAndPatches] = {} + self._sub_field_plan_cache: dict[tuple, SubFieldPlan] = {} self._tasks: set[Awaitable] = set() + self._stream_usages: RefMap[FieldGroup, StreamUsage] = RefMap() @classmethod def build( @@ -408,6 +231,7 @@ def build( subscribe_field_resolver: GraphQLFieldResolver | None = None, middleware: Middleware | None = None, is_awaitable: Callable[[Any], bool] | None = None, + **custom_args: Any, ) -> list[GraphQLError] | ExecutionContext: """Build an execution context @@ -478,57 +302,27 @@ def build( field_resolver or default_field_resolver, type_resolver or default_type_resolver, subscribe_field_resolver or default_field_resolver, - [], IncrementalPublisher(), middleware_manager, is_awaitable, + **custom_args, ) - @staticmethod - def build_response( - data: dict[str, Any] | None, errors: list[GraphQLError] - ) -> ExecutionResult: - """Build response. - - Given a completed execution context and data, build the (data, errors) response - defined by the "Response" section of the GraphQL spec. - """ - if not errors: - return ExecutionResult(data, None) - # Sort the error list in order to make it deterministic, since we might have - # been using parallel execution. - errors.sort( - key=lambda error: (error.locations or [], error.path or [], error.message) - ) - return ExecutionResult(data, errors) - def build_per_event_execution_context(self, payload: Any) -> ExecutionContext: """Create a copy of the execution context for usage with subscribe events.""" - return self.__class__( - self.schema, - self.fragments, - payload, - self.context_value, - self.operation, - self.variable_values, - self.field_resolver, - self.type_resolver, - self.subscribe_field_resolver, - [], - # no need to update incrementalPublisher, - # incremental delivery is not supported for subscriptions - self.incremental_publisher, - self.middleware_manager, - self.is_awaitable, - ) + context = copy(self) + context.root_value = payload + return context - def execute_operation(self) -> AwaitableOrValue[dict[str, Any]]: + def execute_operation( + self, initial_result_record: InitialResultRecord + ) -> AwaitableOrValue[dict[str, Any]]: """Execute an operation. Implements the "Executing operations" section of the spec. 
""" - schema = self.schema operation = self.operation + schema = self.schema root_type = schema.get_root_type(operation.operation) if root_type is None: msg = ( @@ -537,12 +331,23 @@ def execute_operation(self) -> AwaitableOrValue[dict[str, Any]]: ) raise GraphQLError(msg, operation) - grouped_field_set, patches = collect_fields( - schema, - self.fragments, - self.variable_values, - root_type, - operation, + fields, new_defer_usages = collect_fields( + schema, self.fragments, self.variable_values, root_type, operation + ) + grouped_field_set, new_grouped_field_set_details_map = build_field_plan(fields) + + incremental_publisher = self.incremental_publisher + new_defer_map = add_new_deferred_fragments( + incremental_publisher, new_defer_usages, initial_result_record + ) + + path: Path | None = None + + new_deferred_grouped_field_set_records = add_new_deferred_grouped_field_sets( + incremental_publisher, + new_grouped_field_set_details_map, + new_defer_map, + path, ) root_value = self.root_value @@ -551,13 +356,22 @@ def execute_operation(self) -> AwaitableOrValue[dict[str, Any]]: self.execute_fields_serially if operation.operation == OperationType.MUTATION else self.execute_fields - )(root_type, root_value, None, grouped_field_set) # type: ignore + )( + root_type, + root_value, + path, + grouped_field_set, + initial_result_record, + new_defer_map, + ) - for patch in patches: - label, patch_grouped_filed_set = patch - self.execute_deferred_fragment( - root_type, root_value, patch_grouped_filed_set, label, None - ) + self.execute_deferred_grouped_field_sets( + root_type, + root_value, + path, + new_deferred_grouped_field_set_records, + new_defer_map, + ) return result @@ -567,6 +381,8 @@ def execute_fields_serially( source_value: Any, path: Path | None, grouped_field_set: GroupedFieldSet, + incremental_data_record: IncrementalDataRecord, + defer_map: RefMap[DeferUsage, DeferredFragmentRecord], ) -> AwaitableOrValue[dict[str, Any]]: """Execute the given fields serially. @@ -581,7 +397,12 @@ def reducer( response_name, field_group = field_item field_path = Path(path, response_name, parent_type.name) result = self.execute_field( - parent_type, source_value, field_group, field_path + parent_type, + source_value, + field_group, + field_path, + incremental_data_record, + defer_map, ) if result is Undefined: return results @@ -607,7 +428,8 @@ def execute_fields( source_value: Any, path: Path | None, grouped_field_set: GroupedFieldSet, - incremental_data_record: IncrementalDataRecord | None = None, + incremental_data_record: IncrementalDataRecord, + defer_map: RefMap[DeferUsage, DeferredFragmentRecord], ) -> AwaitableOrValue[dict[str, Any]]: """Execute the given fields concurrently. 
@@ -626,6 +448,7 @@ def execute_fields( field_group, field_path, incremental_data_record, + defer_map, ) if result is not Undefined: results[response_name] = result @@ -646,12 +469,11 @@ async def get_results() -> dict[str, Any]: field = awaitable_fields[0] results[field] = await results[field] else: - results.update( - zip( - awaitable_fields, - await gather(*(results[field] for field in awaitable_fields)), - ) + awaited_results = await gather_with_cancel( + *(results[field] for field in awaitable_fields) ) + results.update(zip(awaitable_fields, awaited_results)) + return results return get_results() @@ -662,7 +484,8 @@ def execute_field( source: Any, field_group: FieldGroup, path: Path, - incremental_data_record: IncrementalDataRecord | None = None, + incremental_data_record: IncrementalDataRecord, + defer_map: RefMap[DeferUsage, DeferredFragmentRecord], ) -> AwaitableOrValue[Any]: """Resolve the field on the given source object. @@ -672,7 +495,7 @@ def execute_field( calling its resolve function, then calls complete_value to await coroutine objects, serialize scalars, or execute the sub-selection-set for objects. """ - field_name = field_group[0].name.value + field_name = field_group.fields[0].node.name.value field_def = self.schema.get_field(parent_type, field_name) if not field_def: return Undefined @@ -683,14 +506,18 @@ def execute_field( if self.middleware_manager: resolve_fn = self.middleware_manager.get_field_resolver(resolve_fn) - info = self.build_resolve_info(field_def, field_group, parent_type, path) + info = self.build_resolve_info( + field_def, field_group.to_nodes(), parent_type, path + ) # Get the resolve function, regardless of if its result is normal or abrupt # (error). try: # Build a dictionary of arguments from the field.arguments AST, using the # variables scope to fulfill any variable references. - args = get_argument_values(field_def, field_group[0], self.variable_values) + args = get_argument_values( + field_def, field_group.fields[0].node, self.variable_values + ) # Note that contrary to the JavaScript implementation, we pass the context # value as part of the resolve info. @@ -704,10 +531,17 @@ def execute_field( path, result, incremental_data_record, + defer_map, ) completed = self.complete_value( - return_type, field_group, info, path, result, incremental_data_record + return_type, + field_group, + info, + path, + result, + incremental_data_record, + defer_map, ) if self.is_awaitable(completed): # noinspection PyShadowingNames @@ -715,6 +549,10 @@ async def await_completed() -> Any: try: return await completed except Exception as raw_error: + # Before Python 3.8 CancelledError inherits Exception and + # so gets caught here. + if isinstance(raw_error, CancelledError): + raise # pragma: no cover self.handle_field_error( raw_error, return_type, @@ -743,7 +581,7 @@ async def await_completed() -> Any: def build_resolve_info( self, field_def: GraphQLField, - field_group: FieldGroup, + field_nodes: list[FieldNode], parent_type: GraphQLObjectType, path: Path, ) -> GraphQLResolveInfo: @@ -754,8 +592,8 @@ def build_resolve_info( # The resolve function's first argument is a collection of information about # the current execution state. 
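+        # Resolvers still receive a plain list of FieldNode objects here: the
+        # new FieldGroup wrapper stays internal and is converted with to_nodes()
+        # before it reaches user-facing code.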
return GraphQLResolveInfo( - field_group[0].name.value, - field_group, + field_nodes[0].name.value, + field_nodes, field_def.type, parent_type, path, @@ -774,23 +612,19 @@ def handle_field_error( return_type: GraphQLOutputType, field_group: FieldGroup, path: Path, - incremental_data_record: IncrementalDataRecord | None = None, + incremental_data_record: IncrementalDataRecord, ) -> None: """Handle error properly according to the field type.""" - error = located_error(raw_error, field_group, path.as_list()) + error = located_error(raw_error, field_group.to_nodes(), path.as_list()) # If the field type is non-nullable, then it is resolved without any protection # from errors, however it still properly locates the error. if is_non_null_type(return_type): raise error - errors = ( - incremental_data_record.errors if incremental_data_record else self.errors - ) - # Otherwise, error protection is applied, logging the error and resolving a # null value for this field if one is encountered. - errors.append(error) + self.incremental_publisher.add_field_error(incremental_data_record, error) def complete_value( self, @@ -799,7 +633,8 @@ def complete_value( info: GraphQLResolveInfo, path: Path, result: Any, - incremental_data_record: IncrementalDataRecord | None, + incremental_data_record: IncrementalDataRecord, + defer_map: RefMap[DeferUsage, DeferredFragmentRecord], ) -> AwaitableOrValue[Any]: """Complete a value. @@ -837,6 +672,7 @@ def complete_value( path, result, incremental_data_record, + defer_map, ) if completed is None: msg = ( @@ -853,7 +689,13 @@ def complete_value( # If field type is List, complete each item in the list with inner type if is_list_type(return_type): return self.complete_list_value( - return_type, field_group, info, path, result, incremental_data_record + return_type, + field_group, + info, + path, + result, + incremental_data_record, + defer_map, ) # If field type is a leaf type, Scalar or Enum, serialize to a valid value, @@ -865,13 +707,25 @@ def complete_value( # Object type and complete for that type. if is_abstract_type(return_type): return self.complete_abstract_value( - return_type, field_group, info, path, result, incremental_data_record + return_type, + field_group, + info, + path, + result, + incremental_data_record, + defer_map, ) # If field type is Object, execute and complete all sub-selections. if is_object_type(return_type): return self.complete_object_value( - return_type, field_group, info, path, result, incremental_data_record + return_type, + field_group, + info, + path, + result, + incremental_data_record, + defer_map, ) # Not reachable. All possible output types have been considered. @@ -888,7 +742,8 @@ async def complete_awaitable_value( info: GraphQLResolveInfo, path: Path, result: Any, - incremental_data_record: IncrementalDataRecord | None = None, + incremental_data_record: IncrementalDataRecord, + defer_map: RefMap[DeferUsage, DeferredFragmentRecord], ) -> Any: """Complete an awaitable value.""" try: @@ -900,10 +755,15 @@ async def complete_awaitable_value( path, resolved, incremental_data_record, + defer_map, ) if self.is_awaitable(completed): completed = await completed except Exception as raw_error: + # Before Python 3.8 CancelledError inherits Exception and + # so gets caught here. 
+ if isinstance(raw_error, CancelledError): + raise # pragma: no cover self.handle_field_error( raw_error, return_type, field_group, path, incremental_data_record ) @@ -911,12 +771,12 @@ async def complete_awaitable_value( completed = None return completed - def get_stream_values( + def get_stream_usage( self, field_group: FieldGroup, path: Path - ) -> StreamArguments | None: - """Get stream values. + ) -> StreamUsage | None: + """Get stream usage. - Returns an object containing the `@stream` arguments if a field should be + Returns an object containing info for streaming if a field should be streamed based on the experimental flag, stream directive present and not disabled by the "if" argument. """ @@ -924,10 +784,14 @@ def get_stream_values( if isinstance(path.key, int): return None + stream_usage = self._stream_usages.get(field_group) + if stream_usage is not None: + return stream_usage # pragma: no cover + # validation only allows equivalent streams on multiple fields, so it is # safe to only check the first field_node for the stream directive stream = get_directive_values( - GraphQLStreamDirective, field_group[0], self.variable_values + GraphQLStreamDirective, field_group.fields[0].node, self.variable_values ) if not stream or stream.get("if") is False: @@ -945,8 +809,20 @@ def get_stream_values( ) raise TypeError(msg) - label = stream.get("label") - return StreamArguments(initial_count=initial_count, label=label) + streamed_field_group = FieldGroup( + [ + FieldDetails(field_details.node, None) + for field_details in field_group.fields + ] + ) + + stream_usage = StreamUsage( + stream.get("label"), stream["initialCount"], streamed_field_group + ) + + self._stream_usages[field_group] = stream_usage + + return stream_usage async def complete_async_iterator_value( self, @@ -955,37 +831,40 @@ async def complete_async_iterator_value( info: GraphQLResolveInfo, path: Path, async_iterator: AsyncIterator[Any], - incremental_data_record: IncrementalDataRecord | None, + incremental_data_record: IncrementalDataRecord, + defer_map: RefMap[DeferUsage, DeferredFragmentRecord], ) -> list[Any]: """Complete an async iterator. Complete an async iterator value by completing the result and calling recursively until all the results are completed. 
""" - stream = self.get_stream_values(field_group, path) + stream_usage = self.get_stream_usage(field_group, path) complete_list_item_value = self.complete_list_item_value awaitable_indices: list[int] = [] append_awaitable = awaitable_indices.append completed_results: list[Any] = [] index = 0 while True: - if ( - stream - and isinstance(stream.initial_count, int) - and index >= stream.initial_count - ): + if stream_usage and index >= stream_usage.initial_count: + try: + early_return = async_iterator.aclose # type: ignore + except AttributeError: + early_return = None + stream_record = StreamRecord(path, stream_usage.label, early_return) + with suppress_timeout_error: await wait_for( shield( self.execute_stream_async_iterator( index, async_iterator, - field_group, + stream_usage.field_group, info, item_type, path, - stream.label, incremental_data_record, + stream_record, ) ), timeout=ASYNC_DELAY, @@ -1000,7 +879,7 @@ async def complete_async_iterator_value( break except Exception as raw_error: raise located_error( - raw_error, field_group, path.as_list() + raw_error, field_group.to_nodes(), path.as_list() ) from raw_error if complete_list_item_value( value, @@ -1010,6 +889,7 @@ async def complete_async_iterator_value( info, item_path, incremental_data_record, + defer_map, ): append_awaitable(index) @@ -1023,13 +903,11 @@ async def complete_async_iterator_value( index = awaitable_indices[0] completed_results[index] = await completed_results[index] else: - for index, result in zip( - awaitable_indices, - await gather( - *(completed_results[index] for index in awaitable_indices) - ), - ): - completed_results[index] = result + awaited_results = await gather_with_cancel( + *(completed_results[index] for index in awaitable_indices) + ) + for index, sub_result in zip(awaitable_indices, awaited_results): + completed_results[index] = sub_result return completed_results def complete_list_value( @@ -1039,7 +917,8 @@ def complete_list_value( info: GraphQLResolveInfo, path: Path, result: AsyncIterable[Any] | Iterable[Any], - incremental_data_record: IncrementalDataRecord | None, + incremental_data_record: IncrementalDataRecord, + defer_map: RefMap[DeferUsage, DeferredFragmentRecord], ) -> AwaitableOrValue[list[Any]]: """Complete a list value. @@ -1057,6 +936,7 @@ def complete_list_value( path, async_iterator, incremental_data_record, + defer_map, ) if not is_iterable(result): @@ -1066,35 +946,34 @@ def complete_list_value( ) raise GraphQLError(msg) - stream = self.get_stream_values(field_group, path) + stream_usage = self.get_stream_usage(field_group, path) # This is specified as a simple map, however we're optimizing the path where # the list contains no coroutine objects by avoiding creating another coroutine # object. complete_list_item_value = self.complete_list_item_value + current_parents = incremental_data_record awaitable_indices: list[int] = [] append_awaitable = awaitable_indices.append - previous_incremental_data_record = incremental_data_record completed_results: list[Any] = [] + stream_record: StreamRecord | None = None for index, item in enumerate(result): # No need to modify the info object containing the path, since from here on # it is not ever accessed by resolver functions. 
item_path = path.add_key(index, None) - if ( - stream - and isinstance(stream.initial_count, int) - and index >= stream.initial_count - ): - previous_incremental_data_record = self.execute_stream_field( + if stream_usage and index >= stream_usage.initial_count: + if stream_record is None: + stream_record = StreamRecord(path, stream_usage.label) + current_parents = self.execute_stream_field( path, item_path, item, - field_group, + stream_usage.field_group, info, item_type, - stream.label, - previous_incremental_data_record, + current_parents, + stream_record, ) continue @@ -1106,9 +985,15 @@ def complete_list_value( info, item_path, incremental_data_record, + defer_map, ): append_awaitable(index) + if stream_record is not None: + self.incremental_publisher.set_is_final_record( + cast("StreamItemsRecord", current_parents) + ) + if not awaitable_indices: return completed_results @@ -1119,12 +1004,10 @@ async def get_completed_results() -> list[Any]: index = awaitable_indices[0] completed_results[index] = await completed_results[index] else: - for index, sub_result in zip( - awaitable_indices, - await gather( - *(completed_results[index] for index in awaitable_indices) - ), - ): + awaited_results = await gather_with_cancel( + *(completed_results[index] for index in awaitable_indices) + ) + for index, sub_result in zip(awaitable_indices, awaited_results): completed_results[index] = sub_result return completed_results @@ -1138,7 +1021,8 @@ def complete_list_item_value( field_group: FieldGroup, info: GraphQLResolveInfo, item_path: Path, - incremental_data_record: IncrementalDataRecord | None, + incremental_data_record: IncrementalDataRecord, + defer_map: RefMap[DeferUsage, DeferredFragmentRecord], ) -> bool: """Complete a list item value by adding it to the completed results. @@ -1155,6 +1039,7 @@ def complete_list_item_value( item_path, item, incremental_data_record, + defer_map, ) ) return True @@ -1167,6 +1052,7 @@ def complete_list_item_value( item_path, item, incremental_data_record, + defer_map, ) if is_awaitable(completed_item): @@ -1229,7 +1115,8 @@ def complete_abstract_value( info: GraphQLResolveInfo, path: Path, result: Any, - incremental_data_record: IncrementalDataRecord | None, + incremental_data_record: IncrementalDataRecord, + defer_map: RefMap[DeferUsage, DeferredFragmentRecord], ) -> AwaitableOrValue[Any]: """Complete an abstract value. @@ -1240,7 +1127,7 @@ def complete_abstract_value( runtime_type = resolve_type_fn(result, info, return_type) if self.is_awaitable(runtime_type): - runtime_type = cast(Awaitable, runtime_type) + runtime_type = cast("Awaitable", runtime_type) async def await_complete_object_value() -> Any: value = self.complete_object_value( @@ -1256,13 +1143,14 @@ async def await_complete_object_value() -> Any: path, result, incremental_data_record, + defer_map, ) if self.is_awaitable(value): return await value # type: ignore return value # pragma: no cover return await_complete_object_value() - runtime_type = cast(Optional[str], runtime_type) + runtime_type = cast("Optional[str]", runtime_type) return self.complete_object_value( self.ensure_valid_runtime_type( @@ -1273,6 +1161,7 @@ async def await_complete_object_value() -> Any: path, result, incremental_data_record, + defer_map, ) def ensure_valid_runtime_type( @@ -1293,7 +1182,7 @@ def ensure_valid_runtime_type( " a 'resolve_type' function or each possible type should provide" " an 'is_type_of' function." 
) - raise GraphQLError(msg, field_group) + raise GraphQLError(msg, field_group.to_nodes()) if is_object_type(runtime_type_name): # pragma: no cover msg = ( @@ -1309,7 +1198,7 @@ def ensure_valid_runtime_type( f" for field '{info.parent_type.name}.{info.field_name}' with value" f" {inspect(result)}, received '{inspect(runtime_type_name)}'." ) - raise GraphQLError(msg, field_group) + raise GraphQLError(msg, field_group.to_nodes()) runtime_type = self.schema.get_type(runtime_type_name) @@ -1318,21 +1207,21 @@ def ensure_valid_runtime_type( f"Abstract type '{return_type.name}' was resolved to a type" f" '{runtime_type_name}' that does not exist inside the schema." ) - raise GraphQLError(msg, field_group) + raise GraphQLError(msg, field_group.to_nodes()) if not is_object_type(runtime_type): msg = ( f"Abstract type '{return_type.name}' was resolved" f" to a non-object type '{runtime_type_name}'." ) - raise GraphQLError(msg, field_group) + raise GraphQLError(msg, field_group.to_nodes()) if not self.schema.is_sub_type(return_type, runtime_type): msg = ( f"Runtime Object type '{runtime_type.name}' is not a possible" f" type for '{return_type.name}'." ) - raise GraphQLError(msg, field_group) + raise GraphQLError(msg, field_group.to_nodes()) # noinspection PyTypeChecker return runtime_type @@ -1344,7 +1233,8 @@ def complete_object_value( info: GraphQLResolveInfo, path: Path, result: Any, - incremental_data_record: IncrementalDataRecord | None, + incremental_data_record: IncrementalDataRecord, + defer_map: RefMap[DeferUsage, DeferredFragmentRecord], ) -> AwaitableOrValue[dict[str, Any]]: """Complete an Object value by executing all sub-selections.""" # If there is an `is_type_of()` predicate function, call it with the current @@ -1361,7 +1251,12 @@ async def execute_subfields_async() -> dict[str, Any]: return_type, result, field_group ) return self.collect_and_execute_subfields( - return_type, field_group, path, result, incremental_data_record + return_type, + field_group, + path, + result, + incremental_data_record, + defer_map, ) # type: ignore return execute_subfields_async() @@ -1370,7 +1265,7 @@ async def execute_subfields_async() -> dict[str, Any]: raise invalid_return_type_error(return_type, result, field_group) return self.collect_and_execute_subfields( - return_type, field_group, path, result, incremental_data_record + return_type, field_group, path, result, incremental_data_record, defer_map ) def collect_and_execute_subfields( @@ -1379,41 +1274,58 @@ def collect_and_execute_subfields( field_group: FieldGroup, path: Path, result: Any, - incremental_data_record: IncrementalDataRecord | None, + incremental_data_record: IncrementalDataRecord, + defer_map: RefMap[DeferUsage, DeferredFragmentRecord], ) -> AwaitableOrValue[dict[str, Any]]: """Collect sub-fields to execute to complete this value.""" - sub_grouped_field_set, sub_patches = self.collect_subfields( - return_type, field_group + grouped_field_set, new_grouped_field_set_details_map, new_defer_usages = ( + self.build_sub_field_plan(return_type, field_group) + ) + + incremental_publisher = self.incremental_publisher + new_defer_map = add_new_deferred_fragments( + incremental_publisher, + new_defer_usages, + incremental_data_record, + defer_map, + path, + ) + new_deferred_grouped_field_set_records = add_new_deferred_grouped_field_sets( + incremental_publisher, + new_grouped_field_set_details_map, + new_defer_map, + path, ) sub_fields = self.execute_fields( - return_type, result, path, sub_grouped_field_set, incremental_data_record + 
return_type, + result, + path, + grouped_field_set, + incremental_data_record, + new_defer_map, ) - for sub_patch in sub_patches: - label, sub_patch_grouped_field_set = sub_patch - self.execute_deferred_fragment( - return_type, - result, - sub_patch_grouped_field_set, - label, - path, - incremental_data_record, - ) + self.execute_deferred_grouped_field_sets( + return_type, + result, + path, + new_deferred_grouped_field_set_records, + new_defer_map, + ) return sub_fields - def collect_subfields( + def build_sub_field_plan( self, return_type: GraphQLObjectType, field_group: FieldGroup - ) -> FieldsAndPatches: + ) -> SubFieldPlan: """Collect subfields. - A cached collection of relevant subfields with regard to the return type is - kept in the execution context as ``_subfields_cache``. This ensures the - subfields are not repeatedly calculated, which saves overhead when resolving - lists of values. + A memoized function for building subfield plans with regard to the return type. + Memoizing ensures the subfields are not repeatedly calculated, which saves + overhead when resolving lists of values. """ - cache = self._subfields_cache + cache = self._sub_field_plan_cache # We cannot use the field_group itself as key for the cache, since it # is not hashable as a list. We also do not want to use the field_group # itself (converted to a tuple) as keys, since hashing them is slow. @@ -1426,18 +1338,20 @@ def collect_subfields( if len(field_group) == 1 # optimize most frequent case else (return_type, *map(id, field_group)) ) - sub_fields_and_patches = cache.get(key) - if sub_fields_and_patches is None: - sub_fields_and_patches = collect_subfields( + sub_field_plan = cache.get(key) + if sub_field_plan is None: + sub_fields, new_defer_usages = collect_subfields( self.schema, self.fragments, self.variable_values, self.operation, return_type, - field_group, + field_group.fields, ) - cache[key] = sub_fields_and_patches - return sub_fields_and_patches + field_plan = build_field_plan(sub_fields, field_group.defer_usages) + sub_field_plan = SubFieldPlan(*field_plan, new_defer_usages) + cache[key] = sub_field_plan + return sub_field_plan def map_source_to_response( self, result_or_stream: ExecutionResult | AsyncIterable[Any] @@ -1462,64 +1376,98 @@ async def callback(payload: Any) -> ExecutionResult: # typecast to ExecutionResult, not possible to return # ExperimentalIncrementalExecutionResults when operation is 'subscription'. 
return ( - await cast(Awaitable[ExecutionResult], result) + await cast("Awaitable[ExecutionResult]", result) if self.is_awaitable(result) - else cast(ExecutionResult, result) + else cast("ExecutionResult", result) ) return map_async_iterable(result_or_stream, callback) - def execute_deferred_fragment( + def execute_deferred_grouped_field_sets( + self, + parent_type: GraphQLObjectType, + source_value: Any, + path: Path | None, + new_deferred_grouped_field_set_records: Sequence[DeferredGroupedFieldSetRecord], + defer_map: RefMap[DeferUsage, DeferredFragmentRecord], + ) -> None: + """Execute deferred grouped field sets.""" + for deferred_grouped_field_set_record in new_deferred_grouped_field_set_records: + if deferred_grouped_field_set_record.should_initiate_defer: + + async def execute_deferred_grouped_field_set( + deferred_grouped_field_set_record: DeferredGroupedFieldSetRecord, + ) -> None: + self.execute_deferred_grouped_field_set( + parent_type, + source_value, + path, + deferred_grouped_field_set_record, + defer_map, + ) + + self.add_task( + execute_deferred_grouped_field_set( + deferred_grouped_field_set_record + ) + ) + + else: + self.execute_deferred_grouped_field_set( + parent_type, + source_value, + path, + deferred_grouped_field_set_record, + defer_map, + ) + + def execute_deferred_grouped_field_set( self, parent_type: GraphQLObjectType, source_value: Any, - fields: GroupedFieldSet, - label: str | None = None, - path: Path | None = None, - parent_context: IncrementalDataRecord | None = None, + path: Path | None, + deferred_grouped_field_set_record: DeferredGroupedFieldSetRecord, + defer_map: RefMap[DeferUsage, DeferredFragmentRecord], ) -> None: - """Execute deferred fragment.""" + """Execute deferred grouped field set.""" incremental_publisher = self.incremental_publisher - incremental_data_record = ( - incremental_publisher.prepare_new_deferred_fragment_record( - label, path, parent_context - ) - ) try: - awaitable_or_data = self.execute_fields( - parent_type, source_value, path, fields, incremental_data_record + incremental_result = self.execute_fields( + parent_type, + source_value, + path, + deferred_grouped_field_set_record.grouped_field_set, + deferred_grouped_field_set_record, + defer_map, ) - if self.is_awaitable(awaitable_or_data): + if self.is_awaitable(incremental_result): + incremental_result = cast("Awaitable", incremental_result) - async def await_data() -> None: + async def await_incremental_result() -> None: try: - data = await awaitable_or_data # type: ignore + result = await incremental_result except GraphQLError as error: - incremental_publisher.add_field_error( - incremental_data_record, error - ) - incremental_publisher.complete_deferred_fragment_record( - incremental_data_record, None + incremental_publisher.mark_errored_deferred_grouped_field_set( + deferred_grouped_field_set_record, error ) else: - incremental_publisher.complete_deferred_fragment_record( - incremental_data_record, data + incremental_publisher.complete_deferred_grouped_field_set( + deferred_grouped_field_set_record, result ) - self.add_task(await_data()) + self.add_task(await_incremental_result()) else: - incremental_publisher.complete_deferred_fragment_record( - incremental_data_record, - awaitable_or_data, # type: ignore + incremental_publisher.complete_deferred_grouped_field_set( + deferred_grouped_field_set_record, + incremental_result, # type: ignore ) + except GraphQLError as error: - incremental_publisher.add_field_error(incremental_data_record, error) - 
incremental_publisher.complete_deferred_fragment_record( - incremental_data_record, None + incremental_publisher.mark_errored_deferred_grouped_field_set( + deferred_grouped_field_set_record, error ) - awaitable_or_data = None def execute_stream_field( self, @@ -1529,14 +1477,15 @@ def execute_stream_field( field_group: FieldGroup, info: GraphQLResolveInfo, item_type: GraphQLOutputType, - label: str | None = None, - parent_context: IncrementalDataRecord | None = None, - ) -> IncrementalDataRecord: + incremental_data_record: IncrementalDataRecord, + stream_record: StreamRecord, + ) -> StreamItemsRecord: """Execute stream field.""" is_awaitable = self.is_awaitable incremental_publisher = self.incremental_publisher - incremental_data_record = incremental_publisher.prepare_new_stream_items_record( - label, item_path, parent_context + stream_items_record = StreamItemsRecord(stream_record, item_path) + incremental_publisher.report_new_stream_items_record( + stream_items_record, incremental_data_record ) completed_item: Any @@ -1550,23 +1499,21 @@ async def await_completed_awaitable_item() -> None: info, item_path, item, - incremental_data_record, + stream_items_record, + RefMap(), ) except GraphQLError as error: - incremental_publisher.add_field_error( - incremental_data_record, error - ) - incremental_publisher.filter(path, incremental_data_record) - incremental_publisher.complete_stream_items_record( - incremental_data_record, None + incremental_publisher.filter(path, stream_items_record) + incremental_publisher.mark_errored_stream_items_record( + stream_items_record, error ) else: incremental_publisher.complete_stream_items_record( - incremental_data_record, [value] + stream_items_record, [value] ) self.add_task(await_completed_awaitable_item()) - return incremental_data_record + return stream_items_record try: try: @@ -1576,7 +1523,8 @@ async def await_completed_awaitable_item() -> None: info, item_path, item, - incremental_data_record, + stream_items_record, + RefMap(), ) except Exception as raw_error: self.handle_field_error( @@ -1584,17 +1532,16 @@ async def await_completed_awaitable_item() -> None: item_type, field_group, item_path, - incremental_data_record, + stream_items_record, ) completed_item = None - incremental_publisher.filter(item_path, incremental_data_record) + incremental_publisher.filter(item_path, stream_items_record) except GraphQLError as error: - incremental_publisher.add_field_error(incremental_data_record, error) - incremental_publisher.filter(path, incremental_data_record) - incremental_publisher.complete_stream_items_record( - incremental_data_record, None + incremental_publisher.filter(path, stream_items_record) + incremental_publisher.mark_errored_stream_items_record( + stream_items_record, error ) - return incremental_data_record + return stream_items_record if is_awaitable(completed_item): @@ -1608,30 +1555,27 @@ async def await_completed_item() -> None: item_type, field_group, item_path, - incremental_data_record, + stream_items_record, ) - incremental_publisher.filter(item_path, incremental_data_record) + incremental_publisher.filter(item_path, stream_items_record) value = None except GraphQLError as error: # pragma: no cover - incremental_publisher.add_field_error( - incremental_data_record, error - ) - incremental_publisher.filter(path, incremental_data_record) - incremental_publisher.complete_stream_items_record( - incremental_data_record, None + incremental_publisher.filter(path, stream_items_record) + incremental_publisher.mark_errored_stream_items_record( 
+ stream_items_record, error ) else: incremental_publisher.complete_stream_items_record( - incremental_data_record, [value] + stream_items_record, [value] ) self.add_task(await_completed_item()) - return incremental_data_record + return stream_items_record incremental_publisher.complete_stream_items_record( - incremental_data_record, [completed_item] + stream_items_record, [completed_item] ) - return incremental_data_record + return stream_items_record async def execute_stream_async_iterator_item( self, @@ -1639,8 +1583,7 @@ async def execute_stream_async_iterator_item( field_group: FieldGroup, info: GraphQLResolveInfo, item_type: GraphQLOutputType, - incremental_data_record: StreamItemsRecord, - path: Path, + stream_items_record: StreamItemsRecord, item_path: Path, ) -> Any: """Execute stream iterator item.""" @@ -1650,14 +1593,27 @@ async def execute_stream_async_iterator_item( item = await anext(async_iterator) except StopAsyncIteration as raw_error: self.incremental_publisher.set_is_completed_async_iterator( - incremental_data_record + stream_items_record ) raise StopAsyncIteration from raw_error except Exception as raw_error: - raise located_error(raw_error, field_group, path.as_list()) from raw_error + raise located_error( + raw_error, + field_group.to_nodes(), + stream_items_record.stream_record.path, + ) from raw_error + else: + if stream_items_record.stream_record.errors: + raise StopAsyncIteration # pragma: no cover try: completed_item = self.complete_value( - item_type, field_group, info, item_path, item, incremental_data_record + item_type, + field_group, + info, + item_path, + item, + stream_items_record, + RefMap(), ) return ( await completed_item @@ -1666,9 +1622,9 @@ async def execute_stream_async_iterator_item( ) except Exception as raw_error: self.handle_field_error( - raw_error, item_type, field_group, item_path, incremental_data_record + raw_error, item_type, field_group, item_path, stream_items_record ) - self.incremental_publisher.filter(item_path, incremental_data_record) + self.incremental_publisher.filter(item_path, stream_items_record) async def execute_stream_async_iterator( self, @@ -1678,21 +1634,19 @@ async def execute_stream_async_iterator( info: GraphQLResolveInfo, item_type: GraphQLOutputType, path: Path, - label: str | None = None, - parent_context: IncrementalDataRecord | None = None, + incremental_data_record: IncrementalDataRecord, + stream_record: StreamRecord, ) -> None: """Execute stream iterator.""" incremental_publisher = self.incremental_publisher index = initial_index - previous_incremental_data_record = parent_context + current_incremental_data_record = incremental_data_record - done = False while True: item_path = Path(path, index, None) - incremental_data_record = ( - incremental_publisher.prepare_new_stream_items_record( - label, item_path, previous_incremental_data_record, async_iterator - ) + stream_items_record = StreamItemsRecord(stream_record, item_path) + incremental_publisher.report_new_stream_items_record( + stream_items_record, current_incremental_data_record ) try: @@ -1701,15 +1655,13 @@ async def execute_stream_async_iterator( field_group, info, item_type, - incremental_data_record, - path, + stream_items_record, item_path, ) except GraphQLError as error: - incremental_publisher.add_field_error(incremental_data_record, error) - incremental_publisher.filter(path, incremental_data_record) - incremental_publisher.complete_stream_items_record( - incremental_data_record, None + incremental_publisher.filter(path, stream_items_record) + 
incremental_publisher.mark_errored_stream_items_record( + stream_items_record, error ) if async_iterator: # pragma: no cover else with suppress_exceptions: @@ -1717,18 +1669,20 @@ async def execute_stream_async_iterator( # running generators cannot be closed since Python 3.8, # so we need to remember that this iterator is already canceled self._canceled_iterators.add(async_iterator) - break + return except StopAsyncIteration: done = True + completed_item = None + else: + done = False incremental_publisher.complete_stream_items_record( - incremental_data_record, - [completed_item], + stream_items_record, [completed_item] ) if done: break - previous_incremental_data_record = incremental_data_record + current_incremental_data_record = stream_items_record index += 1 def add_task(self, awaitable: Awaitable[Any]) -> None: @@ -1765,6 +1719,7 @@ def execute( middleware: Middleware | None = None, execution_context_class: type[ExecutionContext] | None = None, is_awaitable: Callable[[Any], bool] | None = None, + **custom_context_args: Any, ) -> AwaitableOrValue[ExecutionResult]: """Execute a GraphQL operation. @@ -1797,6 +1752,7 @@ def execute( middleware, execution_context_class, is_awaitable, + **custom_context_args, ) if isinstance(result, ExecutionResult): return result @@ -1825,6 +1781,7 @@ def experimental_execute_incrementally( middleware: Middleware | None = None, execution_context_class: type[ExecutionContext] | None = None, is_awaitable: Callable[[Any], bool] | None = None, + **custom_context_args: Any, ) -> AwaitableOrValue[ExecutionResult | ExperimentalIncrementalExecutionResults]: """Execute GraphQL operation incrementally (internal implementation). @@ -1853,6 +1810,7 @@ def experimental_execute_incrementally( subscribe_field_resolver, middleware, is_awaitable, + **custom_context_args, ) # Return early errors if execution context failed. @@ -1877,52 +1835,31 @@ def execute_impl( # Errors from sub-fields of a NonNull type may propagate to the top level, # at which point we still log the error and null the parent field, which # in this case is the entire response. 
- errors = context.errors incremental_publisher = context.incremental_publisher - build_response = context.build_response + initial_result_record = InitialResultRecord() try: - result = context.execute_operation() + data = context.execute_operation(initial_result_record) + if context.is_awaitable(data): - if context.is_awaitable(result): - # noinspection PyShadowingNames - async def await_result() -> Any: + async def await_response() -> ( + ExecutionResult | ExperimentalIncrementalExecutionResults + ): try: - initial_result = build_response( - await result, # type: ignore - errors, + return incremental_publisher.build_data_response( + initial_result_record, + await data, # type: ignore ) - incremental_publisher.publish_initial() - if incremental_publisher.has_next(): - return ExperimentalIncrementalExecutionResults( - initial_result=InitialIncrementalExecutionResult( - initial_result.data, - initial_result.errors, - has_next=True, - ), - subsequent_results=incremental_publisher.subscribe(), - ) except GraphQLError as error: - errors.append(error) - return build_response(None, errors) - return initial_result + return incremental_publisher.build_error_response( + initial_result_record, error + ) - return await_result() + return await_response() + + return incremental_publisher.build_data_response(initial_result_record, data) # type: ignore - initial_result = build_response(result, errors) # type: ignore - incremental_publisher.publish_initial() - if incremental_publisher.has_next(): - return ExperimentalIncrementalExecutionResults( - initial_result=InitialIncrementalExecutionResult( - initial_result.data, - initial_result.errors, - has_next=True, - ), - subsequent_results=incremental_publisher.subscribe(), - ) except GraphQLError as error: - errors.append(error) - return build_response(None, errors) - return initial_result + return incremental_publisher.build_error_response(initial_result_record, error) def assume_not_awaitable(_value: Any) -> bool: @@ -1978,11 +1915,11 @@ def execute_sync( result, ExperimentalIncrementalExecutionResults ): if default_is_awaitable(result): - ensure_future(cast(Awaitable[ExecutionResult], result)).cancel() + ensure_future(cast("Awaitable[ExecutionResult]", result)).cancel() msg = "GraphQL execution failed to complete synchronously." raise RuntimeError(msg) - return cast(ExecutionResult, result) + return cast("ExecutionResult", result) def invalid_return_type_error( @@ -1991,10 +1928,120 @@ def invalid_return_type_error( """Create a GraphQLError for an invalid return type.""" return GraphQLError( f"Expected value of type '{return_type.name}' but got: {inspect(result)}.", - field_group, + field_group.to_nodes(), ) +def add_new_deferred_fragments( + incremental_publisher: IncrementalPublisher, + new_defer_usages: Sequence[DeferUsage], + incremental_data_record: IncrementalDataRecord, + defer_map: RefMap[DeferUsage, DeferredFragmentRecord] | None = None, + path: Path | None = None, +) -> RefMap[DeferUsage, DeferredFragmentRecord]: + """Add new deferred fragments to the defer map. + + Instantiates new DeferredFragmentRecords for the given path within an + incremental data record, returning an updated map of DeferUsage + objects to DeferredFragmentRecords. + + Note: As defer directives may be used with operations returning lists, + a DeferUsage object may correspond to many DeferredFragmentRecords. + + DeferredFragmentRecord creation includes the following steps: + 1. The new DeferredFragmentRecord is instantiated at the given path. + 2. 
The parent result record is calculated from the given incremental data record. + 3. The IncrementalPublisher is notified that a new DeferredFragmentRecord + with the calculated parent has been added; the record will be released only + after the parent has completed. + """ + if not new_defer_usages: + # Given no DeferUsages, return the existing map, creating one if necessary. + return RefMap() if defer_map is None else defer_map + + # Create a copy of the old map. + new_defer_map = RefMap() if defer_map is None else RefMap(defer_map.items()) + + # For each new DeferUsage object: + for new_defer_usage in new_defer_usages: + parent_defer_usage = new_defer_usage.parent_defer_usage + + # If the parent defer usage is not defined, the parent result record is either: + # - the InitialResultRecord, or + # - a StreamItemsRecord, as `@defer` may be nested under `@stream`. + parent = ( + cast( + "Union[InitialResultRecord, StreamItemsRecord]", incremental_data_record + ) + if parent_defer_usage is None + else deferred_fragment_record_from_defer_usage( + parent_defer_usage, new_defer_map + ) + ) + + # Instantiate the new record. + deferred_fragment_record = DeferredFragmentRecord(path, new_defer_usage.label) + + # Report the new record to the Incremental Publisher. + incremental_publisher.report_new_defer_fragment_record( + deferred_fragment_record, parent + ) + + # Update the map. + new_defer_map[new_defer_usage] = deferred_fragment_record + + return new_defer_map + + +def deferred_fragment_record_from_defer_usage( + defer_usage: DeferUsage, defer_map: RefMap[DeferUsage, DeferredFragmentRecord] +) -> DeferredFragmentRecord: + """Get the deferred fragment record mapped to the given defer usage.""" + return defer_map[defer_usage] + + +def add_new_deferred_grouped_field_sets( + incremental_publisher: IncrementalPublisher, + new_grouped_field_set_details_map: Mapping[ + DeferUsageSet, NewGroupedFieldSetDetails + ], + defer_map: RefMap[DeferUsage, DeferredFragmentRecord], + path: Path | None = None, +) -> list[DeferredGroupedFieldSetRecord]: + """Add new deferred grouped field sets to the defer map.""" + new_deferred_grouped_field_set_records: list[DeferredGroupedFieldSetRecord] = [] + + for ( + defer_usage_set, + [grouped_field_set, should_initiate_defer], + ) in new_grouped_field_set_details_map.items(): + deferred_fragment_records = get_deferred_fragment_records( + defer_usage_set, defer_map + ) + deferred_grouped_field_set_record = DeferredGroupedFieldSetRecord( + deferred_fragment_records, + grouped_field_set, + should_initiate_defer, + path, + ) + incremental_publisher.report_new_deferred_grouped_filed_set_record( + deferred_grouped_field_set_record + ) + new_deferred_grouped_field_set_records.append(deferred_grouped_field_set_record) + + return new_deferred_grouped_field_set_records + + +def get_deferred_fragment_records( + defer_usages: DeferUsageSet, defer_map: RefMap[DeferUsage, DeferredFragmentRecord] +) -> list[DeferredFragmentRecord]: + """Get the deferred fragment records for the given defer usages.""" + return [ + deferred_fragment_record_from_defer_usage(defer_usage, defer_map) + for defer_usage in defer_usages + ] + + def get_typename(value: Any) -> str | None: """Get the ``__typename`` property of the given value.""" if isinstance(value, Mapping): @@ -2030,25 +2077,25 @@ def default_type_resolver( # Otherwise, test each possible type. 
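+    # Synchronous is_type_of() results are checked immediately; awaitable
+    # results are collected and awaited together below via gather_with_cancel.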
possible_types = info.schema.get_possible_types(abstract_type) is_awaitable = info.is_awaitable - awaitable_is_type_of_results: list[Awaitable] = [] - append_awaitable_results = awaitable_is_type_of_results.append + awaitable_is_type_of_results: list[Awaitable[bool]] = [] + append_awaitable_result = awaitable_is_type_of_results.append awaitable_types: list[GraphQLObjectType] = [] - append_awaitable_types = awaitable_types.append + append_awaitable_type = awaitable_types.append for type_ in possible_types: if type_.is_type_of: is_type_of_result = type_.is_type_of(value, info) if is_awaitable(is_type_of_result): - append_awaitable_results(cast(Awaitable, is_type_of_result)) - append_awaitable_types(type_) + append_awaitable_result(cast("Awaitable[bool]", is_type_of_result)) + append_awaitable_type(type_) elif is_type_of_result: return type_.name if awaitable_is_type_of_results: # noinspection PyShadowingNames async def get_type() -> str | None: - is_type_of_results = await gather(*awaitable_is_type_of_results) + is_type_of_results = await gather_with_cancel(*awaitable_is_type_of_results) for is_type_of_result, type_ in zip(is_type_of_results, awaitable_types): if is_type_of_result: return type_.name @@ -2094,6 +2141,7 @@ def subscribe( subscribe_field_resolver: GraphQLFieldResolver | None = None, execution_context_class: type[ExecutionContext] | None = None, middleware: MiddlewareManager | None = None, + **custom_context_args: Any, ) -> AwaitableOrValue[AsyncIterator[ExecutionResult] | ExecutionResult]: """Create a GraphQL subscription. @@ -2134,6 +2182,7 @@ def subscribe( type_resolver, subscribe_field_resolver, middleware=middleware, + **custom_context_args, ) # Return early errors if execution context failed. @@ -2169,6 +2218,7 @@ def create_source_event_stream( type_resolver: GraphQLTypeResolver | None = None, subscribe_field_resolver: GraphQLFieldResolver | None = None, execution_context_class: type[ExecutionContext] | None = None, + **custom_context_args: Any, ) -> AwaitableOrValue[AsyncIterable[Any] | ExecutionResult]: """Create source event stream @@ -2205,6 +2255,7 @@ def create_source_event_stream( field_resolver, type_resolver, subscribe_field_resolver, + **custom_context_args, ) # Return early errors if execution context failed. @@ -2224,7 +2275,7 @@ def create_source_event_stream_impl( return ExecutionResult(None, errors=[error]) if context.is_awaitable(event_stream): - awaitable_event_stream = cast(Awaitable, event_stream) + awaitable_event_stream = cast("Awaitable", event_stream) # noinspection PyShadowingNames async def await_event_stream() -> AsyncIterable[Any] | ExecutionResult: @@ -2248,24 +2299,26 @@ def execute_subscription( msg = "Schema is not configured to execute subscription operation." raise GraphQLError(msg, context.operation) - grouped_field_set = collect_fields( + fields = collect_fields( schema, context.fragments, context.variable_values, root_type, context.operation, - ).grouped_field_set - first_root_field = next(iter(grouped_field_set.items())) - response_name, field_group = first_root_field - field_name = field_group[0].name.value + ).fields + + first_root_field = next(iter(fields.items())) + response_name, field_details_list = first_root_field + field_name = field_details_list[0].node.name.value field_def = schema.get_field(root_type, field_name) + field_nodes = [field_details.node for field_details in field_details_list] if not field_def: msg = f"The subscription field '{field_name}' is not defined." 
- raise GraphQLError(msg, field_group) + raise GraphQLError(msg, field_nodes) path = Path(None, response_name, root_type.name) - info = context.build_resolve_info(field_def, field_group, root_type, path) + info = context.build_resolve_info(field_def, field_nodes, root_type, path) # Implements the "ResolveFieldEventStream" algorithm from GraphQL specification. # It differs from "ResolveFieldValue" due to providing a different `resolveFn`. @@ -2273,7 +2326,7 @@ def execute_subscription( try: # Build a dictionary of arguments from the field.arguments AST, using the # variables scope to fulfill any variable references. - args = get_argument_values(field_def, field_group[0], context.variable_values) + args = get_argument_values(field_def, field_nodes[0], context.variable_values) # Call the `subscribe()` resolver or the default resolver to produce an # AsyncIterable yielding raw payloads. @@ -2286,14 +2339,14 @@ async def await_result() -> AsyncIterable[Any]: try: return assert_event_stream(await result) except Exception as error: - raise located_error(error, field_group, path.as_list()) from error + raise located_error(error, field_nodes, path.as_list()) from error return await_result() return assert_event_stream(result) except Exception as error: - raise located_error(error, field_group, path.as_list()) from error + raise located_error(error, field_nodes, path.as_list()) from error def assert_event_stream(result: Any) -> AsyncIterable: diff --git a/src/graphql/execution/incremental_publisher.py b/src/graphql/execution/incremental_publisher.py index fb660e85..6de707bc 100644 --- a/src/graphql/execution/incremental_publisher.py +++ b/src/graphql/execution/incremental_publisher.py @@ -2,17 +2,17 @@ from __future__ import annotations -from asyncio import Event, ensure_future, gather +from asyncio import Event, ensure_future, gather, sleep from contextlib import suppress from typing import ( TYPE_CHECKING, Any, AsyncGenerator, - AsyncIterator, Awaitable, + Callable, Collection, + Iterator, NamedTuple, - Sequence, Union, ) @@ -21,23 +21,31 @@ except ImportError: # Python < 3.8 from typing_extensions import TypedDict +from ..pyutils import RefSet if TYPE_CHECKING: from ..error import GraphQLError, GraphQLFormattedError from ..pyutils import Path + from .build_field_plan import GroupedFieldSet __all__ = [ "ASYNC_DELAY", "DeferredFragmentRecord", + "ExecutionResult", + "ExperimentalIncrementalExecutionResults", + "FormattedExecutionResult", "FormattedIncrementalDeferResult", "FormattedIncrementalResult", "FormattedIncrementalStreamResult", + "FormattedInitialIncrementalExecutionResult", "FormattedSubsequentIncrementalExecutionResult", "IncrementalDataRecord", "IncrementalDeferResult", "IncrementalPublisher", "IncrementalResult", "IncrementalStreamResult", + "InitialIncrementalExecutionResult", + "InitialResultRecord", "StreamItemsRecord", "SubsequentIncrementalExecutionResult", ] @@ -48,62 +56,263 @@ suppress_key_error = suppress(KeyError) -class FormattedIncrementalDeferResult(TypedDict, total=False): - """Formatted incremental deferred execution result""" +class FormattedPendingResult(TypedDict, total=False): + """Formatted pending execution result""" - data: dict[str, Any] | None - errors: list[GraphQLFormattedError] + id: str path: list[str | int] label: str + + +class PendingResult: # noqa: PLW1641 + """Pending execution result""" + + id: str + path: list[str | int] + label: str | None + + __slots__ = "id", "label", "path" + + def __init__( + self, + id: str, # noqa: A002 + path: list[str | int], 
+ label: str | None = None, + ) -> None: + self.id = id + self.path = path + self.label = label + + def __repr__(self) -> str: + name = self.__class__.__name__ + args: list[str] = [f"id={self.id!r}, path={self.path!r}"] + if self.label: + args.append(f"label={self.label!r}") + return f"{name}({', '.join(args)})" + + @property + def formatted(self) -> FormattedPendingResult: + """Get pending result formatted according to the specification.""" + formatted: FormattedPendingResult = {"id": self.id, "path": self.path} + if self.label is not None: + formatted["label"] = self.label + return formatted + + def __eq__(self, other: object) -> bool: + if isinstance(other, dict): + return ( + other.get("id") == self.id + and (other.get("path") or None) == (self.path or None) + and (other.get("label") or None) == (self.label or None) + ) + + if isinstance(other, tuple): + size = len(other) + return 1 < size < 4 and (self.id, self.path, self.label)[:size] == other + return ( + isinstance(other, self.__class__) + and other.id == self.id + and other.path == self.path + and other.label == self.label + ) + + def __ne__(self, other: object) -> bool: + return not self == other + + +class FormattedCompletedResult(TypedDict, total=False): + """Formatted completed execution result""" + + id: str + errors: list[GraphQLFormattedError] + + +class CompletedResult: # noqa: PLW1641 + """Completed execution result""" + + id: str + errors: list[GraphQLError] | None + + __slots__ = "errors", "id" + + def __init__( + self, + id: str, # noqa: A002 + errors: list[GraphQLError] | None = None, + ) -> None: + self.id = id + self.errors = errors + + def __repr__(self) -> str: + name = self.__class__.__name__ + args: list[str] = [f"id={self.id!r}"] + if self.errors: + args.append(f"errors={self.errors!r}") + return f"{name}({', '.join(args)})" + + @property + def formatted(self) -> FormattedCompletedResult: + """Get completed result formatted according to the specification.""" + formatted: FormattedCompletedResult = {"id": self.id} + if self.errors is not None: + formatted["errors"] = [error.formatted for error in self.errors] + return formatted + + def __eq__(self, other: object) -> bool: + if isinstance(other, dict): + return other.get("id") == self.id and (other.get("errors") or None) == ( + self.errors or None + ) + if isinstance(other, tuple): + size = len(other) + return 1 < size < 3 and (self.id, self.errors)[:size] == other + return ( + isinstance(other, self.__class__) + and other.id == self.id + and other.errors == self.errors + ) + + def __ne__(self, other: object) -> bool: + return not self == other + + +class IncrementalUpdate(NamedTuple): + """Incremental update""" + + pending: list[PendingResult] + incremental: list[IncrementalResult] + completed: list[CompletedResult] + + +class FormattedExecutionResult(TypedDict, total=False): + """Formatted execution result""" + + data: dict[str, Any] | None + errors: list[GraphQLFormattedError] extensions: dict[str, Any] -class IncrementalDeferResult: - """Incremental deferred execution result""" +class ExecutionResult: # noqa: PLW1641 + """The result of GraphQL execution. + + - ``data`` is the result of a successful execution of the query. + - ``errors`` is included when any errors occurred as a non-empty list. + - ``extensions`` is reserved for adding non-standard properties. 
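+    - for convenience, a result compares equal to an equivalent dict or tuple.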
+ """ + + __slots__ = "data", "errors", "extensions" data: dict[str, Any] | None errors: list[GraphQLError] | None - path: list[str | int] | None - label: str | None extensions: dict[str, Any] | None - __slots__ = "data", "errors", "path", "label", "extensions" + def __init__( + self, + data: dict[str, Any] | None = None, + errors: list[GraphQLError] | None = None, + extensions: dict[str, Any] | None = None, + ) -> None: + self.data = data + self.errors = errors + self.extensions = extensions + + def __repr__(self) -> str: + name = self.__class__.__name__ + ext = "" if self.extensions is None else f", extensions={self.extensions!r}" + return f"{name}(data={self.data!r}, errors={self.errors!r}{ext})" + + def __iter__(self) -> Iterator[Any]: + return iter((self.data, self.errors)) + + @property + def formatted(self) -> FormattedExecutionResult: + """Get execution result formatted according to the specification.""" + formatted: FormattedExecutionResult = {"data": self.data} + if self.errors is not None: + formatted["errors"] = [error.formatted for error in self.errors] + if self.extensions is not None: + formatted["extensions"] = self.extensions + return formatted + + def __eq__(self, other: object) -> bool: + if isinstance(other, dict): + return ( + (other.get("data") == self.data) + and (other.get("errors") or None) == (self.errors or None) + and (other.get("extensions") or None) == (self.extensions or None) + ) + if isinstance(other, tuple): + if len(other) == 2: + return other == (self.data, self.errors) + return other == (self.data, self.errors, self.extensions) + return ( + isinstance(other, self.__class__) + and other.data == self.data + and other.errors == self.errors + and other.extensions == self.extensions + ) + + def __ne__(self, other: object) -> bool: + return not self == other + + +class FormattedInitialIncrementalExecutionResult(TypedDict, total=False): + """Formatted initial incremental execution result""" + + data: dict[str, Any] | None + errors: list[GraphQLFormattedError] + pending: list[FormattedPendingResult] + hasNext: bool + incremental: list[FormattedIncrementalResult] + extensions: dict[str, Any] + + +class InitialIncrementalExecutionResult: # noqa: PLW1641 + """Initial incremental execution result.""" + + data: dict[str, Any] | None + errors: list[GraphQLError] | None + pending: list[PendingResult] + has_next: bool + extensions: dict[str, Any] | None + + __slots__ = "data", "errors", "extensions", "has_next", "pending" def __init__( self, data: dict[str, Any] | None = None, errors: list[GraphQLError] | None = None, - path: list[str | int] | None = None, - label: str | None = None, + pending: list[PendingResult] | None = None, + has_next: bool = False, extensions: dict[str, Any] | None = None, ) -> None: self.data = data self.errors = errors - self.path = path - self.label = label + self.pending = pending or [] + self.has_next = has_next self.extensions = extensions def __repr__(self) -> str: name = self.__class__.__name__ - args: list[str] = [f"data={self.data!r}, errors={self.errors!r}"] - if self.path: - args.append(f"path={self.path!r}") - if self.label: - args.append(f"label={self.label!r}") + args: list[str] = [f"data={self.data!r}"] + if self.errors: + args.append(f"errors={self.errors!r}") + if self.pending: + args.append(f"pending={self.pending!r}") + if self.has_next: + args.append("has_next") if self.extensions: - args.append(f"extensions={self.extensions}") + args.append(f"extensions={self.extensions!r}") return f"{name}({', '.join(args)})" @property - 
def formatted(self) -> FormattedIncrementalDeferResult: + def formatted(self) -> FormattedInitialIncrementalExecutionResult: """Get execution result formatted according to the specification.""" - formatted: FormattedIncrementalDeferResult = {"data": self.data} + formatted: FormattedInitialIncrementalExecutionResult = {"data": self.data} if self.errors is not None: formatted["errors"] = [error.formatted for error in self.errors] - if self.path is not None: - formatted["path"] = self.path - if self.label is not None: - formatted["label"] = self.label + formatted["pending"] = [pending.formatted for pending in self.pending] + formatted["hasNext"] = self.has_next if self.extensions is not None: formatted["extensions"] = self.extensions return formatted @@ -112,18 +321,119 @@ def __eq__(self, other: object) -> bool: if isinstance(other, dict): return ( other.get("data") == self.data - and other.get("errors") == self.errors - and ("path" not in other or other["path"] == self.path) - and ("label" not in other or other["label"] == self.label) + and (other.get("errors") or None) == (self.errors or None) + and (other.get("pending") or None) == (self.pending or None) + and (other.get("hasNext") or None) == (self.has_next or None) + and (other.get("extensions") or None) == (self.extensions or None) + ) + if isinstance(other, tuple): + size = len(other) + return ( + 1 < size < 6 and ( - "extensions" not in other or other["extensions"] == self.extensions - ) + self.data, + self.errors, + self.pending, + self.has_next, + self.extensions, + )[:size] + == other + ) + return ( + isinstance(other, self.__class__) + and other.data == self.data + and other.errors == self.errors + and other.pending == self.pending + and other.has_next == self.has_next + and other.extensions == self.extensions + ) + + def __ne__(self, other: object) -> bool: + return not self == other + + +class ExperimentalIncrementalExecutionResults(NamedTuple): + """Execution results when retrieved incrementally.""" + + initial_result: InitialIncrementalExecutionResult + subsequent_results: AsyncGenerator[SubsequentIncrementalExecutionResult, None] + + +class FormattedIncrementalDeferResult(TypedDict, total=False): + """Formatted incremental deferred execution result""" + + data: dict[str, Any] + id: str + subPath: list[str | int] + errors: list[GraphQLFormattedError] + extensions: dict[str, Any] + + +class IncrementalDeferResult: # noqa: PLW1641 + """Incremental deferred execution result""" + + data: dict[str, Any] + id: str + sub_path: list[str | int] | None + errors: list[GraphQLError] | None + extensions: dict[str, Any] | None + + __slots__ = "data", "errors", "extensions", "id", "sub_path" + + def __init__( + self, + data: dict[str, Any], + id: str, # noqa: A002 + sub_path: list[str | int] | None = None, + errors: list[GraphQLError] | None = None, + extensions: dict[str, Any] | None = None, + ) -> None: + self.data = data + self.id = id + self.sub_path = sub_path + self.errors = errors + self.extensions = extensions + + def __repr__(self) -> str: + name = self.__class__.__name__ + args: list[str] = [f"data={self.data!r}, id={self.id!r}"] + if self.sub_path is not None: + args.append(f"sub_path={self.sub_path!r}") + if self.errors is not None: + args.append(f"errors={self.errors!r}") + if self.extensions is not None: + args.append(f"extensions={self.extensions!r}") + return f"{name}({', '.join(args)})" + + @property + def formatted(self) -> FormattedIncrementalDeferResult: + """Get execution result formatted according to the 
specification."""
+        formatted: FormattedIncrementalDeferResult = {
+            "data": self.data,
+            "id": self.id,
+        }
+        if self.sub_path is not None:
+            formatted["subPath"] = self.sub_path
+        if self.errors is not None:
+            formatted["errors"] = [error.formatted for error in self.errors]
+        if self.extensions is not None:
+            formatted["extensions"] = self.extensions
+        return formatted
+
+    def __eq__(self, other: object) -> bool:
+        if isinstance(other, dict):
+            return (
+                other.get("data") == self.data
+                and other.get("id") == self.id
+                and (other.get("subPath") or None) == (self.sub_path or None)
+                and (other.get("errors") or None) == (self.errors or None)
+                and (other.get("extensions") or None) == (self.extensions or None)
             )
         if isinstance(other, tuple):
             size = len(other)
             return (
                 1 < size < 6
-                and (self.data, self.errors, self.path, self.label, self.extensions)[
+                and (self.data, self.id, self.sub_path, self.errors, self.extensions)[
                     :size
                 ]
                 == other
@@ -131,9 +441,9 @@ def __eq__(self, other: object) -> bool:
         return (
             isinstance(other, self.__class__)
             and other.data == self.data
+            and other.id == self.id
+            and other.sub_path == self.sub_path
             and other.errors == self.errors
-            and other.path == self.path
-            and other.label == self.label
             and other.extensions == self.extensions
         )

@@ -144,59 +454,60 @@ def __ne__(self, other: object) -> bool:
 class FormattedIncrementalStreamResult(TypedDict, total=False):
     """Formatted incremental stream execution result"""

-    items: list[Any] | None
+    items: list[Any]
+    id: str
+    subPath: list[str | int]
     errors: list[GraphQLFormattedError]
-    path: list[str | int]
-    label: str
     extensions: dict[str, Any]


-class IncrementalStreamResult:
+class IncrementalStreamResult:  # noqa: PLW1641
     """Incremental streamed execution result"""

-    items: list[Any] | None
+    items: list[Any]
+    id: str
+    sub_path: list[str | int] | None
     errors: list[GraphQLError] | None
-    path: list[str | int] | None
-    label: str | None
     extensions: dict[str, Any] | None

-    __slots__ = "items", "errors", "path", "label", "extensions"
+    __slots__ = "errors", "extensions", "id", "items", "sub_path"

     def __init__(
         self,
-        items: list[Any] | None = None,
+        items: list[Any],
+        id: str,  # noqa: A002
+        sub_path: list[str | int] | None = None,
         errors: list[GraphQLError] | None = None,
-        path: list[str | int] | None = None,
-        label: str | None = None,
         extensions: dict[str, Any] | None = None,
     ) -> None:
         self.items = items
+        self.id = id
+        self.sub_path = sub_path
         self.errors = errors
-        self.path = path
-        self.label = label
         self.extensions = extensions

     def __repr__(self) -> str:
         name = self.__class__.__name__
-        args: list[str] = [f"items={self.items!r}, errors={self.errors!r}"]
-        if self.path:
-            args.append(f"path={self.path!r}")
-        if self.label:
-            args.append(f"label={self.label!r}")
-        if self.extensions:
-            args.append(f"extensions={self.extensions}")
+        args: list[str] = [f"items={self.items!r}, id={self.id!r}"]
+        if self.sub_path is not None:
+            args.append(f"sub_path={self.sub_path!r}")
+        if self.errors is not None:
+            args.append(f"errors={self.errors!r}")
+        if self.extensions is not None:
+            args.append(f"extensions={self.extensions!r}")
         return f"{name}({', '.join(args)})"

     @property
     def formatted(self) -> FormattedIncrementalStreamResult:
         """Get execution result formatted according to the specification."""
-        formatted: FormattedIncrementalStreamResult = {"items": self.items}
+        formatted: FormattedIncrementalStreamResult = {
+            "items": self.items,
+            "id": self.id,
+        }
+        if self.sub_path is not None:
+            
formatted["subPath"] = self.sub_path if self.errors is not None: formatted["errors"] = [error.formatted for error in self.errors] - if self.path is not None: - formatted["path"] = self.path - if self.label is not None: - formatted["label"] = self.label if self.extensions is not None: formatted["extensions"] = self.extensions return formatted @@ -205,18 +516,16 @@ def __eq__(self, other: object) -> bool: if isinstance(other, dict): return ( other.get("items") == self.items - and other.get("errors") == self.errors - and ("path" not in other or other["path"] == self.path) - and ("label" not in other or other["label"] == self.label) - and ( - "extensions" not in other or other["extensions"] == self.extensions - ) + and other.get("id") == self.id + and (other.get("subPath", None) == (self.sub_path or None)) + and (other.get("errors") or None) == (self.errors or None) + and (other.get("extensions", None) == (self.extensions or None)) ) if isinstance(other, tuple): size = len(other) return ( 1 < size < 6 - and (self.items, self.errors, self.path, self.label, self.extensions)[ + and (self.items, self.id, self.sub_path, self.errors, self.extensions)[ :size ] == other @@ -224,9 +533,9 @@ def __eq__(self, other: object) -> bool: return ( isinstance(other, self.__class__) and other.items == self.items + and other.id == self.id + and other.sub_path == self.sub_path and other.errors == self.errors - and other.path == self.path - and other.label == self.label and other.extensions == self.extensions ) @@ -244,52 +553,64 @@ def __ne__(self, other: object) -> bool: class FormattedSubsequentIncrementalExecutionResult(TypedDict, total=False): """Formatted subsequent incremental execution result""" - incremental: list[FormattedIncrementalResult] hasNext: bool + pending: list[FormattedPendingResult] + incremental: list[FormattedIncrementalResult] + completed: list[FormattedCompletedResult] extensions: dict[str, Any] -class SubsequentIncrementalExecutionResult: - """Subsequent incremental execution result. - - - ``has_next`` is True if a future payload is expected. - - ``incremental`` is a list of the results from defer/stream directives. 
- """ +class SubsequentIncrementalExecutionResult: # noqa: PLW1641 + """Subsequent incremental execution result.""" - __slots__ = "has_next", "incremental", "extensions" + __slots__ = "completed", "extensions", "has_next", "incremental", "pending" - incremental: Sequence[IncrementalResult] | None has_next: bool + pending: list[PendingResult] | None + incremental: list[IncrementalResult] | None + completed: list[CompletedResult] | None extensions: dict[str, Any] | None def __init__( self, - incremental: Sequence[IncrementalResult] | None = None, has_next: bool = False, + pending: list[PendingResult] | None = None, + incremental: list[IncrementalResult] | None = None, + completed: list[CompletedResult] | None = None, extensions: dict[str, Any] | None = None, ) -> None: - self.incremental = incremental self.has_next = has_next + self.pending = pending or [] + self.incremental = incremental + self.completed = completed self.extensions = extensions def __repr__(self) -> str: name = self.__class__.__name__ args: list[str] = [] - if self.incremental: - args.append(f"incremental[{len(self.incremental)}]") if self.has_next: args.append("has_next") + if self.pending: + args.append(f"pending[{len(self.pending)}]") + if self.incremental: + args.append(f"incremental[{len(self.incremental)}]") + if self.completed: + args.append(f"completed[{len(self.completed)}]") if self.extensions: - args.append(f"extensions={self.extensions}") + args.append(f"extensions={self.extensions!r}") return f"{name}({', '.join(args)})" @property def formatted(self) -> FormattedSubsequentIncrementalExecutionResult: """Get execution result formatted according to the specification.""" formatted: FormattedSubsequentIncrementalExecutionResult = {} + formatted["hasNext"] = self.has_next + if self.pending: + formatted["pending"] = [result.formatted for result in self.pending] if self.incremental: formatted["incremental"] = [result.formatted for result in self.incremental] - formatted["hasNext"] = self.has_next + if self.completed: + formatted["completed"] = [result.formatted for result in self.completed] if self.extensions is not None: formatted["extensions"] = self.extensions return formatted @@ -297,27 +618,31 @@ def formatted(self) -> FormattedSubsequentIncrementalExecutionResult: def __eq__(self, other: object) -> bool: if isinstance(other, dict): return ( - ("incremental" not in other or other["incremental"] == self.incremental) - and ("hasNext" in other and other["hasNext"] == self.has_next) - and ( - "extensions" not in other or other["extensions"] == self.extensions - ) + (other.get("hasNext") or None) == (self.has_next or None) + and (other.get("pending") or None) == (self.pending or None) + and (other.get("incremental") or None) == (self.incremental or None) + and (other.get("completed") or None) == (self.completed or None) + and (other.get("extensions") or None) == (self.extensions or None) ) if isinstance(other, tuple): size = len(other) return ( - 1 < size < 4 + 1 < size < 6 and ( - self.incremental, self.has_next, + self.pending, + self.incremental, + self.completed, self.extensions, )[:size] == other ) return ( isinstance(other, self.__class__) - and other.incremental == self.incremental and other.has_next == self.has_next + and self.pending == other.pending + and other.incremental == self.incremental + and other.completed == self.completed and other.extensions == self.extensions ) @@ -340,140 +665,132 @@ class IncrementalPublisher: The internal publishing state is managed as follows: - ``_released``: the set of 
Incremental Data records that are ready to be sent to the
+    ``_released``: the set of Subsequent Result records that are ready to be sent to the
     client, i.e. their parents have completed and they have also completed.

-    ``_pending``: the set of Incremental Data records that are definitely pending, i.e.
+    ``_pending``: the set of Subsequent Result records that are definitely pending, i.e.
     their parents have completed so that they can no longer be filtered. This includes
-    all Incremental Data records in `released`, as well as Incremental Data records that
-    have not yet completed.
-
-    ``_initial_result``: a record containing the state of the initial result,
-    as follows:
-    ``is_completed``: indicates whether the initial result has completed.
-    ``children``: the set of Incremental Data records that can be be published when the
-    initial result is completed.
-
-    Each Incremental Data record also contains similar metadata, i.e. these records also
-    contain similar ``is_completed`` and ``children`` properties.
+    all Subsequent Result records in `released`, as well as the records that have not
+    yet completed.

     Note: Instead of sets we use dicts (with values set to None) which preserve order
     and thereby achieve more deterministic results.
     """

-    _initial_result: InitialResult
-    _released: dict[IncrementalDataRecord, None]
-    _pending: dict[IncrementalDataRecord, None]
+    _next_id: int
+    _released: dict[SubsequentResultRecord, None]
+    _pending: dict[SubsequentResultRecord, None]
     _resolve: Event | None
+    _tasks: set[Awaitable]

     def __init__(self) -> None:
-        self._initial_result = InitialResult({}, False)
+        self._next_id = 0
         self._released = {}
         self._pending = {}
         self._resolve = None  # lazy initialization
-        self._tasks: set[Awaitable] = set()
-
-    def has_next(self) -> bool:
-        """Check whether there is a next incremental result."""
-        return bool(self._pending)
-
-    async def subscribe(
-        self,
-    ) -> AsyncGenerator[SubsequentIncrementalExecutionResult, None]:
-        """Subscribe to the incremental results."""
-        is_done = False
-        pending = self._pending
-
-        try:
-            while not is_done:
-                released = self._released
-                for item in released:
-                    with suppress_key_error:
-                        del pending[item]
-                self._released = {}
+        self._tasks = set()

-                result = self._get_incremental_result(released)
+    @staticmethod
+    def report_new_defer_fragment_record(
+        deferred_fragment_record: DeferredFragmentRecord,
+        parent_incremental_result_record: InitialResultRecord
+        | DeferredFragmentRecord
+        | StreamItemsRecord,
+    ) -> None:
+        """Report a new deferred fragment record."""
+        parent_incremental_result_record.children[deferred_fragment_record] = None

-                if not self.has_next():
-                    is_done = True
+    @staticmethod
+    def report_new_deferred_grouped_field_set_record(
+        deferred_grouped_field_set_record: DeferredGroupedFieldSetRecord,
+    ) -> None:
+        """Report a new deferred grouped field set record."""
+        for (
+            deferred_fragment_record
+        ) in deferred_grouped_field_set_record.deferred_fragment_records:
+            deferred_fragment_record._pending[deferred_grouped_field_set_record] = None  # noqa: SLF001
+            deferred_fragment_record.deferred_grouped_field_set_records[
+                deferred_grouped_field_set_record
+            ] = None
+
+    @staticmethod
+    def report_new_stream_items_record(
+        stream_items_record: StreamItemsRecord,
+        parent_incremental_data_record: IncrementalDataRecord,
+    ) -> None:
+        """Report a new stream items record."""
+        if isinstance(parent_incremental_data_record, DeferredGroupedFieldSetRecord):
+            for parent in 
parent_incremental_data_record.deferred_fragment_records: + parent.children[stream_items_record] = None + else: + parent_incremental_data_record.children[stream_items_record] = None - if result is not None: - yield result - else: - resolve = self._resolve - if resolve is None: - self._resolve = resolve = Event() - await resolve.wait() - finally: - close_async_iterators = [] - for incremental_data_record in pending: - if isinstance( - incremental_data_record, StreamItemsRecord - ): # pragma: no cover - async_iterator = incremental_data_record.async_iterator - if async_iterator: - try: - close_async_iterator = async_iterator.aclose() # type: ignore - except AttributeError: - pass - else: - close_async_iterators.append(close_async_iterator) - await gather(*close_async_iterators) - - def prepare_new_deferred_fragment_record( + def complete_deferred_grouped_field_set( self, - label: str | None, - path: Path | None, - parent_context: IncrementalDataRecord | None, - ) -> DeferredFragmentRecord: - """Prepare a new deferred fragment record.""" - deferred_fragment_record = DeferredFragmentRecord(label, path, parent_context) - - context = parent_context or self._initial_result - context.children[deferred_fragment_record] = None - return deferred_fragment_record - - def prepare_new_stream_items_record( + deferred_grouped_field_set_record: DeferredGroupedFieldSetRecord, + data: dict[str, Any], + ) -> None: + """Complete the given deferred grouped field set record with the given data.""" + deferred_grouped_field_set_record.data = data + for ( + deferred_fragment_record + ) in deferred_grouped_field_set_record.deferred_fragment_records: + pending = deferred_fragment_record._pending # noqa: SLF001 + del pending[deferred_grouped_field_set_record] + if not pending: + self.complete_deferred_fragment_record(deferred_fragment_record) + + def mark_errored_deferred_grouped_field_set( self, - label: str | None, - path: Path | None, - parent_context: IncrementalDataRecord | None, - async_iterator: AsyncIterator[Any] | None = None, - ) -> StreamItemsRecord: - """Prepare a new stream items record.""" - stream_items_record = StreamItemsRecord( - label, path, parent_context, async_iterator - ) - - context = parent_context or self._initial_result - context.children[stream_items_record] = None - return stream_items_record + deferred_grouped_field_set_record: DeferredGroupedFieldSetRecord, + error: GraphQLError, + ) -> None: + """Mark the given deferred grouped field set record as errored.""" + for ( + deferred_fragment_record + ) in deferred_grouped_field_set_record.deferred_fragment_records: + deferred_fragment_record.errors.append(error) + self.complete_deferred_fragment_record(deferred_fragment_record) def complete_deferred_fragment_record( - self, - deferred_fragment_record: DeferredFragmentRecord, - data: dict[str, Any] | None, + self, deferred_fragment_record: DeferredFragmentRecord ) -> None: """Complete the given deferred fragment record.""" - deferred_fragment_record.data = data - deferred_fragment_record.is_completed = True self._release(deferred_fragment_record) def complete_stream_items_record( self, stream_items_record: StreamItemsRecord, - items: list[str] | None, + items: list[Any], ) -> None: """Complete the given stream items record.""" stream_items_record.items = items stream_items_record.is_completed = True self._release(stream_items_record) + def mark_errored_stream_items_record( + self, stream_items_record: StreamItemsRecord, error: GraphQLError + ) -> None: + """Mark the given stream items record 
as errored.""" + stream_items_record.stream_record.errors.append(error) + self.set_is_final_record(stream_items_record) + stream_items_record.is_completed = True + early_return = stream_items_record.stream_record.early_return + if early_return: + self._add_task(early_return()) + self._release(stream_items_record) + + @staticmethod + def set_is_final_record(stream_items_record: StreamItemsRecord) -> None: + """Mark stream items record as final.""" + stream_items_record.is_final_record = True + def set_is_completed_async_iterator( self, stream_items_record: StreamItemsRecord ) -> None: """Mark async iterator for stream items as completed.""" stream_items_record.is_completed_async_iterator = True + self.set_is_final_record(stream_items_record) def add_field_error( self, incremental_data_record: IncrementalDataRecord, error: GraphQLError @@ -481,39 +798,139 @@ def add_field_error( """Add a field error to the given incremental data record.""" incremental_data_record.errors.append(error) - def publish_initial(self) -> None: - """Publish the initial result.""" - for child in self._initial_result.children: - self._publish(child) + def build_data_response( + self, initial_result_record: InitialResultRecord, data: dict[str, Any] | None + ) -> ExecutionResult | ExperimentalIncrementalExecutionResults: + """Build response for the given data.""" + pending_sources = self._publish(initial_result_record.children) + + errors = initial_result_record.errors or None + if errors: + errors.sort( + key=lambda error: ( + error.locations or [], + error.path or [], + error.message, + ) + ) + if pending_sources: + return ExperimentalIncrementalExecutionResults( + initial_result=InitialIncrementalExecutionResult( + data, + errors, + pending=self._pending_sources_to_results(pending_sources), + has_next=True, + ), + subsequent_results=self._subscribe(), + ) + return ExecutionResult(data, errors) + + def build_error_response( + self, initial_result_record: InitialResultRecord, error: GraphQLError + ) -> ExecutionResult: + """Build response for the given error.""" + errors = initial_result_record.errors + errors.append(error) + # Sort the error list in order to make it deterministic, since we might have + # been using parallel execution. 
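+        # (The sort key orders errors by location, then path, then message.)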
+ errors.sort( + key=lambda error: (error.locations or [], error.path or [], error.message) + ) + return ExecutionResult(None, errors) def filter( self, - null_path: Path, - erroring_incremental_data_record: IncrementalDataRecord | None, + null_path: Path | None, + erroring_incremental_data_record: IncrementalDataRecord, ) -> None: """Filter out the given erroring incremental data record.""" - null_path_list = null_path.as_list() + null_path_list = null_path.as_list() if null_path else [] - children = (erroring_incremental_data_record or self._initial_result).children + streams: list[StreamRecord] = [] - for child in self._get_descendants(children): - if not self._matches_path(child.path, null_path_list): + children = self._get_children(erroring_incremental_data_record) + descendants = self._get_descendants(children) + + for child in descendants: + if not self._nulls_child_subsequent_result_record(child, null_path_list): continue - self._delete(child) - parent = child.parent_context or self._initial_result - with suppress_key_error: - del parent.children[child] + child.filtered = True if isinstance(child, StreamItemsRecord): - async_iterator = child.async_iterator - if async_iterator: - try: - close_async_iterator = async_iterator.aclose() # type:ignore - except AttributeError: # pragma: no cover - pass - else: - self._add_task(close_async_iterator) + streams.append(child.stream_record) + + early_returns = [] + for stream in streams: + early_return = stream.early_return + if early_return: + early_returns.append(early_return()) + if early_returns: + self._add_task(gather(*early_returns)) + + def _pending_sources_to_results( + self, + pending_sources: RefSet[DeferredFragmentRecord | StreamRecord], + ) -> list[PendingResult]: + """Convert pending sources to pending results.""" + pending_results: list[PendingResult] = [] + for pending_source in pending_sources: + pending_source.pending_sent = True + id_ = self._get_next_id() + pending_source.id = id_ + pending_results.append( + PendingResult(id_, pending_source.path, pending_source.label) + ) + return pending_results + + def _get_next_id(self) -> str: + """Get the next ID for pending results.""" + id_ = self._next_id + self._next_id += 1 + return str(id_) + + async def _subscribe( + self, + ) -> AsyncGenerator[SubsequentIncrementalExecutionResult, None]: + """Subscribe to the incremental results.""" + is_done = False + pending = self._pending + + await sleep(0) # execute pending tasks + + try: + while not is_done: + released = self._released + for item in released: + with suppress_key_error: + del pending[item] + self._released = {} + + result = self._get_incremental_result(released) + + if not self._pending: + is_done = True + + if result is not None: + yield result + else: + resolve = self._resolve + if resolve is None: + self._resolve = resolve = Event() + await resolve.wait() + finally: + streams: list[StreamRecord] = [] + descendants = self._get_descendants(pending) + for subsequent_result_record in descendants: # pragma: no cover + if isinstance(subsequent_result_record, StreamItemsRecord): + streams.append(subsequent_result_record.stream_record) + early_returns = [] + for stream in streams: # pragma: no cover + early_return = stream.early_return + if early_return: + early_returns.append(early_return()) + if early_returns: # pragma: no cover + await gather(*early_returns) def _trigger(self) -> None: """Trigger the resolve event.""" @@ -522,87 +939,199 @@ def _trigger(self) -> None: resolve.set() self._resolve = Event() - def 
_introduce(self, item: IncrementalDataRecord) -> None: + def _introduce(self, item: SubsequentResultRecord) -> None: """Introduce a new IncrementalDataRecord.""" self._pending[item] = None - def _release(self, item: IncrementalDataRecord) -> None: + def _release(self, item: SubsequentResultRecord) -> None: """Release the given IncrementalDataRecord.""" if item in self._pending: self._released[item] = None self._trigger() - def _push(self, item: IncrementalDataRecord) -> None: + def _push(self, item: SubsequentResultRecord) -> None: """Push the given IncrementalDataRecord.""" self._released[item] = None self._pending[item] = None self._trigger() - def _delete(self, item: IncrementalDataRecord) -> None: - """Delete the given IncrementalDataRecord.""" - with suppress_key_error: - del self._released[item] - with suppress_key_error: - del self._pending[item] - self._trigger() - def _get_incremental_result( - self, completed_records: Collection[IncrementalDataRecord] + self, completed_records: Collection[SubsequentResultRecord] ) -> SubsequentIncrementalExecutionResult | None: """Get the incremental result with the completed records.""" + update = self._process_pending(completed_records) + pending, incremental, completed = ( + update.pending, + update.incremental, + update.completed, + ) + + has_next = bool(self._pending) + if not incremental and not completed and has_next: + return None + + return SubsequentIncrementalExecutionResult( + has_next, pending or None, incremental or None, completed or None + ) + + def _process_pending( + self, + completed_records: Collection[SubsequentResultRecord], + ) -> IncrementalUpdate: + """Process the pending records.""" + new_pending_sources: RefSet[DeferredFragmentRecord | StreamRecord] = RefSet() incremental_results: list[IncrementalResult] = [] - encountered_completed_async_iterator = False - append_result = incremental_results.append - for incremental_data_record in completed_records: + completed_results: list[CompletedResult] = [] + to_result = self._completed_record_to_result + for subsequent_result_record in completed_records: + self._publish(subsequent_result_record.children, new_pending_sources) incremental_result: IncrementalResult - for child in incremental_data_record.children: - self._publish(child) - if isinstance(incremental_data_record, StreamItemsRecord): - items = incremental_data_record.items - if incremental_data_record.is_completed_async_iterator: + if isinstance(subsequent_result_record, StreamItemsRecord): + if subsequent_result_record.is_final_record: + stream_record = subsequent_result_record.stream_record + new_pending_sources.discard(stream_record) + completed_results.append(to_result(stream_record)) + if subsequent_result_record.is_completed_async_iterator: # async iterable resolver finished but there may be pending payload - encountered_completed_async_iterator = True - continue # pragma: no cover + continue + if subsequent_result_record.stream_record.errors: + continue incremental_result = IncrementalStreamResult( - items, - incremental_data_record.errors - if incremental_data_record.errors - else None, - incremental_data_record.path, - incremental_data_record.label, + # safe because `items` is always defined + # when the record is completed + subsequent_result_record.items, + # safe because `id` is defined + # once the stream has been released as pending + subsequent_result_record.stream_record.id, # type: ignore ) + if subsequent_result_record.errors: + incremental_result.errors = subsequent_result_record.errors + 
incremental_results.append(incremental_result)
             else:
-                data = incremental_data_record.data
-                incremental_result = IncrementalDeferResult(
-                    data,
-                    incremental_data_record.errors
-                    if incremental_data_record.errors
-                    else None,
-                    incremental_data_record.path,
-                    incremental_data_record.label,
-                )
-            append_result(incremental_result)
+                new_pending_sources.discard(subsequent_result_record)
+                completed_results.append(to_result(subsequent_result_record))
+                if subsequent_result_record.errors:
+                    continue
+                for (
+                    deferred_grouped_field_set_record
+                ) in subsequent_result_record.deferred_grouped_field_set_records:
+                    if not deferred_grouped_field_set_record.sent:
+                        deferred_grouped_field_set_record.sent = True
+                        incremental_result = self._get_incremental_defer_result(
+                            deferred_grouped_field_set_record
+                        )
+                        if deferred_grouped_field_set_record.errors:
+                            incremental_result.errors = (
+                                deferred_grouped_field_set_record.errors
+                            )
+                        incremental_results.append(incremental_result)
+        return IncrementalUpdate(
+            self._pending_sources_to_results(new_pending_sources),
+            incremental_results,
+            completed_results,
+        )

-        if incremental_results:
-            return SubsequentIncrementalExecutionResult(
-                incremental=incremental_results, has_next=self.has_next()
-            )
-        if encountered_completed_async_iterator and not self.has_next():
-            return SubsequentIncrementalExecutionResult(has_next=False)
-        return None
-
-    def _publish(self, incremental_data_record: IncrementalDataRecord) -> None:
-        """Publish the given incremental data record."""
-        if incremental_data_record.is_completed:
-            self._push(incremental_data_record)
+    def _get_incremental_defer_result(
+        self, deferred_grouped_field_set_record: DeferredGroupedFieldSetRecord
+    ) -> IncrementalDeferResult:
+        """Get the incremental defer result from the grouped field set record."""
+        data = deferred_grouped_field_set_record.data
+        fragment_records = deferred_grouped_field_set_record.deferred_fragment_records
+        max_length: int | None = None
+        id_with_longest_path: str | None = None
+        for fragment_record in fragment_records:
+            if fragment_record.id is None:  # pragma: no cover
+                continue
+            length = len(fragment_record.path)
+            if max_length is None or length > max_length:
+                max_length = length
+                id_with_longest_path = fragment_record.id
+
+        sub_path = deferred_grouped_field_set_record.path[max_length:]
+
+        return IncrementalDeferResult(
+            # safe because `data` is always defined when the record is completed
+            data,  # type: ignore
+            # safe because `id` is always defined once the fragment has been released
+            # as pending and at least one fragment has been completed, so must have been
+            # released as pending
+            id_with_longest_path,  # type: ignore
+            sub_path or None,
+        )
+
+    @staticmethod
+    def _completed_record_to_result(
+        completed_record: DeferredFragmentRecord | StreamRecord,
+    ) -> CompletedResult:
+        """Convert the completed record to a result."""
+        return CompletedResult(
+            # safe because `id` is defined once the record has been released as pending
+            completed_record.id,  # type: ignore
+            completed_record.errors or None,
+        )
+
+    def _publish(
+        self,
+        subsequent_result_records: dict[SubsequentResultRecord, None],
+        pending_sources: RefSet[DeferredFragmentRecord | StreamRecord] | None = None,
+    ) -> RefSet[DeferredFragmentRecord | StreamRecord]:
+        """Publish the given set of subsequent result records."""
+        if pending_sources is None:
+            pending_sources = RefSet()
+        empty_records: list[SubsequentResultRecord] = []
+
+        for subsequent_result_record in subsequent_result_records:
+            if 
subsequent_result_record.filtered: + continue + if isinstance(subsequent_result_record, StreamItemsRecord): + if subsequent_result_record.is_completed: + self._push(subsequent_result_record) + else: + self._introduce(subsequent_result_record) + + stream = subsequent_result_record.stream_record + if not stream.pending_sent: + pending_sources.add(stream) + continue + + if subsequent_result_record._pending: # noqa: SLF001 + self._introduce(subsequent_result_record) + elif not subsequent_result_record.deferred_grouped_field_set_records: + empty_records.append(subsequent_result_record) + continue + else: + self._push(subsequent_result_record) + + if not subsequent_result_record.pending_sent: # pragma: no cover else + pending_sources.add(subsequent_result_record) + + for empty_record in empty_records: + self._publish(empty_record.children, pending_sources) + + return pending_sources + + @staticmethod + def _get_children( + erroring_incremental_data_record: IncrementalDataRecord, + ) -> dict[SubsequentResultRecord, None]: + """Get the children of the given erroring incremental data record.""" + children: dict[SubsequentResultRecord, None] = {} + if isinstance(erroring_incremental_data_record, DeferredGroupedFieldSetRecord): + for ( + erroring_incremental_result_record + ) in erroring_incremental_data_record.deferred_fragment_records: + for child in erroring_incremental_result_record.children: + children[child] = None else: - self._introduce(incremental_data_record) + for child in erroring_incremental_data_record.children: + children[child] = None + return children def _get_descendants( self, - children: dict[IncrementalDataRecord, None], - descendants: dict[IncrementalDataRecord, None] | None = None, - ) -> dict[IncrementalDataRecord, None]: + children: dict[SubsequentResultRecord, None], + descendants: dict[SubsequentResultRecord, None] | None = None, + ) -> dict[SubsequentResultRecord, None]: """Get the descendants of the given children.""" if descendants is None: descendants = {} @@ -611,6 +1140,24 @@ def _get_descendants( self._get_descendants(child.children, descendants) return descendants + def _nulls_child_subsequent_result_record( + self, + subsequent_result_record: SubsequentResultRecord, + null_path: list[str | int], + ) -> bool: + """Check whether the given subsequent result record is nulled.""" + incremental_data_records: ( + list[SubsequentResultRecord] | dict[DeferredGroupedFieldSetRecord, None] + ) = ( + [subsequent_result_record] + if isinstance(subsequent_result_record, StreamItemsRecord) + else subsequent_result_record.deferred_grouped_field_set_records + ) + return any( + self._matches_path(incremental_data_record.path, null_path) + for incremental_data_record in incremental_data_records + ) + def _matches_path( self, test_path: list[str | int], base_path: list[str | int] ) -> bool: @@ -625,82 +1172,155 @@ def _add_task(self, awaitable: Awaitable[Any]) -> None: task.add_done_callback(tasks.discard) -class DeferredFragmentRecord: - """A record collecting data marked with the defer directive""" +class InitialResultRecord: + """Initial result record""" errors: list[GraphQLError] - label: str | None + children: dict[SubsequentResultRecord, None] + + def __init__(self) -> None: + self.errors = [] + self.children = {} + + +class DeferredGroupedFieldSetRecord: + """Deferred grouped field set record""" + path: list[str | int] + deferred_fragment_records: list[DeferredFragmentRecord] + grouped_field_set: GroupedFieldSet + should_initiate_defer: bool + errors: list[GraphQLError] data: 
dict[str, Any] | None - parent_context: IncrementalDataRecord | None - children: dict[IncrementalDataRecord, None] - is_completed: bool + sent: bool def __init__( self, - label: str | None, - path: Path | None, - parent_context: IncrementalDataRecord | None, + deferred_fragment_records: list[DeferredFragmentRecord], + grouped_field_set: GroupedFieldSet, + should_initiate_defer: bool, + path: Path | None = None, ) -> None: - self.label = label self.path = path.as_list() if path else [] - self.parent_context = parent_context + self.deferred_fragment_records = deferred_fragment_records + self.grouped_field_set = grouped_field_set + self.should_initiate_defer = should_initiate_defer self.errors = [] + self.sent = False + + def __repr__(self) -> str: + name = self.__class__.__name__ + args: list[str] = [ + f"deferred_fragment_records={self.deferred_fragment_records!r}", + f"grouped_field_set={self.grouped_field_set!r}", + ] + if self.path: + args.append(f"path={self.path!r}") + return f"{name}({', '.join(args)})" + + +class DeferredFragmentRecord: + """Deferred fragment record""" + + path: list[str | int] + label: str | None + id: str | None + children: dict[SubsequentResultRecord, None] + deferred_grouped_field_set_records: dict[DeferredGroupedFieldSetRecord, None] + errors: list[GraphQLError] + filtered: bool + pending_sent: bool + _pending: dict[DeferredGroupedFieldSetRecord, None] + + def __init__(self, path: Path | None = None, label: str | None = None) -> None: + self.path = path.as_list() if path else [] + self.label = label + self.id = None self.children = {} - self.is_completed = False - self.data = None + self.filtered = False + self.pending_sent = False + self.deferred_grouped_field_set_records = {} + self.errors = [] + self._pending = {} + + def __repr__(self) -> str: + name = self.__class__.__name__ + args: list[str] = [] + if self.path: + args.append(f"path={self.path!r}") + if self.label: + args.append(f"label={self.label!r}") + return f"{name}({', '.join(args)})" + + +class StreamRecord: + """Stream record""" + + label: str | None + path: list[str | int] + id: str | None + errors: list[GraphQLError] + early_return: Callable[[], Awaitable[Any]] | None + pending_sent: bool + + def __init__( + self, + path: Path, + label: str | None = None, + early_return: Callable[[], Awaitable[Any]] | None = None, + ) -> None: + self.path = path.as_list() + self.label = label + self.id = None + self.errors = [] + self.early_return = early_return + self.pending_sent = False def __repr__(self) -> str: name = self.__class__.__name__ - args: list[str] = [f"path={self.path!r}"] + args: list[str] = [] + if self.path: + args.append(f"path={self.path!r}") if self.label: args.append(f"label={self.label!r}") - if self.parent_context: - args.append("parent_context") - if self.data is not None: - args.append("data") return f"{name}({', '.join(args)})" class StreamItemsRecord: - """A record collecting items marked with the stream directive""" + """Stream items record""" errors: list[GraphQLError] - label: str | None + stream_record: StreamRecord path: list[str | int] - items: list[str] | None - parent_context: IncrementalDataRecord | None - children: dict[IncrementalDataRecord, None] - async_iterator: AsyncIterator[Any] | None + items: list[str] + children: dict[SubsequentResultRecord, None] + is_final_record: bool is_completed_async_iterator: bool is_completed: bool + filtered: bool def __init__( self, - label: str | None, - path: Path | None, - parent_context: IncrementalDataRecord | None, - async_iterator: 
AsyncIterator[Any] | None = None, + stream_record: StreamRecord, + path: Path | None = None, ) -> None: - self.label = label + self.stream_record = stream_record self.path = path.as_list() if path else [] - self.parent_context = parent_context - self.async_iterator = async_iterator - self.errors = [] self.children = {} + self.errors = [] self.is_completed_async_iterator = self.is_completed = False - self.items = None + self.is_final_record = self.filtered = False def __repr__(self) -> str: name = self.__class__.__name__ - args: list[str] = [f"path={self.path!r}"] - if self.label: - args.append(f"label={self.label!r}") - if self.parent_context: - args.append("parent_context") - if self.items is not None: - args.append("items") + args: list[str] = [f"stream_record={self.stream_record!r}"] + if self.path: + args.append(f"path={self.path!r}") return f"{name}({', '.join(args)})" -IncrementalDataRecord = Union[DeferredFragmentRecord, StreamItemsRecord] +IncrementalDataRecord = Union[ + InitialResultRecord, DeferredGroupedFieldSetRecord, StreamItemsRecord +] + +SubsequentResultRecord = Union[DeferredFragmentRecord, StreamItemsRecord] diff --git a/src/graphql/execution/middleware.py b/src/graphql/execution/middleware.py index de99e12b..6d999171 100644 --- a/src/graphql/execution/middleware.py +++ b/src/graphql/execution/middleware.py @@ -30,7 +30,7 @@ class MiddlewareManager: """ # allow custom attributes (not used internally) - __slots__ = "__dict__", "middlewares", "_middleware_resolvers", "_cached_resolvers" + __slots__ = "__dict__", "_cached_resolvers", "_middleware_resolvers", "middlewares" _cached_resolvers: dict[GraphQLFieldResolver, GraphQLFieldResolver] _middleware_resolvers: list[Callable] | None diff --git a/src/graphql/execution/values.py b/src/graphql/execution/values.py index 4810a8bd..5309996a 100644 --- a/src/graphql/execution/values.py +++ b/src/graphql/execution/values.py @@ -26,6 +26,7 @@ GraphQLDirective, GraphQLField, GraphQLSchema, + is_input_object_type, is_input_type, is_non_null_type, ) @@ -128,16 +129,20 @@ def coerce_variable_values( continue def on_input_value_error( - path: list[str | int], invalid_value: Any, error: GraphQLError + path: list[str | int], + invalid_value: Any, + error: GraphQLError, + var_name: str = var_name, + var_def_node: VariableDefinitionNode = var_def_node, ) -> None: invalid_str = inspect(invalid_value) - prefix = f"Variable '${var_name}' got invalid value {invalid_str}" # noqa: B023 + prefix = f"Variable '${var_name}' got invalid value {invalid_str}" if path: - prefix += f" at '{var_name}{print_path_list(path)}'" # noqa: B023 + prefix += f" at '{var_name}{print_path_list(path)}'" on_error( GraphQLError( prefix + "; " + error.message, - var_def_node, # noqa: B023 + var_def_node, original_error=error, ) ) @@ -167,12 +172,15 @@ def get_argument_values( argument_node = arg_node_map.get(name) if argument_node is None: - if arg_def.default_value is not Undefined: - coerced_values[arg_def.out_name or name] = arg_def.default_value + value = arg_def.default_value + if value is not Undefined: + if is_input_object_type(arg_def.type): + # coerce input value so that out_names are used + value = coerce_input_value(value, arg_def.type) + coerced_values[arg_def.out_name or name] = value elif is_non_null_type(arg_type): # pragma: no cover else msg = ( - f"Argument '{name}' of required type '{arg_type}'" - " was not provided." + f"Argument '{name}' of required type '{arg_type}' was not provided." 
) raise GraphQLError(msg, node) continue # pragma: no cover @@ -183,8 +191,12 @@ def get_argument_values( if isinstance(value_node, VariableNode): variable_name = value_node.name.value if variable_values is None or variable_name not in variable_values: - if arg_def.default_value is not Undefined: - coerced_values[arg_def.out_name or name] = arg_def.default_value + value = arg_def.default_value + if value is not Undefined: + if is_input_object_type(arg_def.type): + # coerce input value so that out_names are used + value = coerce_input_value(value, arg_def.type) + coerced_values[arg_def.out_name or name] = value elif is_non_null_type(arg_type): # pragma: no cover else msg = ( f"Argument '{name}' of required type '{arg_type}'" @@ -193,7 +205,8 @@ def get_argument_values( ) raise GraphQLError(msg, value_node) continue # pragma: no cover - is_null = variable_values[variable_name] is None + variable_value = variable_values[variable_name] + is_null = variable_value is None or variable_value is Undefined if is_null and is_non_null_type(arg_type): msg = f"Argument '{name}' of non-null type '{arg_type}' must not be null." diff --git a/src/graphql/graphql.py b/src/graphql/graphql.py index aacc7326..fe1dd5c7 100644 --- a/src/graphql/graphql.py +++ b/src/graphql/graphql.py @@ -96,9 +96,9 @@ async def graphql( ) if default_is_awaitable(result): - return await cast(Awaitable[ExecutionResult], result) + return await cast("Awaitable[ExecutionResult]", result) - return cast(ExecutionResult, result) + return cast("ExecutionResult", result) def assume_not_awaitable(_value: Any) -> bool: @@ -149,11 +149,11 @@ def graphql_sync( # Assert that the execution was synchronous. if default_is_awaitable(result): - ensure_future(cast(Awaitable[ExecutionResult], result)).cancel() + ensure_future(cast("Awaitable[ExecutionResult]", result)).cancel() msg = "GraphQL execution failed to complete synchronously." 
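The `on_input_value_error` hunk above drops the `# noqa: B023` suppressions by binding `var_name` and `var_def_node` as keyword defaults. This is the standard fix for Python's late-binding closures; a minimal sketch of the pitfall and the remedy (illustrative values only, not code from this diff):

# Late binding: every closure sees the final value of the loop variable.
fns = [lambda: i for i in range(3)]
print([f() for f in fns])  # [2, 2, 2]

# Early binding via a default argument, as in the values.py hunk above:
fns = [lambda i=i: i for i in range(3)]
print([f() for f in fns])  # [0, 1, 2]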
raise RuntimeError(msg) - return cast(ExecutionResult, result) + return cast("ExecutionResult", result) def graphql_impl( diff --git a/src/graphql/language/__init__.py b/src/graphql/language/__init__.py index 2f105a98..bd5e7be1 100644 --- a/src/graphql/language/__init__.py +++ b/src/graphql/language/__init__.py @@ -115,104 +115,104 @@ from .directive_locations import DirectiveLocation __all__ = [ - "get_location", - "SourceLocation", - "FormattedSourceLocation", - "print_location", - "print_source_location", - "TokenKind", - "Lexer", - "parse", - "parse_value", - "parse_const_value", - "parse_type", - "print_ast", - "Source", - "visit", - "Visitor", - "ParallelVisitor", - "VisitorAction", - "VisitorKeyMap", "BREAK", - "SKIP", - "REMOVE", "IDLE", - "Location", - "Token", + "REMOVE", + "SKIP", + "ArgumentNode", + "BooleanValueNode", + "ConstArgumentNode", + "ConstDirectiveNode", + "ConstListValueNode", + "ConstObjectFieldNode", + "ConstObjectValueNode", + "ConstValueNode", + "DefinitionNode", + "DirectiveDefinitionNode", "DirectiveLocation", - "Node", - "NameNode", + "DirectiveNode", "DocumentNode", - "DefinitionNode", + "EnumTypeDefinitionNode", + "EnumTypeExtensionNode", + "EnumValueDefinitionNode", + "EnumValueNode", + "ErrorBoundaryNode", "ExecutableDefinitionNode", - "OperationDefinitionNode", - "OperationType", - "VariableDefinitionNode", - "VariableNode", - "SelectionSetNode", - "SelectionNode", + "FieldDefinitionNode", "FieldNode", - "NullabilityAssertionNode", - "NonNullAssertionNode", - "ErrorBoundaryNode", - "ListNullabilityOperatorNode", - "ArgumentNode", - "ConstArgumentNode", + "FloatValueNode", + "FormattedSourceLocation", + "FragmentDefinitionNode", "FragmentSpreadNode", "InlineFragmentNode", - "FragmentDefinitionNode", - "ValueNode", - "ConstValueNode", + "InputObjectTypeDefinitionNode", + "InputObjectTypeExtensionNode", + "InputValueDefinitionNode", "IntValueNode", - "FloatValueNode", - "StringValueNode", - "BooleanValueNode", - "NullValueNode", - "EnumValueNode", + "InterfaceTypeDefinitionNode", + "InterfaceTypeExtensionNode", + "Lexer", + "ListNullabilityOperatorNode", + "ListTypeNode", "ListValueNode", - "ConstListValueNode", - "ObjectValueNode", - "ConstObjectValueNode", - "ObjectFieldNode", - "ConstObjectFieldNode", - "DirectiveNode", - "ConstDirectiveNode", - "TypeNode", + "Location", + "NameNode", "NamedTypeNode", - "ListTypeNode", + "Node", + "NonNullAssertionNode", "NonNullTypeNode", - "TypeSystemDefinitionNode", - "SchemaDefinitionNode", + "NullValueNode", + "NullabilityAssertionNode", + "ObjectFieldNode", + "ObjectTypeDefinitionNode", + "ObjectTypeExtensionNode", + "ObjectValueNode", + "OperationDefinitionNode", + "OperationType", "OperationTypeDefinitionNode", - "TypeDefinitionNode", + "ParallelVisitor", "ScalarTypeDefinitionNode", - "ObjectTypeDefinitionNode", - "FieldDefinitionNode", - "InputValueDefinitionNode", - "InterfaceTypeDefinitionNode", - "UnionTypeDefinitionNode", - "EnumTypeDefinitionNode", - "EnumValueDefinitionNode", - "InputObjectTypeDefinitionNode", - "DirectiveDefinitionNode", - "TypeSystemExtensionNode", + "ScalarTypeExtensionNode", + "SchemaDefinitionNode", "SchemaExtensionNode", + "SelectionNode", + "SelectionSetNode", + "Source", + "SourceLocation", + "StringValueNode", + "Token", + "TokenKind", + "TypeDefinitionNode", "TypeExtensionNode", - "ScalarTypeExtensionNode", - "ObjectTypeExtensionNode", - "InterfaceTypeExtensionNode", + "TypeNode", + "TypeSystemDefinitionNode", + "TypeSystemExtensionNode", + "UnionTypeDefinitionNode", 
"UnionTypeExtensionNode", - "EnumTypeExtensionNode", - "InputObjectTypeExtensionNode", + "ValueNode", + "VariableDefinitionNode", + "VariableNode", + "Visitor", + "VisitorAction", + "VisitorKeyMap", + "get_location", + "is_const_value_node", "is_definition_node", "is_executable_definition_node", "is_nullability_assertion_node", "is_selection_node", - "is_value_node", - "is_const_value_node", + "is_type_definition_node", + "is_type_extension_node", "is_type_node", "is_type_system_definition_node", - "is_type_definition_node", "is_type_system_extension_node", - "is_type_extension_node", + "is_value_node", + "parse", + "parse_const_value", + "parse_type", + "parse_value", + "print_ast", + "print_location", + "print_source_location", + "visit", ] diff --git a/src/graphql/language/ast.py b/src/graphql/language/ast.py index 5b61767d..a67ee1ea 100644 --- a/src/graphql/language/ast.py +++ b/src/graphql/language/ast.py @@ -19,73 +19,73 @@ __all__ = [ - "Location", - "Token", - "Node", - "NameNode", - "DocumentNode", + "QUERY_DOCUMENT_KEYS", + "ArgumentNode", + "BooleanValueNode", + "ConstArgumentNode", + "ConstDirectiveNode", + "ConstListValueNode", + "ConstObjectFieldNode", + "ConstObjectValueNode", + "ConstValueNode", "DefinitionNode", + "DirectiveDefinitionNode", + "DirectiveNode", + "DocumentNode", + "EnumTypeDefinitionNode", + "EnumTypeExtensionNode", + "EnumValueDefinitionNode", + "EnumValueNode", + "ErrorBoundaryNode", "ExecutableDefinitionNode", - "OperationDefinitionNode", - "VariableDefinitionNode", - "SelectionSetNode", - "SelectionNode", + "FieldDefinitionNode", "FieldNode", - "NullabilityAssertionNode", - "NonNullAssertionNode", - "ErrorBoundaryNode", - "ListNullabilityOperatorNode", - "ArgumentNode", - "ConstArgumentNode", + "FloatValueNode", + "FragmentDefinitionNode", "FragmentSpreadNode", "InlineFragmentNode", - "FragmentDefinitionNode", - "ValueNode", - "ConstValueNode", - "VariableNode", + "InputObjectTypeDefinitionNode", + "InputObjectTypeExtensionNode", + "InputValueDefinitionNode", "IntValueNode", - "FloatValueNode", - "StringValueNode", - "BooleanValueNode", - "NullValueNode", - "EnumValueNode", + "InterfaceTypeDefinitionNode", + "InterfaceTypeExtensionNode", + "ListNullabilityOperatorNode", + "ListTypeNode", "ListValueNode", - "ConstListValueNode", - "ObjectValueNode", - "ConstObjectValueNode", - "ObjectFieldNode", - "ConstObjectFieldNode", - "DirectiveNode", - "ConstDirectiveNode", - "TypeNode", + "Location", + "NameNode", "NamedTypeNode", - "ListTypeNode", + "Node", + "NonNullAssertionNode", "NonNullTypeNode", - "TypeSystemDefinitionNode", - "SchemaDefinitionNode", + "NullValueNode", + "NullabilityAssertionNode", + "ObjectFieldNode", + "ObjectTypeDefinitionNode", + "ObjectTypeExtensionNode", + "ObjectValueNode", + "OperationDefinitionNode", "OperationType", "OperationTypeDefinitionNode", - "TypeDefinitionNode", "ScalarTypeDefinitionNode", - "ObjectTypeDefinitionNode", - "FieldDefinitionNode", - "InputValueDefinitionNode", - "InterfaceTypeDefinitionNode", - "UnionTypeDefinitionNode", - "EnumTypeDefinitionNode", - "EnumValueDefinitionNode", - "InputObjectTypeDefinitionNode", - "DirectiveDefinitionNode", + "ScalarTypeExtensionNode", + "SchemaDefinitionNode", "SchemaExtensionNode", + "SelectionNode", + "SelectionSetNode", + "StringValueNode", + "Token", + "TypeDefinitionNode", "TypeExtensionNode", + "TypeNode", + "TypeSystemDefinitionNode", "TypeSystemExtensionNode", - "ScalarTypeExtensionNode", - "ObjectTypeExtensionNode", - "InterfaceTypeExtensionNode", + 
"UnionTypeDefinitionNode", "UnionTypeExtensionNode", - "EnumTypeExtensionNode", - "InputObjectTypeExtensionNode", - "QUERY_DOCUMENT_KEYS", + "ValueNode", + "VariableDefinitionNode", + "VariableNode", ] @@ -95,7 +95,7 @@ class Token: Represents a range of characters represented by a lexical token within a Source. """ - __slots__ = "kind", "start", "end", "line", "column", "prev", "next", "value" + __slots__ = "column", "end", "kind", "line", "next", "prev", "start", "value" kind: TokenKind # the kind of token start: int # the character offset at which this Node begins @@ -202,11 +202,11 @@ class Location: """ __slots__ = ( - "start", "end", - "start_token", "end_token", "source", + "start", + "start_token", ) start: int # character offset at which this Node begins @@ -345,7 +345,7 @@ class Node: """AST nodes""" # allow custom attributes and weak references (not used internally) - __slots__ = "__dict__", "__weakref__", "loc", "_hash" + __slots__ = "__dict__", "__weakref__", "_hash", "loc" loc: Location | None @@ -457,7 +457,7 @@ class DefinitionNode(Node): class ExecutableDefinitionNode(DefinitionNode): - __slots__ = "name", "directives", "variable_definitions", "selection_set" + __slots__ = "directives", "name", "selection_set", "variable_definitions" name: NameNode | None directives: tuple[DirectiveNode, ...] @@ -472,7 +472,7 @@ class OperationDefinitionNode(ExecutableDefinitionNode): class VariableDefinitionNode(Node): - __slots__ = "variable", "type", "default_value", "directives" + __slots__ = "default_value", "directives", "type", "variable" variable: VariableNode type: TypeNode @@ -493,7 +493,7 @@ class SelectionNode(Node): class FieldNode(SelectionNode): - __slots__ = "alias", "name", "arguments", "nullability_assertion", "selection_set" + __slots__ = "alias", "arguments", "name", "nullability_assertion", "selection_set" alias: NameNode | None name: NameNode @@ -542,7 +542,7 @@ class FragmentSpreadNode(SelectionNode): class InlineFragmentNode(SelectionNode): - __slots__ = "type_condition", "selection_set" + __slots__ = "selection_set", "type_condition" type_condition: NamedTypeNode selection_set: SelectionSetNode @@ -581,7 +581,7 @@ class FloatValueNode(ValueNode): class StringValueNode(ValueNode): - __slots__ = "value", "block" + __slots__ = "block", "value" value: str block: bool | None @@ -650,7 +650,7 @@ class ConstObjectFieldNode(ObjectFieldNode): class DirectiveNode(Node): - __slots__ = "name", "arguments" + __slots__ = "arguments", "name" name: NameNode arguments: tuple[ArgumentNode, ...] @@ -711,7 +711,7 @@ class OperationTypeDefinitionNode(Node): class TypeDefinitionNode(TypeSystemDefinitionNode): - __slots__ = "description", "name", "directives" + __slots__ = "description", "directives", "name" description: StringValueNode | None name: NameNode @@ -725,7 +725,7 @@ class ScalarTypeDefinitionNode(TypeDefinitionNode): class ObjectTypeDefinitionNode(TypeDefinitionNode): - __slots__ = "interfaces", "fields" + __slots__ = "fields", "interfaces" interfaces: tuple[NamedTypeNode, ...] directives: tuple[ConstDirectiveNode, ...] 
@@ -733,7 +733,7 @@ class ObjectTypeDefinitionNode(TypeDefinitionNode): class FieldDefinitionNode(DefinitionNode): - __slots__ = "description", "name", "directives", "arguments", "type" + __slots__ = "arguments", "description", "directives", "name", "type" description: StringValueNode | None name: NameNode @@ -743,7 +743,7 @@ class FieldDefinitionNode(DefinitionNode): class InputValueDefinitionNode(DefinitionNode): - __slots__ = "description", "name", "directives", "type", "default_value" + __slots__ = "default_value", "description", "directives", "name", "type" description: StringValueNode | None name: NameNode @@ -775,7 +775,7 @@ class EnumTypeDefinitionNode(TypeDefinitionNode): class EnumValueDefinitionNode(DefinitionNode): - __slots__ = "description", "name", "directives" + __slots__ = "description", "directives", "name" description: StringValueNode | None name: NameNode @@ -793,7 +793,7 @@ class InputObjectTypeDefinitionNode(TypeDefinitionNode): class DirectiveDefinitionNode(TypeSystemDefinitionNode): - __slots__ = "description", "name", "arguments", "repeatable", "locations" + __slots__ = "arguments", "description", "locations", "name", "repeatable" description: StringValueNode | None name: NameNode @@ -816,7 +816,7 @@ class SchemaExtensionNode(Node): class TypeExtensionNode(TypeSystemDefinitionNode): - __slots__ = "name", "directives" + __slots__ = "directives", "name" name: NameNode directives: tuple[ConstDirectiveNode, ...] @@ -830,14 +830,14 @@ class ScalarTypeExtensionNode(TypeExtensionNode): class ObjectTypeExtensionNode(TypeExtensionNode): - __slots__ = "interfaces", "fields" + __slots__ = "fields", "interfaces" interfaces: tuple[NamedTypeNode, ...] fields: tuple[FieldDefinitionNode, ...] class InterfaceTypeExtensionNode(TypeExtensionNode): - __slots__ = "interfaces", "fields" + __slots__ = "fields", "interfaces" interfaces: tuple[NamedTypeNode, ...] fields: tuple[FieldDefinitionNode, ...] 
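As the retained comment in the `Node` hunk above says, the `__dict__` and `__weakref__` slots exist only so that user code can attach custom attributes and weak references to AST nodes; graphql-core itself does not use them. A small sketch (the attribute name is made up):

import weakref

from graphql import parse

document = parse("{ hello }")
document.custom_flag = True  # possible thanks to the "__dict__" slot
ref = weakref.ref(document)  # possible thanks to the "__weakref__" slot
assert ref() is document and document.custom_flag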
diff --git a/src/graphql/language/block_string.py b/src/graphql/language/block_string.py index d784c236..248927b4 100644 --- a/src/graphql/language/block_string.py +++ b/src/graphql/language/block_string.py @@ -149,8 +149,7 @@ def print_block_string(value: str, minimize: bool = False) -> str: skip_leading_new_line = is_single_line and value and value[0] in " \t" before = ( "\n" - if print_as_multiple_lines - and not skip_leading_new_line + if (print_as_multiple_lines and not skip_leading_new_line) or force_leading_new_line else "" ) diff --git a/src/graphql/language/character_classes.py b/src/graphql/language/character_classes.py index 628bd60f..5d870576 100644 --- a/src/graphql/language/character_classes.py +++ b/src/graphql/language/character_classes.py @@ -1,6 +1,6 @@ """Character classes""" -__all__ = ["is_digit", "is_letter", "is_name_start", "is_name_continue"] +__all__ = ["is_digit", "is_letter", "is_name_continue", "is_name_start"] def is_digit(char: str) -> bool: diff --git a/src/graphql/language/lexer.py b/src/graphql/language/lexer.py index f93bd3b7..9ec37427 100644 --- a/src/graphql/language/lexer.py +++ b/src/graphql/language/lexer.py @@ -342,7 +342,7 @@ def read_escaped_unicode_variable_width(self, position: int) -> EscapeSequence: raise GraphQLSyntaxError( self.source, position, - f"Invalid Unicode escape sequence: '{body[position: position + size]}'.", + f"Invalid Unicode escape sequence: '{body[position : position + size]}'.", ) def read_escaped_unicode_fixed_width(self, position: int) -> EscapeSequence: @@ -368,7 +368,7 @@ def read_escaped_unicode_fixed_width(self, position: int) -> EscapeSequence: raise GraphQLSyntaxError( self.source, position, - f"Invalid Unicode escape sequence: '{body[position: position + 6]}'.", + f"Invalid Unicode escape sequence: '{body[position : position + 6]}'.", ) def read_escaped_character(self, position: int) -> EscapeSequence: @@ -380,7 +380,7 @@ def read_escaped_character(self, position: int) -> EscapeSequence: raise GraphQLSyntaxError( self.source, position, - f"Invalid character escape sequence: '{body[position: position + 2]}'.", + f"Invalid character escape sequence: '{body[position : position + 2]}'.", ) def read_block_string(self, start: int) -> Token: diff --git a/src/graphql/language/location.py b/src/graphql/language/location.py index 8b1ee38d..f4369974 100644 --- a/src/graphql/language/location.py +++ b/src/graphql/language/location.py @@ -12,7 +12,7 @@ if TYPE_CHECKING: from .source import Source -__all__ = ["get_location", "SourceLocation", "FormattedSourceLocation"] +__all__ = ["FormattedSourceLocation", "SourceLocation", "get_location"] class FormattedSourceLocation(TypedDict): @@ -41,6 +41,9 @@ def __eq__(self, other: object) -> bool: def __ne__(self, other: object) -> bool: return not self == other + def __hash__(self) -> int: + return hash((self.line, self.column)) + def get_location(source: Source, position: int) -> SourceLocation: """Get the line and column for a character position in the source. 
diff --git a/src/graphql/language/parser.py b/src/graphql/language/parser.py index 78d308d0..59299a1d 100644 --- a/src/graphql/language/parser.py +++ b/src/graphql/language/parser.py @@ -77,7 +77,7 @@ from typing_extensions import TypeAlias -__all__ = ["parse", "parse_type", "parse_value", "parse_const_value"] +__all__ = ["parse", "parse_const_value", "parse_type", "parse_value"] T = TypeVar("T") @@ -255,7 +255,7 @@ def __init__( experimental_client_controlled_nullability: bool = False, ) -> None: if not is_source(source): - source = Source(cast(str, source)) + source = Source(cast("str", source)) self._no_location = no_location self._max_tokens = max_tokens @@ -319,7 +319,7 @@ def parse_definition(self) -> DefinitionNode: ) if keyword_token.kind is TokenKind.NAME: - token_name = cast(str, keyword_token.value) + token_name = cast("str", keyword_token.value) method_name = self._parse_type_system_definition_method_names.get( token_name ) @@ -471,8 +471,11 @@ def parse_nullability_assertion(self) -> NullabilityAssertionNode | None: def parse_arguments(self, is_const: bool) -> list[ArgumentNode]: """Arguments[Const]: (Argument[?Const]+)""" item = self.parse_const_argument if is_const else self.parse_argument - item = cast(Callable[[], ArgumentNode], item) - return self.optional_many(TokenKind.PAREN_L, item, TokenKind.PAREN_R) + return self.optional_many( + TokenKind.PAREN_L, + cast("Callable[[], ArgumentNode]", item), + TokenKind.PAREN_R, + ) def parse_argument(self, is_const: bool = False) -> ArgumentNode: """Argument[Const]: Name : Value[?Const]""" @@ -486,7 +489,7 @@ def parse_argument(self, is_const: bool = False) -> ArgumentNode: def parse_const_argument(self) -> ConstArgumentNode: """Argument[Const]: Name : Value[Const]""" - return cast(ConstArgumentNode, self.parse_argument(True)) + return cast("ConstArgumentNode", self.parse_argument(True)) # Implement the parsing rules in the Fragments section. @@ -640,7 +643,7 @@ def parse_variable_value(self, is_const: bool) -> VariableNode: return self.parse_variable() def parse_const_value_literal(self) -> ConstValueNode: - return cast(ConstValueNode, self.parse_value_literal(True)) + return cast("ConstValueNode", self.parse_value_literal(True)) # Implement the parsing rules in the Directives section. 
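The parser hunks here, like many others throughout this diff, pass the target type to `cast` as a string. Since `typing.cast` is a pure no-op at runtime that simply returns its second argument, a quoted type behaves identically while avoiding any runtime evaluation of the type expression; a quick demonstration:

from typing import List, cast

value: object = [1, 2, 3]
ints = cast("List[int]", value)  # same runtime effect as cast(List[int], value)
assert ints is value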
@@ -653,7 +656,7 @@ def parse_directives(self, is_const: bool) -> list[DirectiveNode]: return directives def parse_const_directives(self) -> list[ConstDirectiveNode]: - return cast(List[ConstDirectiveNode], self.parse_directives(True)) + return cast("List[ConstDirectiveNode]", self.parse_directives(True)) def parse_directive(self, is_const: bool) -> DirectiveNode: """Directive[Const]: @ Name Arguments[?Const]?""" @@ -703,7 +706,7 @@ def parse_type_system_extension(self) -> TypeSystemExtensionNode: keyword_token = self._lexer.lookahead() if keyword_token.kind == TokenKind.NAME: method_name = self._parse_type_extension_method_names.get( - cast(str, keyword_token.value) + cast("str", keyword_token.value) ) if method_name: # pragma: no cover return getattr(self, f"parse_{method_name}")() diff --git a/src/graphql/language/predicates.py b/src/graphql/language/predicates.py index b65b1982..280662f8 100644 --- a/src/graphql/language/predicates.py +++ b/src/graphql/language/predicates.py @@ -26,17 +26,17 @@ __all__ = [ + "is_const_value_node", "is_definition_node", "is_executable_definition_node", "is_nullability_assertion_node", "is_selection_node", - "is_value_node", - "is_const_value_node", + "is_type_definition_node", + "is_type_extension_node", "is_type_node", "is_type_system_definition_node", - "is_type_definition_node", "is_type_system_extension_node", - "is_type_extension_node", + "is_value_node", ] diff --git a/src/graphql/language/print_location.py b/src/graphql/language/print_location.py index 03509732..21fb1b8a 100644 --- a/src/graphql/language/print_location.py +++ b/src/graphql/language/print_location.py @@ -73,7 +73,7 @@ def print_source_location(source: Source, source_location: SourceLocation) -> st def print_prefixed_lines(*lines: tuple[str, str | None]) -> str: """Print lines specified like this: ("prefix", "string")""" existing_lines = [ - cast(Tuple[str, str], line) for line in lines if line[1] is not None + cast("Tuple[str, str]", line) for line in lines if line[1] is not None ] pad_len = max(len(line[0]) for line in existing_lines) return "\n".join( diff --git a/src/graphql/language/source.py b/src/graphql/language/source.py index 01bb013f..17d5e15d 100644 --- a/src/graphql/language/source.py +++ b/src/graphql/language/source.py @@ -21,7 +21,7 @@ class Source: """A representation of source input to GraphQL.""" # allow custom attributes and weak references (not used internally) - __slots__ = "__weakref__", "__dict__", "body", "name", "location_offset" + __slots__ = "__dict__", "__weakref__", "body", "location_offset", "name" def __init__( self, @@ -72,6 +72,9 @@ def __eq__(self, other: object) -> bool: def __ne__(self, other: object) -> bool: return not self == other + def __hash__(self) -> int: + return hash(self.body) + def is_source(source: Any) -> TypeGuard[Source]: """Test if the given value is a Source object. 
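With the `__hash__` methods added here and in `location.py` above, `Source` and `SourceLocation` again satisfy the hash/eq contract (defining a custom `__eq__` implicitly sets `__hash__` to `None`), so both can serve as set members or dict keys. A quick sanity check, assuming the hash definitions shown in the hunks:

from graphql import Source, SourceLocation

# SourceLocation hashes on (line, column), Source on its body:
assert len({SourceLocation(1, 2), SourceLocation(1, 2)}) == 1

s1, s2 = Source("{ hello }"), Source("{ hello }")
assert s1 == s2 and hash(s1) == hash(s2)
assert len({s1, s2}) == 1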
diff --git a/src/graphql/language/visitor.py b/src/graphql/language/visitor.py index be410466..c9901230 100644 --- a/src/graphql/language/visitor.py +++ b/src/graphql/language/visitor.py @@ -25,15 +25,15 @@ __all__ = [ - "Visitor", + "BREAK", + "IDLE", + "REMOVE", + "SKIP", "ParallelVisitor", + "Visitor", "VisitorAction", "VisitorKeyMap", "visit", - "BREAK", - "SKIP", - "REMOVE", - "IDLE", ] @@ -289,7 +289,7 @@ def visit( else: stack = Stack(in_array, idx, keys, edits, stack) in_array = isinstance(node, tuple) - keys = node if in_array else visitor_keys.get(node.kind, ()) + keys = node if in_array else visitor_keys.get(node.kind, ()) # type: ignore idx = -1 edits = [] if parent: diff --git a/src/graphql/pyutils/__init__.py b/src/graphql/pyutils/__init__.py index e1aefd6a..e24418ef 100644 --- a/src/graphql/pyutils/__init__.py +++ b/src/graphql/pyutils/__init__.py @@ -9,6 +9,7 @@ """ from .async_reduce import async_reduce +from .gather_with_cancel import gather_with_cancel from .convert_case import camel_to_snake, snake_to_camel from .cached_property import cached_property from .description import ( @@ -33,34 +34,39 @@ from .print_path_list import print_path_list from .simple_pub_sub import SimplePubSub, SimplePubSubIterator from .undefined import Undefined, UndefinedType +from .ref_map import RefMap +from .ref_set import RefSet __all__ = [ + "AwaitableOrValue", + "Description", + "FrozenError", + "Path", + "RefMap", + "RefSet", + "SimplePubSub", + "SimplePubSubIterator", + "Undefined", + "UndefinedType", + "and_list", "async_reduce", - "camel_to_snake", - "snake_to_camel", "cached_property", + "camel_to_snake", "did_you_mean", - "or_list", - "and_list", - "Description", + "gather_with_cancel", "group_by", - "is_description", - "register_description", - "unregister_description", "identity_func", "inspect", "is_awaitable", "is_collection", + "is_description", "is_iterable", "merge_kwargs", "natural_comparison_key", - "AwaitableOrValue", - "suggestion_list", - "FrozenError", - "Path", + "or_list", "print_path_list", - "SimplePubSub", - "SimplePubSubIterator", - "Undefined", - "UndefinedType", + "register_description", + "snake_to_camel", + "suggestion_list", + "unregister_description", ] diff --git a/src/graphql/pyutils/async_reduce.py b/src/graphql/pyutils/async_reduce.py index 33d97f9c..4eb79748 100644 --- a/src/graphql/pyutils/async_reduce.py +++ b/src/graphql/pyutils/async_reduce.py @@ -36,10 +36,12 @@ def async_reduce( async def async_callback( current_accumulator: Awaitable[U], current_value: T ) -> U: - result = callback(await current_accumulator, current_value) - return await cast(Awaitable, result) if is_awaitable(result) else result + result: AwaitableOrValue[U] = callback( + await current_accumulator, current_value + ) + return await result if is_awaitable(result) else result # type: ignore - accumulator = async_callback(cast(Awaitable[U], accumulator), value) + accumulator = async_callback(cast("Awaitable[U]", accumulator), value) else: - accumulator = callback(cast(U, accumulator), value) + accumulator = callback(cast("U", accumulator), value) return accumulator diff --git a/src/graphql/pyutils/description.py b/src/graphql/pyutils/description.py index 812d61fe..9d43a86d 100644 --- a/src/graphql/pyutils/description.py +++ b/src/graphql/pyutils/description.py @@ -51,7 +51,7 @@ def unregister(cls, base: type) -> None: msg = "Only types can be unregistered." 
raise TypeError(msg) if isinstance(cls.bases, tuple): - if base in cls.bases: + if base in cls.bases: # pragma: no branch cls.bases = tuple(b for b in cls.bases if b is not base) if not cls.bases: cls.bases = object diff --git a/src/graphql/pyutils/format_list.py b/src/graphql/pyutils/format_list.py index 87184728..368e7ae0 100644 --- a/src/graphql/pyutils/format_list.py +++ b/src/graphql/pyutils/format_list.py @@ -4,7 +4,7 @@ from typing import Sequence -__all__ = ["or_list", "and_list"] +__all__ = ["and_list", "or_list"] def or_list(items: Sequence[str]) -> str: diff --git a/src/graphql/pyutils/gather_with_cancel.py b/src/graphql/pyutils/gather_with_cancel.py new file mode 100644 index 00000000..f318b28f --- /dev/null +++ b/src/graphql/pyutils/gather_with_cancel.py @@ -0,0 +1,36 @@ +"""Run awaitables concurrently with cancellation support.""" + +from __future__ import annotations + +from asyncio import Task, create_task, gather +from typing import Any, Awaitable + +__all__ = ["gather_with_cancel"] + + +async def gather_with_cancel(*awaitables: Awaitable[Any]) -> list[Any]: + """Run the given awaitable objects concurrently. + + The first raised exception is immediately propagated to the task that awaits + on this function, and all pending awaitables in the sequence will be cancelled. + + This is different from the default behavior of `asyncio.gather`, which waits + for all tasks to complete even if one of them raises an exception. It is also + different from `asyncio.gather` with `return_exceptions` set, which does not + cancel the other tasks when one of them raises an exception. + """ + try: + tasks: list[Task[Any]] = [ + aw if isinstance(aw, Task) else create_task(aw) # type: ignore[arg-type] + for aw in awaitables + ] + except TypeError: + return await gather(*awaitables) + try: + return await gather(*tasks) + except Exception: + for task in tasks: + if not task.done(): + task.cancel() + await gather(*tasks, return_exceptions=True) + raise diff --git a/src/graphql/pyutils/identity_func.py b/src/graphql/pyutils/identity_func.py index 2876c570..1a13936b 100644 --- a/src/graphql/pyutils/identity_func.py +++ b/src/graphql/pyutils/identity_func.py @@ -11,7 +11,7 @@ T = TypeVar("T") -DEFAULT_VALUE = cast(Any, Undefined) +DEFAULT_VALUE = cast("Any", Undefined) def identity_func(x: T = DEFAULT_VALUE, *_args: Any) -> T: diff --git a/src/graphql/pyutils/inspect.py b/src/graphql/pyutils/inspect.py index ed4920be..37b95f9b 100644 --- a/src/graphql/pyutils/inspect.py +++ b/src/graphql/pyutils/inspect.py @@ -171,7 +171,7 @@ def trunc_list(s: list) -> list: if len(s) > max_list_size: i = max_list_size // 2 j = i - 1 - s = s[:i] + [ELLIPSIS] + s[-j:] + s = [*s[:i], ELLIPSIS, *s[-j:]] return s diff --git a/src/graphql/pyutils/is_awaitable.py b/src/graphql/pyutils/is_awaitable.py index ce8c93c0..158bcd40 100644 --- a/src/graphql/pyutils/is_awaitable.py +++ b/src/graphql/pyutils/is_awaitable.py @@ -27,8 +27,10 @@ def is_awaitable(value: Any) -> TypeGuard[Awaitable]: # check for coroutine objects isinstance(value, CoroutineType) # check for old-style generator based coroutine objects - or isinstance(value, GeneratorType) # for Python < 3.11 - and bool(value.gi_code.co_flags & CO_ITERABLE_COROUTINE) + or ( + isinstance(value, GeneratorType) # for Python < 3.11 + and bool(value.gi_code.co_flags & CO_ITERABLE_COROUTINE) + ) # check for other awaitables (e.g.
futures) or hasattr(value, "__await__") ) diff --git a/src/graphql/pyutils/merge_kwargs.py b/src/graphql/pyutils/merge_kwargs.py index c7cace3e..21144524 100644 --- a/src/graphql/pyutils/merge_kwargs.py +++ b/src/graphql/pyutils/merge_kwargs.py @@ -9,4 +9,4 @@ def merge_kwargs(base_dict: T, **kwargs: Any) -> T: """Return arbitrary typed dictionary with some keyword args merged in.""" - return cast(T, {**cast(Dict, base_dict), **kwargs}) + return cast("T", {**cast("Dict", base_dict), **kwargs}) diff --git a/src/graphql/pyutils/ref_map.py b/src/graphql/pyutils/ref_map.py new file mode 100644 index 00000000..0cffd533 --- /dev/null +++ b/src/graphql/pyutils/ref_map.py @@ -0,0 +1,79 @@ +"""A Map class that works similarly to JavaScript's Map.""" + +from __future__ import annotations + +from collections.abc import MutableMapping + +try: + MutableMapping[str, int] +except TypeError: # Python < 3.9 + from typing import MutableMapping +from typing import Any, Iterable, Iterator, TypeVar + +__all__ = ["RefMap"] + +K = TypeVar("K") +V = TypeVar("V") + + +class RefMap(MutableMapping[K, V]): + """A dictionary-like object that allows mutable objects as keys. + + This class keeps the insertion order like a normal dictionary. + + Note that the implementation is limited to what is needed internally. + """ + + _map: dict[int, tuple[K, V]] + + def __init__(self, items: Iterable[tuple[K, V]] | None = None) -> None: + super().__init__() + self._map = {} + if items: + self.update(items) + + def __setitem__(self, key: K, value: V) -> None: + self._map[id(key)] = (key, value) + + def __getitem__(self, key: K) -> Any: + return self._map[id(key)][1] + + def __delitem__(self, key: K) -> None: + del self._map[id(key)] + + def __contains__(self, key: Any) -> bool: + return id(key) in self._map + + def __len__(self) -> int: + return len(self._map) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({list(self.items())!r})" + + def get(self, key: Any, default: Any = None) -> Any: + """Get the mapped value for the given key.""" + try: + return self._map[id(key)][1] + except KeyError: + return default + + def __iter__(self) -> Iterator[K]: + return self.keys() + + def keys(self) -> Iterator[K]: # type: ignore + """Return an iterator over the keys of the map.""" + return (item[0] for item in self._map.values()) + + def values(self) -> Iterator[V]: # type: ignore + """Return an iterator over the values of the map.""" + return (item[1] for item in self._map.values()) + + def items(self) -> Iterator[tuple[K, V]]: # type: ignore + """Return an iterator over the key/value-pairs of the map.""" + return self._map.values() # type: ignore + + def update(self, items: Iterable[tuple[K, V]] | None = None) -> None: # type: ignore + """Update the map with the given key/value-pairs.""" + if items: + for key, value in items: + self[key] = value diff --git a/src/graphql/pyutils/ref_set.py b/src/graphql/pyutils/ref_set.py new file mode 100644 index 00000000..731c021d --- /dev/null +++ b/src/graphql/pyutils/ref_set.py @@ -0,0 +1,67 @@ +"""A Set class that works similarly to JavaScript's Set.""" + +from __future__ import annotations + +from collections.abc import MutableSet + +try: + MutableSet[int] +except TypeError: # Python < 3.9 + from typing import MutableSet +from contextlib import suppress +from typing import Any, Iterable, Iterator, TypeVar + +from .ref_map import RefMap + +__all__ = ["RefSet"] + + +T = TypeVar("T") + + +class RefSet(MutableSet[T]): + """A set-like object that allows mutable objects as elements.
+ + This class keeps the insertion order unlike a normal set. + + Note that the implementation is limited to what is needed internally. + """ + + _map: RefMap[T, None] + + def __init__(self, values: Iterable[T] | None = None) -> None: + super().__init__() + self._map = RefMap() + if values: + self.update(values) + + def __contains__(self, key: Any) -> bool: + return key in self._map + + def __iter__(self) -> Iterator[T]: + return iter(self._map) + + def __len__(self) -> int: + return len(self._map) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({list(self)!r})" + + def add(self, value: T) -> None: + """Add the given item to the set.""" + self._map[value] = None + + def remove(self, value: T) -> None: + """Remove the given item from the set.""" + del self._map[value] + + def discard(self, value: T) -> None: + """Remove the given item from the set if it exists.""" + with suppress(KeyError): + self.remove(value) + + def update(self, values: Iterable[T] | None = None) -> None: + """Update the set with the given items.""" + if values: + for item in values: + self.add(item) diff --git a/src/graphql/pyutils/suggestion_list.py b/src/graphql/pyutils/suggestion_list.py index 6abeefed..35240c77 100644 --- a/src/graphql/pyutils/suggestion_list.py +++ b/src/graphql/pyutils/suggestion_list.py @@ -99,8 +99,7 @@ def measure(self, option: str, threshold: int) -> int | None: double_diagonal_cell = rows[(i - 2) % 3][j - 2] current_cell = min(current_cell, double_diagonal_cell + 1) - if current_cell < smallest_cell: - smallest_cell = current_cell + smallest_cell = min(current_cell, smallest_cell) current_row[j] = current_cell diff --git a/src/graphql/type/__init__.py b/src/graphql/type/__init__.py index 4db6516d..8c41bd28 100644 --- a/src/graphql/type/__init__.py +++ b/src/graphql/type/__init__.py @@ -137,6 +137,7 @@ GraphQLStreamDirective, GraphQLDeprecatedDirective, GraphQLSpecifiedByDirective, + GraphQLOneOfDirective, # Keyword Args GraphQLDirectiveKwargs, # Constant Deprecation Reason @@ -176,133 +177,134 @@ from .validate import validate_schema, assert_valid_schema __all__ = [ - "is_schema", - "assert_schema", - "assert_name", - "assert_enum_value_name", - "GraphQLSchema", - "GraphQLSchemaKwargs", - "is_type", - "is_scalar_type", - "is_object_type", - "is_interface_type", - "is_union_type", - "is_enum_type", - "is_input_object_type", - "is_list_type", - "is_non_null_type", - "is_input_type", - "is_output_type", - "is_leaf_type", - "is_composite_type", - "is_abstract_type", - "is_wrapping_type", - "is_nullable_type", - "is_named_type", - "is_required_argument", - "is_required_input_field", - "assert_type", - "assert_scalar_type", - "assert_object_type", - "assert_interface_type", - "assert_union_type", - "assert_enum_type", - "assert_input_object_type", - "assert_list_type", - "assert_non_null_type", - "assert_input_type", - "assert_output_type", - "assert_leaf_type", - "assert_composite_type", - "assert_abstract_type", - "assert_wrapping_type", - "assert_nullable_type", - "assert_named_type", - "get_nullable_type", - "get_named_type", - "resolve_thunk", - "GraphQLScalarType", - "GraphQLObjectType", - "GraphQLInterfaceType", - "GraphQLUnionType", - "GraphQLEnumType", - "GraphQLInputObjectType", - "GraphQLInputType", - "GraphQLArgument", - "GraphQLList", - "GraphQLNonNull", - "GraphQLType", - "GraphQLInputType", - "GraphQLOutputType", - "GraphQLLeafType", - "GraphQLCompositeType", + "DEFAULT_DEPRECATION_REASON", + "GRAPHQL_MAX_INT", + "GRAPHQL_MIN_INT", "GraphQLAbstractType", - 
"GraphQLWrappingType", - "GraphQLNullableType", - "GraphQLNullableInputType", - "GraphQLNullableOutputType", - "GraphQLNamedType", - "GraphQLNamedInputType", - "GraphQLNamedOutputType", - "Thunk", - "ThunkCollection", - "ThunkMapping", "GraphQLArgument", + "GraphQLArgument", + "GraphQLArgumentKwargs", "GraphQLArgumentMap", + "GraphQLBoolean", + "GraphQLCompositeType", + "GraphQLDeferDirective", + "GraphQLDeprecatedDirective", + "GraphQLDirective", + "GraphQLDirectiveKwargs", + "GraphQLEnumType", + "GraphQLEnumTypeKwargs", "GraphQLEnumValue", + "GraphQLEnumValueKwargs", "GraphQLEnumValueMap", "GraphQLField", + "GraphQLFieldKwargs", "GraphQLFieldMap", + "GraphQLFieldResolver", + "GraphQLFloat", + "GraphQLID", + "GraphQLIncludeDirective", "GraphQLInputField", + "GraphQLInputFieldKwargs", "GraphQLInputFieldMap", "GraphQLInputFieldOutType", - "GraphQLScalarSerializer", - "GraphQLScalarValueParser", - "GraphQLScalarLiteralParser", - "GraphQLArgumentKwargs", - "GraphQLEnumTypeKwargs", - "GraphQLEnumValueKwargs", - "GraphQLFieldKwargs", - "GraphQLInputFieldKwargs", + "GraphQLInputObjectType", "GraphQLInputObjectTypeKwargs", + "GraphQLInputType", + "GraphQLInputType", + "GraphQLInt", + "GraphQLInterfaceType", "GraphQLInterfaceTypeKwargs", + "GraphQLIsTypeOfFn", + "GraphQLLeafType", + "GraphQLList", + "GraphQLNamedInputType", + "GraphQLNamedOutputType", + "GraphQLNamedType", "GraphQLNamedTypeKwargs", + "GraphQLNonNull", + "GraphQLNullableInputType", + "GraphQLNullableOutputType", + "GraphQLNullableType", + "GraphQLObjectType", "GraphQLObjectTypeKwargs", - "GraphQLScalarTypeKwargs", - "GraphQLUnionTypeKwargs", - "GraphQLFieldResolver", - "GraphQLTypeResolver", - "GraphQLIsTypeOfFn", + "GraphQLOneOfDirective", + "GraphQLOutputType", "GraphQLResolveInfo", - "ResponsePath", - "is_directive", - "assert_directive", - "is_specified_directive", - "specified_directives", - "GraphQLDirective", - "GraphQLIncludeDirective", + "GraphQLScalarLiteralParser", + "GraphQLScalarSerializer", + "GraphQLScalarType", + "GraphQLScalarTypeKwargs", + "GraphQLScalarValueParser", + "GraphQLSchema", + "GraphQLSchemaKwargs", "GraphQLSkipDirective", - "GraphQLDeferDirective", - "GraphQLStreamDirective", - "GraphQLDeprecatedDirective", "GraphQLSpecifiedByDirective", - "GraphQLDirectiveKwargs", - "DEFAULT_DEPRECATION_REASON", - "is_specified_scalar_type", - "specified_scalar_types", - "GraphQLInt", - "GraphQLFloat", + "GraphQLStreamDirective", "GraphQLString", - "GraphQLBoolean", - "GraphQLID", - "GRAPHQL_MAX_INT", - "GRAPHQL_MIN_INT", - "is_introspection_type", - "introspection_types", - "TypeKind", + "GraphQLType", + "GraphQLTypeResolver", + "GraphQLUnionType", + "GraphQLUnionTypeKwargs", + "GraphQLWrappingType", + "ResponsePath", "SchemaMetaFieldDef", + "Thunk", + "ThunkCollection", + "ThunkMapping", + "TypeKind", "TypeMetaFieldDef", "TypeNameMetaFieldDef", - "validate_schema", + "assert_abstract_type", + "assert_composite_type", + "assert_directive", + "assert_enum_type", + "assert_enum_value_name", + "assert_input_object_type", + "assert_input_type", + "assert_interface_type", + "assert_leaf_type", + "assert_list_type", + "assert_name", + "assert_named_type", + "assert_non_null_type", + "assert_nullable_type", + "assert_object_type", + "assert_output_type", + "assert_scalar_type", + "assert_schema", + "assert_type", + "assert_union_type", "assert_valid_schema", + "assert_wrapping_type", + "get_named_type", + "get_nullable_type", + "introspection_types", + "is_abstract_type", + "is_composite_type", + "is_directive", + 
"is_enum_type", + "is_input_object_type", + "is_input_type", + "is_interface_type", + "is_introspection_type", + "is_leaf_type", + "is_list_type", + "is_named_type", + "is_non_null_type", + "is_nullable_type", + "is_object_type", + "is_output_type", + "is_required_argument", + "is_required_input_field", + "is_scalar_type", + "is_schema", + "is_specified_directive", + "is_specified_scalar_type", + "is_type", + "is_union_type", + "is_wrapping_type", + "resolve_thunk", + "specified_directives", + "specified_scalar_types", + "validate_schema", ] diff --git a/src/graphql/type/assert_name.py b/src/graphql/type/assert_name.py index b7e94e2d..1a8f7689 100644 --- a/src/graphql/type/assert_name.py +++ b/src/graphql/type/assert_name.py @@ -3,7 +3,7 @@ from ..error import GraphQLError from ..language.character_classes import is_name_continue, is_name_start -__all__ = ["assert_name", "assert_enum_value_name"] +__all__ = ["assert_enum_value_name", "assert_name"] def assert_name(name: str) -> str: diff --git a/src/graphql/type/definition.py b/src/graphql/type/definition.py index dbca4e66..528e3df3 100644 --- a/src/graphql/type/definition.py +++ b/src/graphql/type/definition.py @@ -2,7 +2,6 @@ from __future__ import annotations -from enum import Enum from typing import ( TYPE_CHECKING, Any, @@ -19,6 +18,18 @@ overload, ) +try: + from typing import TypedDict +except ImportError: # Python < 3.8 + from typing_extensions import TypedDict +try: + from typing import TypeAlias, TypeGuard +except ImportError: # Python < 3.10 + from typing_extensions import TypeAlias, TypeGuard + +if TYPE_CHECKING: + from enum import Enum + from ..error import GraphQLError from ..language import ( EnumTypeDefinitionNode, @@ -57,58 +68,11 @@ from ..utilities.value_from_ast_untyped import value_from_ast_untyped from .assert_name import assert_enum_value_name, assert_name -try: - from typing import TypedDict -except ImportError: # Python < 3.8 - from typing_extensions import TypedDict -try: - from typing import TypeAlias, TypeGuard -except ImportError: # Python < 3.10 - from typing_extensions import TypeAlias, TypeGuard - if TYPE_CHECKING: from .schema import GraphQLSchema + __all__ = [ - "is_type", - "is_scalar_type", - "is_object_type", - "is_interface_type", - "is_union_type", - "is_enum_type", - "is_input_object_type", - "is_list_type", - "is_non_null_type", - "is_input_type", - "is_output_type", - "is_leaf_type", - "is_composite_type", - "is_abstract_type", - "is_wrapping_type", - "is_nullable_type", - "is_named_type", - "is_required_argument", - "is_required_input_field", - "assert_type", - "assert_scalar_type", - "assert_object_type", - "assert_interface_type", - "assert_union_type", - "assert_enum_type", - "assert_input_object_type", - "assert_list_type", - "assert_non_null_type", - "assert_input_type", - "assert_output_type", - "assert_leaf_type", - "assert_composite_type", - "assert_abstract_type", - "assert_wrapping_type", - "assert_nullable_type", - "assert_named_type", - "get_nullable_type", - "get_named_type", - "resolve_thunk", "GraphQLAbstractType", "GraphQLArgument", "GraphQLArgumentKwargs", @@ -135,23 +99,23 @@ "GraphQLIsTypeOfFn", "GraphQLLeafType", "GraphQLList", - "GraphQLNamedType", - "GraphQLNamedTypeKwargs", "GraphQLNamedInputType", "GraphQLNamedOutputType", - "GraphQLNullableType", + "GraphQLNamedType", + "GraphQLNamedTypeKwargs", + "GraphQLNonNull", "GraphQLNullableInputType", "GraphQLNullableOutputType", - "GraphQLNonNull", + "GraphQLNullableType", + "GraphQLObjectType", + "GraphQLObjectTypeKwargs", + 
"GraphQLOutputType", "GraphQLResolveInfo", + "GraphQLScalarLiteralParser", + "GraphQLScalarSerializer", "GraphQLScalarType", "GraphQLScalarTypeKwargs", - "GraphQLScalarSerializer", "GraphQLScalarValueParser", - "GraphQLScalarLiteralParser", - "GraphQLObjectType", - "GraphQLObjectTypeKwargs", - "GraphQLOutputType", "GraphQLType", "GraphQLTypeResolver", "GraphQLUnionType", @@ -160,6 +124,45 @@ "Thunk", "ThunkCollection", "ThunkMapping", + "assert_abstract_type", + "assert_composite_type", + "assert_enum_type", + "assert_input_object_type", + "assert_input_type", + "assert_interface_type", + "assert_leaf_type", + "assert_list_type", + "assert_named_type", + "assert_non_null_type", + "assert_nullable_type", + "assert_object_type", + "assert_output_type", + "assert_scalar_type", + "assert_type", + "assert_union_type", + "assert_wrapping_type", + "get_named_type", + "get_nullable_type", + "is_abstract_type", + "is_composite_type", + "is_enum_type", + "is_input_object_type", + "is_input_type", + "is_interface_type", + "is_leaf_type", + "is_list_type", + "is_named_type", + "is_non_null_type", + "is_nullable_type", + "is_object_type", + "is_output_type", + "is_required_argument", + "is_required_input_field", + "is_scalar_type", + "is_type", + "is_union_type", + "is_wrapping_type", + "resolve_thunk", ] @@ -386,8 +389,7 @@ def __init__( self.parse_literal = parse_literal # type: ignore if parse_literal is not None and parse_value is None: msg = ( - f"{name} must provide" - " both 'parse_value' and 'parse_literal' functions." + f"{name} must provide both 'parse_value' and 'parse_literal' functions." ) raise TypeError(msg) self.specified_by_url = specified_by_url @@ -477,7 +479,7 @@ class GraphQLFieldKwargs(TypedDict, total=False): ast_node: FieldDefinitionNode | None -class GraphQLField: +class GraphQLField: # noqa: PLW1641 """Definition of a GraphQL field""" type: GraphQLOutputType @@ -504,7 +506,7 @@ def __init__( args = { assert_name(name): value if isinstance(value, GraphQLArgument) - else GraphQLArgument(cast(GraphQLInputType, value)) + else GraphQLArgument(cast("GraphQLInputType", value)) for name, value in args.items() } else: @@ -642,7 +644,7 @@ class GraphQLArgumentKwargs(TypedDict, total=False): ast_node: InputValueDefinitionNode | None -class GraphQLArgument: +class GraphQLArgument: # noqa: PLW1641 """Definition of a GraphQL argument""" type: GraphQLInputType @@ -790,7 +792,7 @@ def fields(self) -> GraphQLFieldMap: return { assert_name(name): value if isinstance(value, GraphQLField) - else GraphQLField(value) # type: ignore + else GraphQLField(value) for name, value in fields.items() } @@ -895,7 +897,7 @@ def fields(self) -> GraphQLFieldMap: return { assert_name(name): value if isinstance(value, GraphQLField) - else GraphQLField(value) # type: ignore + else GraphQLField(value) for name, value in fields.items() } @@ -1078,7 +1080,7 @@ def __init__( extension_ast_nodes=extension_ast_nodes, ) try: # check for enum - values = cast(Enum, values).__members__ # type: ignore + values = cast("Enum", values).__members__ # type: ignore except AttributeError: if not isinstance(values, Mapping) or not all( isinstance(name, str) for name in values @@ -1091,9 +1093,9 @@ def __init__( " with value names as keys." 
) raise TypeError(msg) from error - values = cast(Dict[str, Any], values) + values = cast("Dict[str, Any]", values) else: - values = cast(Dict[str, Enum], values) + values = cast("Dict[str, Enum]", values) if names_as_values is False: values = {key: value.value for key, value in values.items()} elif names_as_values is True: @@ -1217,7 +1219,7 @@ class GraphQLEnumValueKwargs(TypedDict, total=False): ast_node: EnumValueDefinitionNode | None -class GraphQLEnumValue: +class GraphQLEnumValue: # noqa: PLW1641 """A GraphQL enum value.""" value: Any @@ -1272,6 +1274,7 @@ class GraphQLInputObjectTypeKwargs(GraphQLNamedTypeKwargs, total=False): fields: GraphQLInputFieldMap out_type: GraphQLInputFieldOutType | None + is_one_of: bool class GraphQLInputObjectType(GraphQLNamedType): @@ -1284,7 +1287,7 @@ class GraphQLInputObjectType(GraphQLNamedType): Example:: - NonNullFloat = GraphQLNonNull(GraphQLFloat()) + NonNullFloat = GraphQLNonNull(GraphQLFloat) class GeoPoint(GraphQLInputObjectType): name = 'GeoPoint' @@ -1292,7 +1295,7 @@ class GeoPoint(GraphQLInputObjectType): 'lat': GraphQLInputField(NonNullFloat), 'lon': GraphQLInputField(NonNullFloat), 'alt': GraphQLInputField( - GraphQLFloat(), default_value=0) + GraphQLFloat, default_value=0) } The outbound values will be Python dictionaries by default, but you can have them @@ -1301,6 +1304,7 @@ class GeoPoint(GraphQLInputObjectType): ast_node: InputObjectTypeDefinitionNode | None extension_ast_nodes: tuple[InputObjectTypeExtensionNode, ...] + is_one_of: bool def __init__( self, @@ -1311,6 +1315,7 @@ def __init__( extensions: dict[str, Any] | None = None, ast_node: InputObjectTypeDefinitionNode | None = None, extension_ast_nodes: Collection[InputObjectTypeExtensionNode] | None = None, + is_one_of: bool = False, ) -> None: super().__init__( name=name, @@ -1322,6 +1327,7 @@ def __init__( self._fields = fields if out_type is not None: self.out_type = out_type # type: ignore + self.is_one_of = is_one_of @staticmethod def out_type(value: dict[str, Any]) -> Any: @@ -1340,6 +1346,7 @@ def to_kwargs(self) -> GraphQLInputObjectTypeKwargs: out_type=None if self.out_type is GraphQLInputObjectType.out_type else self.out_type, + is_one_of=self.is_one_of, ) def __copy__(self) -> GraphQLInputObjectType: # pragma: no cover @@ -1357,7 +1364,7 @@ def fields(self) -> GraphQLInputFieldMap: return { assert_name(name): value if isinstance(value, GraphQLInputField) - else GraphQLInputField(value) # type: ignore + else GraphQLInputField(value) for name, value in fields.items() } @@ -1387,7 +1394,7 @@ class GraphQLInputFieldKwargs(TypedDict, total=False): ast_node: InputValueDefinitionNode | None -class GraphQLInputField: +class GraphQLInputField: # noqa: PLW1641 """Definition of a GraphQL input field""" type: GraphQLInputType @@ -1507,7 +1514,7 @@ class GraphQLNonNull(GraphQLWrappingType[GNT_co]): class RowType(GraphQLObjectType): name = 'Row' fields = { - 'id': GraphQLField(GraphQLNonNull(GraphQLString())) + 'id': GraphQLField(GraphQLNonNull(GraphQLString)) } Note: the enforcement of non-nullability occurs within the executor. @@ -1658,7 +1665,7 @@ def get_nullable_type( """Unwrap possible non-null type""" if is_non_null_type(type_): type_ = type_.of_type - return cast(Optional[GraphQLNullableType], type_) + return cast("Optional[GraphQLNullableType]", type_) # These named types do not include modifiers like List or NonNull. 
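Stepping back to the new `pyutils.gather_with_cancel` helper introduced earlier in this diff: as its docstring states, the first exception propagates immediately and all still-pending awaitables are cancelled, unlike plain `asyncio.gather`. A runnable sketch (function names are illustrative):

import asyncio

from graphql.pyutils import gather_with_cancel

async def slow():
    try:
        await asyncio.sleep(10)
    except asyncio.CancelledError:
        print("slow task was cancelled")
        raise

async def failing():
    await asyncio.sleep(0.01)
    raise RuntimeError("boom")

async def main():
    try:
        await gather_with_cancel(slow(), failing())
    except RuntimeError as exc:
        print("caught:", exc)  # caught: boom

asyncio.run(main())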
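Similarly for `RefMap` and `RefSet` from `pyutils` above: both key on object identity rather than equality, which also admits unhashable members, and both preserve insertion order. For example:

from graphql.pyutils import RefMap, RefSet

a, b = [1, 2], [1, 2]  # equal, but distinct (and unhashable) objects

m = RefMap([(a, "first"), (b, "second")])
assert len(m) == 2  # identity keys: both entries are kept
assert m[a] == "first" and m[b] == "second"

s = RefSet([a, b, a])
assert len(s) == 2  # `a` is only added once, `b` counts separately
assert a in s and b in s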
@@ -1703,7 +1710,7 @@ def get_named_type(type_: GraphQLType | None) -> GraphQLNamedType | None: unwrapped_type = type_ while is_wrapping_type(unwrapped_type): unwrapped_type = unwrapped_type.of_type - return cast(GraphQLNamedType, unwrapped_type) + return cast("GraphQLNamedType", unwrapped_type) return None diff --git a/src/graphql/type/directives.py b/src/graphql/type/directives.py index 17e8083c..bd3e360f 100644 --- a/src/graphql/type/directives.py +++ b/src/graphql/type/directives.py @@ -20,20 +20,20 @@ from typing_extensions import TypeGuard __all__ = [ - "is_directive", - "assert_directive", - "is_specified_directive", - "specified_directives", + "DEFAULT_DEPRECATION_REASON", + "DirectiveLocation", "GraphQLDeferDirective", + "GraphQLDeprecatedDirective", "GraphQLDirective", "GraphQLDirectiveKwargs", "GraphQLIncludeDirective", "GraphQLSkipDirective", - "GraphQLStreamDirective", - "GraphQLDeprecatedDirective", "GraphQLSpecifiedByDirective", - "DirectiveLocation", - "DEFAULT_DEPRECATION_REASON", + "GraphQLStreamDirective", + "assert_directive", + "is_directive", + "is_specified_directive", + "specified_directives", ] @@ -49,7 +49,7 @@ class GraphQLDirectiveKwargs(TypedDict, total=False): ast_node: ast.DirectiveDefinitionNode | None -class GraphQLDirective: +class GraphQLDirective: # noqa: PLW1641 """GraphQL Directive Directives are used by the GraphQL runtime as a way of modifying execution behavior. @@ -79,7 +79,7 @@ def __init__( locations = tuple( value if isinstance(value, DirectiveLocation) - else DirectiveLocation[cast(str, value)] + else DirectiveLocation[cast("str", value)] for value in locations ) except (KeyError, TypeError) as error: @@ -92,7 +92,7 @@ def __init__( args = { assert_name(name): value if isinstance(value, GraphQLArgument) - else GraphQLArgument(cast(GraphQLInputType, value)) + else GraphQLArgument(cast("GraphQLInputType", value)) for name, value in args.items() } else: @@ -248,17 +248,26 @@ def assert_directive(directive: Any) -> GraphQLDirective: description="Marks an element of a GraphQL schema as no longer supported.", ) -# Used to provide a URL for specifying the behaviour of custom scalar definitions: +# Used to provide a URL for specifying the behavior of custom scalar definitions: GraphQLSpecifiedByDirective = GraphQLDirective( name="specifiedBy", locations=[DirectiveLocation.SCALAR], args={ "url": GraphQLArgument( GraphQLNonNull(GraphQLString), - description="The URL that specifies the behaviour of this scalar.", + description="The URL that specifies the behavior of this scalar.", ) }, - description="Exposes a URL that specifies the behaviour of this scalar.", + description="Exposes a URL that specifies the behavior of this scalar.", +) + +# Used to indicate an Input Object is a OneOf Input Object. +GraphQLOneOfDirective = GraphQLDirective( + name="oneOf", + locations=[DirectiveLocation.INPUT_OBJECT], + args={}, + description="Indicates exactly one field must be supplied" + " and this field must not be `null`.", ) specified_directives: tuple[GraphQLDirective, ...] 
= ( @@ -266,6 +275,7 @@ def assert_directive(directive: Any) -> GraphQLDirective: GraphQLSkipDirective, GraphQLDeprecatedDirective, GraphQLSpecifiedByDirective, + GraphQLOneOfDirective, ) """A tuple with all directives from the GraphQL specification""" diff --git a/src/graphql/type/introspection.py b/src/graphql/type/introspection.py index 866a0499..313c3679 100644 --- a/src/graphql/type/introspection.py +++ b/src/graphql/type/introspection.py @@ -305,6 +305,7 @@ def __new__(cls): resolve=cls.input_fields, ), "ofType": GraphQLField(_Type, resolve=cls.of_type), + "isOneOf": GraphQLField(GraphQLBoolean, resolve=cls.is_one_of), } @staticmethod @@ -396,6 +397,10 @@ def input_fields(type_, _info, includeDeprecated=False): def of_type(type_, _info): return getattr(type_, "of_type", None) + @staticmethod + def is_one_of(type_, _info): + return type_.is_one_of if is_input_object_type(type_) else None + _Type: GraphQLObjectType = GraphQLObjectType( name="__Type", @@ -634,8 +639,7 @@ class TypeKind(Enum): ), "NON_NULL": GraphQLEnumValue( TypeKind.NON_NULL, - description="Indicates this type is a non-null." - " `ofType` is a valid field.", + description="Indicates this type is a non-null. `ofType` is a valid field.", ), }, ) diff --git a/src/graphql/type/scalars.py b/src/graphql/type/scalars.py index 22669c80..d35e6e26 100644 --- a/src/graphql/type/scalars.py +++ b/src/graphql/type/scalars.py @@ -23,15 +23,15 @@ from typing_extensions import TypeGuard __all__ = [ - "is_specified_scalar_type", - "specified_scalar_types", - "GraphQLInt", - "GraphQLFloat", - "GraphQLString", - "GraphQLBoolean", - "GraphQLID", "GRAPHQL_MAX_INT", "GRAPHQL_MIN_INT", + "GraphQLBoolean", + "GraphQLFloat", + "GraphQLID", + "GraphQLInt", + "GraphQLString", + "is_specified_scalar_type", + "specified_scalar_types", ] # As per the GraphQL Spec, Integers are only treated as valid @@ -315,7 +315,7 @@ def parse_id_literal(value_node: ValueNode, _variables: Any = None) -> str: GraphQLBoolean, GraphQLID, ) -} +} # pyright: ignore def is_specified_scalar_type(type_: GraphQLNamedType) -> TypeGuard[GraphQLScalarType]: diff --git a/src/graphql/type/schema.py b/src/graphql/type/schema.py index 5e546298..f8ab756b 100644 --- a/src/graphql/type/schema.py +++ b/src/graphql/type/schema.py @@ -21,6 +21,7 @@ GraphQLAbstractType, GraphQLCompositeType, GraphQLField, + GraphQLInputType, GraphQLInterfaceType, GraphQLNamedType, GraphQLObjectType, @@ -49,7 +50,7 @@ except ImportError: # Python < 3.10 from typing_extensions import TypeAlias, TypeGuard -__all__ = ["GraphQLSchema", "GraphQLSchemaKwargs", "is_schema", "assert_schema"] +__all__ = ["GraphQLSchema", "GraphQLSchemaKwargs", "assert_schema", "is_schema"] TypeMap: TypeAlias = Dict[str, GraphQLNamedType] @@ -293,12 +294,15 @@ def __deepcopy__(self, memo_: dict) -> GraphQLSchema: directive if is_specified_directive(directive) else copy(directive) for directive in self.directives ] + for directive in directives: + remap_directive(directive, type_map) return self.__class__( - self.query_type and cast(GraphQLObjectType, type_map[self.query_type.name]), + self.query_type + and cast("GraphQLObjectType", type_map[self.query_type.name]), self.mutation_type - and cast(GraphQLObjectType, type_map[self.mutation_type.name]), + and cast("GraphQLObjectType", type_map[self.mutation_type.name]), self.subscription_type - and cast(GraphQLObjectType, type_map[self.subscription_type.name]), + and cast("GraphQLObjectType", type_map[self.subscription_type.name]), types, directives, self.description, @@ -324,7 +328,7 @@ 
def get_possible_types( abstract_type.types if is_union_type(abstract_type) else self.get_implementations( - cast(GraphQLInterfaceType, abstract_type) + cast("GraphQLInterfaceType", abstract_type) ).objects ) @@ -351,7 +355,7 @@ def is_sub_type( add(type_.name) else: implementations = self.get_implementations( - cast(GraphQLInterfaceType, abstract_type) + cast("GraphQLInterfaceType", abstract_type) ) for type_ in implementations.objects: add(type_.name) @@ -407,7 +411,7 @@ class TypeSet(Dict[GraphQLNamedType, None]): @classmethod def with_initial_types(cls, types: Collection[GraphQLType]) -> TypeSet: - return cast(TypeSet, super().fromkeys(types)) + return cast("TypeSet", super().fromkeys(types)) def collect_referenced_types(self, type_: GraphQLType) -> None: """Recursive function supplementing the type starting from an initial type.""" @@ -452,17 +456,13 @@ def remapped_type(type_: GraphQLType, type_map: TypeMap) -> GraphQLType: """Get a copy of the given type that uses this type map.""" if is_wrapping_type(type_): return type_.__class__(remapped_type(type_.of_type, type_map)) - type_ = cast(GraphQLNamedType, type_) + type_ = cast("GraphQLNamedType", type_) return type_map.get(type_.name, type_) def remap_named_type(type_: GraphQLNamedType, type_map: TypeMap) -> None: """Change all references in the given named type to use this type map.""" - if is_union_type(type_): - type_.types = [ - type_map.get(member_type.name, member_type) for member_type in type_.types - ] - elif is_object_type(type_) or is_interface_type(type_): + if is_object_type(type_) or is_interface_type(type_): type_.interfaces = [ type_map.get(interface_type.name, interface_type) for interface_type in type_.interfaces @@ -477,9 +477,22 @@ def remap_named_type(type_: GraphQLNamedType, type_map: TypeMap) -> None: arg.type = remapped_type(arg.type, type_map) args[arg_name] = arg fields[field_name] = field + elif is_union_type(type_): + type_.types = [ + type_map.get(member_type.name, member_type) for member_type in type_.types + ] elif is_input_object_type(type_): fields = type_.fields for field_name, field in fields.items(): field = copy(field) # noqa: PLW2901 field.type = remapped_type(field.type, type_map) fields[field_name] = field + + +def remap_directive(directive: GraphQLDirective, type_map: TypeMap) -> None: + """Change all references in the given directive to use this type map.""" + args = directive.args + for arg_name, arg in args.items(): + arg = copy(arg) # noqa: PLW2901 + arg.type = cast("GraphQLInputType", remapped_type(arg.type, type_map)) + args[arg_name] = arg diff --git a/src/graphql/type/validate.py b/src/graphql/type/validate.py index 8a6b7257..9b22f44e 100644 --- a/src/graphql/type/validate.py +++ b/src/graphql/type/validate.py @@ -16,7 +16,7 @@ SchemaDefinitionNode, SchemaExtensionNode, ) -from ..pyutils import and_list, inspect +from ..pyutils import Undefined, and_list, inspect from ..utilities.type_comparators import is_equal_type, is_type_sub_type_of from .definition import ( GraphQLEnumType, @@ -41,7 +41,7 @@ from .introspection import is_introspection_type from .schema import GraphQLSchema, assert_schema -__all__ = ["validate_schema", "assert_valid_schema"] +__all__ = ["assert_valid_schema", "validate_schema"] def validate_schema(schema: GraphQLSchema) -> list[GraphQLError]: @@ -101,7 +101,7 @@ def report_error( ) -> None: if nodes and not isinstance(nodes, Node): nodes = [node for node in nodes if node] - nodes = cast(Optional[Collection[Node]], nodes) + nodes = cast("Optional[Collection[Node]]", 
nodes) self.errors.append(GraphQLError(message, nodes)) def validate_root_types(self) -> None: @@ -183,7 +183,7 @@ def validate_name(self, node: Any, name: str | None = None) -> None: try: if not name: name = node.name - name = cast(str, name) + name = cast("str", name) ast_node = node.ast_node except AttributeError: # pragma: no cover pass @@ -454,8 +454,7 @@ def validate_input_fields(self, input_obj: GraphQLInputObjectType) -> None: if not fields: self.report_error( - f"Input Object type {input_obj.name}" - " must define one or more fields.", + f"Input Object type {input_obj.name} must define one or more fields.", [input_obj.ast_node, *input_obj.extension_ast_nodes], ) @@ -482,6 +481,28 @@ def validate_input_fields(self, input_obj: GraphQLInputObjectType) -> None: ], ) + if input_obj.is_one_of: + self.validate_one_of_input_object_field(input_obj, field_name, field) + + def validate_one_of_input_object_field( + self, + type_: GraphQLInputObjectType, + field_name: str, + field: GraphQLInputField, + ) -> None: + if is_non_null_type(field.type): + self.report_error( + f"OneOf input field {type_.name}.{field_name} must be nullable.", + field.ast_node and field.ast_node.type, + ) + + if field.default_value is not Undefined: + self.report_error( + f"OneOf input field {type_.name}.{field_name}" + " cannot have a default value.", + field.ast_node, + ) + def get_operation_type_node( schema: GraphQLSchema, operation: OperationType @@ -540,7 +561,7 @@ def __call__(self, input_obj: GraphQLInputObjectType) -> None: " within itself through a series of non-null fields:" f" '{'.'.join(field_names)}'.", cast( - Collection[Node], + "Collection[Node]", map(attrgetter("ast_node"), map(itemgetter(1), cycle_path)), ), ) diff --git a/src/graphql/utilities/__init__.py b/src/graphql/utilities/__init__.py index f528bdcc..5aadcc31 100644 --- a/src/graphql/utilities/__init__.py +++ b/src/graphql/utilities/__init__.py @@ -100,14 +100,14 @@ "find_dangerous_changes", "get_introspection_query", "get_operation_ast", + "introspection_from_schema", "is_equal_type", "is_type_sub_type_of", - "introspection_from_schema", "lexicographic_sort_schema", - "print_schema", - "print_type", "print_directive", "print_introspection_schema", + "print_schema", + "print_type", "print_value", "separate_operations", "strip_ignored_characters", diff --git a/src/graphql/utilities/ast_to_dict.py b/src/graphql/utilities/ast_to_dict.py index fea70b32..10f13c15 100644 --- a/src/graphql/utilities/ast_to_dict.py +++ b/src/graphql/utilities/ast_to_dict.py @@ -37,9 +37,8 @@ def ast_to_dict( ) -> Any: """Convert a language AST to a nested Python dictionary. - Set `location` to True in order to get the locations as well. + Set `locations` to True in order to get the locations as well. """ - """Convert a node to a nested Python dictionary.""" if isinstance(node, Node): if cache is None: cache = {} @@ -49,7 +48,7 @@ def ast_to_dict( res.update( { key: ast_to_dict(getattr(node, key), locations, cache) - for key in ("kind",) + node.keys[1:] + for key in ("kind", *node.keys[1:]) } ) if locations: diff --git a/src/graphql/utilities/build_ast_schema.py b/src/graphql/utilities/build_ast_schema.py index 8736e979..26ccfea2 100644 --- a/src/graphql/utilities/build_ast_schema.py +++ b/src/graphql/utilities/build_ast_schema.py @@ -68,11 +68,11 @@ def build_ast_schema( # validation with validate_schema() will produce more actionable results. 
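The two checks added above reject OneOf input fields that are declared non-null or that carry a default value. For illustration, a minimal sketch of how they surface through validate_schema, assuming this alpha registers @oneOf as a predefined SDL directive (the Point type and its fields are illustrative):

from graphql import build_schema, validate_schema

# Build a schema whose OneOf input type violates both new rules.
schema = build_schema("""
    type Query {
        locate(point: Point): String
    }

    input Point @oneOf {
        x: Int!     # non-null: OneOf fields must be nullable
        y: Int = 0  # default: OneOf fields cannot have defaults
    }
""")

for error in validate_schema(schema):
    print(error.message)
# OneOf input field Point.x must be nullable.
# OneOf input field Point.y cannot have a default value.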
type_name = type_.name if type_name == "Query": - schema_kwargs["query"] = cast(GraphQLObjectType, type_) + schema_kwargs["query"] = cast("GraphQLObjectType", type_) elif type_name == "Mutation": - schema_kwargs["mutation"] = cast(GraphQLObjectType, type_) + schema_kwargs["mutation"] = cast("GraphQLObjectType", type_) elif type_name == "Subscription": - schema_kwargs["subscription"] = cast(GraphQLObjectType, type_) + schema_kwargs["subscription"] = cast("GraphQLObjectType", type_) # If specified directives were not explicitly declared, add them. directives = schema_kwargs["directives"] diff --git a/src/graphql/utilities/build_client_schema.py b/src/graphql/utilities/build_client_schema.py index c4d05ccc..0e2cbd0e 100644 --- a/src/graphql/utilities/build_client_schema.py +++ b/src/graphql/utilities/build_client_schema.py @@ -3,7 +3,7 @@ from __future__ import annotations from itertools import chain -from typing import Callable, Collection, cast +from typing import TYPE_CHECKING, Callable, Collection, cast from ..language import DirectiveLocation, parse_value from ..pyutils import Undefined, inspect @@ -33,22 +33,25 @@ is_output_type, specified_scalar_types, ) -from .get_introspection_query import ( - IntrospectionDirective, - IntrospectionEnumType, - IntrospectionField, - IntrospectionInputObjectType, - IntrospectionInputValue, - IntrospectionInterfaceType, - IntrospectionObjectType, - IntrospectionQuery, - IntrospectionScalarType, - IntrospectionType, - IntrospectionTypeRef, - IntrospectionUnionType, -) from .value_from_ast import value_from_ast +if TYPE_CHECKING: + from .get_introspection_query import ( + IntrospectionDirective, + IntrospectionEnumType, + IntrospectionField, + IntrospectionInputObjectType, + IntrospectionInputValue, + IntrospectionInterfaceType, + IntrospectionObjectType, + IntrospectionQuery, + IntrospectionScalarType, + IntrospectionType, + IntrospectionTypeRef, + IntrospectionUnionType, + ) + + __all__ = ["build_client_schema"] @@ -90,17 +93,17 @@ def get_type(type_ref: IntrospectionTypeRef) -> GraphQLType: if not item_ref: msg = "Decorated type deeper than introspection query." raise TypeError(msg) - item_ref = cast(IntrospectionTypeRef, item_ref) + item_ref = cast("IntrospectionTypeRef", item_ref) return GraphQLList(get_type(item_ref)) if kind == TypeKind.NON_NULL.name: nullable_ref = type_ref.get("ofType") if not nullable_ref: msg = "Decorated type deeper than introspection query." raise TypeError(msg) - nullable_ref = cast(IntrospectionTypeRef, nullable_ref) + nullable_ref = cast("IntrospectionTypeRef", nullable_ref) nullable_type = get_type(nullable_ref) return GraphQLNonNull(assert_nullable_type(nullable_type)) - type_ref = cast(IntrospectionType, type_ref) + type_ref = cast("IntrospectionType", type_ref) return get_named_type(type_ref) def get_named_type(type_ref: IntrospectionType) -> GraphQLNamedType: @@ -145,7 +148,7 @@ def build_scalar_def( ) -> GraphQLScalarType: name = scalar_introspection["name"] try: - return cast(GraphQLScalarType, GraphQLScalarType.reserved_types[name]) + return cast("GraphQLScalarType", GraphQLScalarType.reserved_types[name]) except KeyError: return GraphQLScalarType( name=name, @@ -168,7 +171,7 @@ def build_implementations_list( f" {inspect(implementing_introspection)}." 
) raise TypeError(msg) - interfaces = cast(Collection[IntrospectionInterfaceType], maybe_interfaces) + interfaces = cast("Collection[IntrospectionInterfaceType]", maybe_interfaces) return [get_interface_type(interface) for interface in interfaces] def build_object_def( @@ -176,7 +179,7 @@ def build_object_def( ) -> GraphQLObjectType: name = object_introspection["name"] try: - return cast(GraphQLObjectType, GraphQLObjectType.reserved_types[name]) + return cast("GraphQLObjectType", GraphQLObjectType.reserved_types[name]) except KeyError: return GraphQLObjectType( name=name, @@ -205,7 +208,9 @@ def build_union_def( f" {inspect(union_introspection)}." ) raise TypeError(msg) - possible_types = cast(Collection[IntrospectionObjectType], maybe_possible_types) + possible_types = cast( + "Collection[IntrospectionObjectType]", maybe_possible_types + ) return GraphQLUnionType( name=union_introspection["name"], description=union_introspection.get("description"), @@ -221,7 +226,7 @@ def build_enum_def(enum_introspection: IntrospectionEnumType) -> GraphQLEnumType raise TypeError(msg) name = enum_introspection["name"] try: - return cast(GraphQLEnumType, GraphQLEnumType.reserved_types[name]) + return cast("GraphQLEnumType", GraphQLEnumType.reserved_types[name]) except KeyError: return GraphQLEnumType( name=name, @@ -275,7 +280,7 @@ def build_field_def_map( } def build_field(field_introspection: IntrospectionField) -> GraphQLField: - type_introspection = cast(IntrospectionType, field_introspection["type"]) + type_introspection = cast("IntrospectionType", field_introspection["type"]) type_ = get_type(type_introspection) if not is_output_type(type_): msg = ( @@ -310,7 +315,7 @@ def build_argument_def_map( def build_argument( argument_introspection: IntrospectionInputValue, ) -> GraphQLArgument: - type_introspection = cast(IntrospectionType, argument_introspection["type"]) + type_introspection = cast("IntrospectionType", argument_introspection["type"]) type_ = get_type(type_introspection) if not is_input_type(type_): msg = ( @@ -345,7 +350,9 @@ def build_input_value_def_map( def build_input_value( input_value_introspection: IntrospectionInputValue, ) -> GraphQLInputField: - type_introspection = cast(IntrospectionType, input_value_introspection["type"]) + type_introspection = cast( + "IntrospectionType", input_value_introspection["type"] + ) type_ = get_type(type_introspection) if not is_input_type(type_): msg = ( @@ -388,7 +395,7 @@ def build_directive( is_repeatable=directive_introspection.get("isRepeatable", False), locations=list( cast( - Collection[DirectiveLocation], + "Collection[DirectiveLocation]", directive_introspection.get("locations"), ) ), diff --git a/src/graphql/utilities/coerce_input_value.py b/src/graphql/utilities/coerce_input_value.py index db74d272..b7452ec3 100644 --- a/src/graphql/utilities/coerce_input_value.py +++ b/src/graphql/utilities/coerce_input_value.py @@ -130,13 +130,37 @@ def coerce_input_value( + did_you_mean(suggestions) ), ) + + if type_.is_one_of: + keys = list(coerced_dict) + if len(keys) != 1: + on_error( + path.as_list() if path else [], + input_value, + GraphQLError( + "Exactly one key must be specified" + f" for OneOf type '{type_.name}'.", + ), + ) + else: + key = keys[0] + value = coerced_dict[key] + if value is None: + on_error( + (path.as_list() if path else []) + [key], + value, + GraphQLError( + f"Field '{key}' must be non-null.", + ), + ) + return type_.out_type(coerced_dict) if is_leaf_type(type_): # Scalars and Enums determine if an input value is valid via 
`parse_value()`, # which can throw to indicate failure. If it throws, maintain a reference # to the original error. - type_ = cast(GraphQLScalarType, type_) + type_ = cast("GraphQLScalarType", type_) try: parse_result = type_.parse_value(input_value) except GraphQLError as error: diff --git a/src/graphql/utilities/extend_schema.py b/src/graphql/utilities/extend_schema.py index c5af8669..aebdd2b3 100644 --- a/src/graphql/utilities/extend_schema.py +++ b/src/graphql/utilities/extend_schema.py @@ -65,6 +65,7 @@ GraphQLNullableType, GraphQLObjectType, GraphQLObjectTypeKwargs, + GraphQLOneOfDirective, GraphQLOutputType, GraphQLScalarType, GraphQLSchema, @@ -91,8 +92,8 @@ from .value_from_ast import value_from_ast __all__ = [ - "extend_schema", "ExtendSchemaImpl", + "extend_schema", ] @@ -229,8 +230,12 @@ def extend_schema_args( return schema_kwargs self = cls(type_extensions) - for existing_type in schema_kwargs["types"] or (): - self.type_map[existing_type.name] = self.extend_named_type(existing_type) + + self.type_map = { + type_.name: self.extend_named_type(type_) + for type_ in schema_kwargs["types"] or () + } + for type_node in type_defs: name = type_node.name.value self.type_map[name] = std_type_map.get(name) or self.build_type(type_node) @@ -400,7 +405,7 @@ def extend_object_type_interfaces( ) -> list[GraphQLInterfaceType]: """Extend a GraphQL object type interface.""" return [ - cast(GraphQLInterfaceType, self.replace_named_type(interface)) + cast("GraphQLInterfaceType", self.replace_named_type(interface)) for interface in kwargs["interfaces"] ] + self.build_interfaces(extensions) @@ -438,7 +443,7 @@ def extend_interface_type_interfaces( ) -> list[GraphQLInterfaceType]: """Extend GraphQL interface type interfaces.""" return [ - cast(GraphQLInterfaceType, self.replace_named_type(interface)) + cast("GraphQLInterfaceType", self.replace_named_type(interface)) for interface in kwargs["interfaces"] ] + self.build_interfaces(extensions) @@ -478,7 +483,7 @@ def extend_union_type_types( ) -> list[GraphQLObjectType]: """Extend types of a GraphQL union type.""" return [ - cast(GraphQLObjectType, self.replace_named_type(member_type)) + cast("GraphQLObjectType", self.replace_named_type(member_type)) for member_type in kwargs["types"] ] + self.build_union_types(extensions) @@ -546,9 +551,9 @@ def get_wrapped_type(self, node: TypeNode) -> GraphQLType: return GraphQLList(self.get_wrapped_type(node.type)) if isinstance(node, NonNullTypeNode): return GraphQLNonNull( - cast(GraphQLNullableType, self.get_wrapped_type(node.type)) + cast("GraphQLNullableType", self.get_wrapped_type(node.type)) ) - return self.get_named_type(cast(NamedTypeNode, node)) + return self.get_named_type(cast("NamedTypeNode", node)) def build_directive(self, node: DirectiveDefinitionNode) -> GraphQLDirective: """Build a GraphQL directive for a given directive definition node.""" @@ -580,7 +585,7 @@ def build_field_map( # value, that would throw immediately while type system validation # with validate_schema() will produce more actionable results. 
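At runtime, the branch above enforces that a OneOf input object coerces from exactly one non-null key. A small sketch, constructing the type programmatically (assuming the is_one_of keyword accepted by GraphQLInputObjectType in this alpha; the Point type is illustrative):

from graphql import GraphQLInputField, GraphQLInputObjectType, GraphQLInt
from graphql.utilities import coerce_input_value

point = GraphQLInputObjectType(
    "Point",
    {"x": GraphQLInputField(GraphQLInt), "y": GraphQLInputField(GraphQLInt)},
    is_one_of=True,
)

print(coerce_input_value({"x": 1}, point))  # {'x': 1}
# The default error handler raises a GraphQLError for both of these:
# coerce_input_value({"x": 1, "y": 2}, point)  # more than one key specified
# coerce_input_value({"x": None}, point)       # the single key is null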
field_map[field.name.value] = GraphQLField( - type_=cast(GraphQLOutputType, self.get_wrapped_type(field.type)), + type_=cast("GraphQLOutputType", self.get_wrapped_type(field.type)), description=field.description.value if field.description else None, args=self.build_argument_map(field.arguments), deprecation_reason=get_deprecation_reason(field), @@ -598,7 +603,7 @@ def build_argument_map( # Note: While this could make assertions to get the correctly typed # value, that would throw immediately while type system validation # with validate_schema() will produce more actionable results. - type_ = cast(GraphQLInputType, self.get_wrapped_type(arg.type)) + type_ = cast("GraphQLInputType", self.get_wrapped_type(arg.type)) arg_map[arg.name.value] = GraphQLArgument( type_=type_, description=arg.description.value if arg.description else None, @@ -619,7 +624,7 @@ def build_input_field_map( # Note: While this could make assertions to get the correctly typed # value, that would throw immediately while type system validation # with validate_schema() will produce more actionable results. - type_ = cast(GraphQLInputType, self.get_wrapped_type(field.type)) + type_ = cast("GraphQLInputType", self.get_wrapped_type(field.type)) input_field_map[field.name.value] = GraphQLInputField( type_=type_, description=field.description.value if field.description else None, @@ -663,7 +668,7 @@ def build_interfaces( # value, that would throw immediately while type system validation # with validate_schema() will produce more actionable results. return [ - cast(GraphQLInterfaceType, self.get_named_type(type_)) + cast("GraphQLInterfaceType", self.get_named_type(type_)) for node in nodes for type_ in node.interfaces or [] ] @@ -677,7 +682,7 @@ def build_union_types( # value, that would throw immediately while type system validation # with validate_schema() will produce more actionable results. 
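These build_* helpers back the public extend_schema entry point. For orientation, a short usage example with the standard API:

from graphql import build_schema, extend_schema, parse, print_schema

schema = build_schema("type Query { hello: String }")
extended = extend_schema(schema, parse("extend type Query { world: String }"))
print(print_schema(extended))
# type Query {
#   hello: String
#   world: String
# }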
return [ - cast(GraphQLObjectType, self.get_named_type(type_)) + cast("GraphQLObjectType", self.get_named_type(type_)) for node in nodes for type_ in node.types or [] ] @@ -777,6 +782,7 @@ def build_input_object_type( fields=partial(self.build_input_field_map, all_nodes), ast_node=ast_node, extension_ast_nodes=extension_nodes, + is_one_of=is_one_of(ast_node), ) def build_type(self, ast_node: TypeDefinitionNode) -> GraphQLNamedType: @@ -822,3 +828,10 @@ def get_specified_by_url( specified_by_url = get_directive_values(GraphQLSpecifiedByDirective, node) return specified_by_url["url"] if specified_by_url else None + + +def is_one_of(node: InputObjectTypeDefinitionNode) -> bool: + """Given an input object node, returns if the node should be OneOf.""" + from ..execution import get_directive_values + + return get_directive_values(GraphQLOneOfDirective, node) is not None diff --git a/src/graphql/utilities/find_breaking_changes.py b/src/graphql/utilities/find_breaking_changes.py index c88c1265..d2a03ad2 100644 --- a/src/graphql/utilities/find_breaking_changes.py +++ b/src/graphql/utilities/find_breaking_changes.py @@ -216,11 +216,8 @@ def find_type_changes( schema_changes.extend(find_union_type_changes(old_type, new_type)) elif is_input_object_type(old_type) and is_input_object_type(new_type): schema_changes.extend(find_input_object_type_changes(old_type, new_type)) - elif ( - is_object_type(old_type) - and is_object_type(new_type) - or is_interface_type(old_type) - and is_interface_type(new_type) + elif (is_object_type(old_type) and is_object_type(new_type)) or ( + is_interface_type(old_type) and is_interface_type(new_type) ): schema_changes.extend(find_field_changes(old_type, new_type)) schema_changes.extend( @@ -297,7 +294,7 @@ def find_union_type_changes( schema_changes.append( DangerousChange( DangerousChangeType.TYPE_ADDED_TO_UNION, - f"{possible_type.name} was added" f" to union type {old_type.name}.", + f"{possible_type.name} was added to union type {old_type.name}.", ) ) @@ -410,7 +407,7 @@ def find_arg_changes( schema_changes.append( BreakingChange( BreakingChangeType.ARG_REMOVED, - f"{old_type.name}.{field_name} arg" f" {arg_name} was removed.", + f"{old_type.name}.{field_name} arg {arg_name} was removed.", ) ) diff --git a/src/graphql/utilities/get_introspection_query.py b/src/graphql/utilities/get_introspection_query.py index cffaa12d..adf038ac 100644 --- a/src/graphql/utilities/get_introspection_query.py +++ b/src/graphql/utilities/get_introspection_query.py @@ -19,7 +19,6 @@ __all__ = [ - "get_introspection_query", "IntrospectionDirective", "IntrospectionEnumType", "IntrospectionField", @@ -35,6 +34,7 @@ "IntrospectionType", "IntrospectionTypeRef", "IntrospectionUnionType", + "get_introspection_query", ] @@ -149,6 +149,14 @@ def input_deprecation(string: str) -> str | None: ofType {{ kind name + ofType {{ + kind + name + ofType {{ + kind + name + }} + }} }} }} }} @@ -294,7 +302,9 @@ class IntrospectionSchema(MaybeWithDescription): directives: list[IntrospectionDirective] -class IntrospectionQuery(TypedDict): - """The root typed dictionary for schema introspections.""" - - __schema: IntrospectionSchema +# The root typed dictionary for schema introspections. +# Note: We don't use class syntax here since the key looks like a private attribute. 
+IntrospectionQuery = TypedDict( + "IntrospectionQuery", + {"__schema": IntrospectionSchema}, +) diff --git a/src/graphql/utilities/introspection_from_schema.py b/src/graphql/utilities/introspection_from_schema.py index cc1e60ce..a0440a32 100644 --- a/src/graphql/utilities/introspection_from_schema.py +++ b/src/graphql/utilities/introspection_from_schema.py @@ -51,4 +51,4 @@ def introspection_from_schema( if not result.data: # pragma: no cover msg = "Introspection did not return a result" raise GraphQLError(msg) - return cast(IntrospectionQuery, result.data) + return cast("IntrospectionQuery", result.data) diff --git a/src/graphql/utilities/lexicographic_sort_schema.py b/src/graphql/utilities/lexicographic_sort_schema.py index cf0c4959..de675a94 100644 --- a/src/graphql/utilities/lexicographic_sort_schema.py +++ b/src/graphql/utilities/lexicographic_sort_schema.py @@ -51,7 +51,7 @@ def replace_type( return GraphQLList(replace_type(type_.of_type)) if is_non_null_type(type_): return GraphQLNonNull(replace_type(type_.of_type)) - return replace_named_type(cast(GraphQLNamedType, type_)) + return replace_named_type(cast("GraphQLNamedType", type_)) def replace_named_type(type_: GraphQLNamedType) -> GraphQLNamedType: return type_map[type_.name] @@ -76,7 +76,7 @@ def sort_args(args_map: dict[str, GraphQLArgument]) -> dict[str, GraphQLArgument args[name] = GraphQLArgument( **merge_kwargs( arg.to_kwargs(), - type_=replace_type(cast(GraphQLNamedType, arg.type)), + type_=replace_type(cast("GraphQLNamedType", arg.type)), ) ) return args @@ -87,7 +87,7 @@ def sort_fields(fields_map: dict[str, GraphQLField]) -> dict[str, GraphQLField]: fields[name] = GraphQLField( **merge_kwargs( field.to_kwargs(), - type_=replace_type(cast(GraphQLNamedType, field.type)), + type_=replace_type(cast("GraphQLNamedType", field.type)), args=sort_args(field.args), ) ) @@ -99,7 +99,8 @@ def sort_input_fields( return { name: GraphQLInputField( cast( - GraphQLInputType, replace_type(cast(GraphQLNamedType, field.type)) + "GraphQLInputType", + replace_type(cast("GraphQLNamedType", field.type)), ), description=field.description, default_value=field.default_value, @@ -174,12 +175,14 @@ def sort_named_type(type_: GraphQLNamedType) -> GraphQLNamedType: sort_directive(directive) for directive in sorted(schema.directives, key=sort_by_name_key) ], - query=cast(Optional[GraphQLObjectType], replace_maybe_type(schema.query_type)), + query=cast( + "Optional[GraphQLObjectType]", replace_maybe_type(schema.query_type) + ), mutation=cast( - Optional[GraphQLObjectType], replace_maybe_type(schema.mutation_type) + "Optional[GraphQLObjectType]", replace_maybe_type(schema.mutation_type) ), subscription=cast( - Optional[GraphQLObjectType], replace_maybe_type(schema.subscription_type) + "Optional[GraphQLObjectType]", replace_maybe_type(schema.subscription_type) ), ast_node=schema.ast_node, ) diff --git a/src/graphql/utilities/print_schema.py b/src/graphql/utilities/print_schema.py index 44c876dc..abd52a28 100644 --- a/src/graphql/utilities/print_schema.py +++ b/src/graphql/utilities/print_schema.py @@ -33,10 +33,10 @@ from .ast_from_value import ast_from_value __all__ = [ - "print_schema", - "print_type", "print_directive", "print_introspection_schema", + "print_schema", + "print_type", "print_value", ] @@ -214,7 +214,12 @@ def print_input_object(type_: GraphQLInputObjectType) -> str: print_description(field, " ", not i) + " " + print_input_value(name, field) for i, (name, field) in enumerate(type_.fields.items()) ] - return print_description(type_) + 
f"input {type_.name}" + print_block(fields) + return ( + print_description(type_) + + f"input {type_.name}" + + (" @oneOf" if type_.is_one_of else "") + + print_block(fields) + ) def print_fields(type_: GraphQLObjectType | GraphQLInterfaceType) -> str: diff --git a/src/graphql/utilities/strip_ignored_characters.py b/src/graphql/utilities/strip_ignored_characters.py index 6521d10b..9ffe1e26 100644 --- a/src/graphql/utilities/strip_ignored_characters.py +++ b/src/graphql/utilities/strip_ignored_characters.py @@ -68,7 +68,7 @@ def strip_ignored_characters(source: str | Source) -> str: """Type description""" type Foo{"""Field description""" bar:String} ''' if not is_source(source): - source = Source(cast(str, source)) + source = Source(cast("str", source)) body = source.body lexer = Lexer(source) diff --git a/src/graphql/utilities/type_comparators.py b/src/graphql/utilities/type_comparators.py index 3ab50dc5..609c19b6 100644 --- a/src/graphql/utilities/type_comparators.py +++ b/src/graphql/utilities/type_comparators.py @@ -11,7 +11,7 @@ is_object_type, ) -__all__ = ["is_equal_type", "is_type_sub_type_of", "do_types_overlap"] +__all__ = ["do_types_overlap", "is_equal_type", "is_type_sub_type_of"] def is_equal_type(type_a: GraphQLType, type_b: GraphQLType) -> bool: diff --git a/src/graphql/utilities/type_from_ast.py b/src/graphql/utilities/type_from_ast.py index c082ebc1..10acd68f 100644 --- a/src/graphql/utilities/type_from_ast.py +++ b/src/graphql/utilities/type_from_ast.py @@ -58,7 +58,7 @@ def type_from_ast( return GraphQLList(inner_type) if inner_type else None if isinstance(type_node, NonNullTypeNode): inner_type = type_from_ast(schema, type_node.type) - inner_type = cast(GraphQLNullableType, inner_type) + inner_type = cast("GraphQLNullableType", inner_type) return GraphQLNonNull(inner_type) if inner_type else None if isinstance(type_node, NamedTypeNode): return schema.get_type(type_node.name.value) diff --git a/src/graphql/utilities/value_from_ast.py b/src/graphql/utilities/value_from_ast.py index 67ed11dc..399cdcb4 100644 --- a/src/graphql/utilities/value_from_ast.py +++ b/src/graphql/utilities/value_from_ast.py @@ -118,12 +118,20 @@ def value_from_ast( return Undefined coerced_obj[field.out_name or field_name] = field_value + if type_.is_one_of: + keys = list(coerced_obj) + if len(keys) != 1: + return Undefined + + if coerced_obj[keys[0]] is None: + return Undefined + return type_.out_type(coerced_obj) if is_leaf_type(type_): # Scalars fulfill parsing a literal value via `parse_literal()`. Invalid values # represent a failure to parse correctly, in which case Undefined is returned. 
- type_ = cast(GraphQLScalarType, type_) + type_ = cast("GraphQLScalarType", type_) # noinspection PyBroadException try: if variables: diff --git a/src/graphql/validation/__init__.py b/src/graphql/validation/__init__.py index 8f67f9b7..ed6ca6c8 100644 --- a/src/graphql/validation/__init__.py +++ b/src/graphql/validation/__init__.py @@ -124,14 +124,8 @@ from .rules.custom.no_schema_introspection import NoSchemaIntrospectionCustomRule __all__ = [ - "validate", "ASTValidationContext", "ASTValidationRule", - "SDLValidationContext", - "SDLValidationRule", - "ValidationContext", - "ValidationRule", - "specified_rules", "DeferStreamDirectiveLabel", "DeferStreamDirectiveOnRootField", "DeferStreamDirectiveOnValidOperationsRule", @@ -143,33 +137,39 @@ "KnownFragmentNamesRule", "KnownTypeNamesRule", "LoneAnonymousOperationRule", + "LoneSchemaDefinitionRule", + "NoDeprecatedCustomRule", "NoFragmentCyclesRule", + "NoSchemaIntrospectionCustomRule", "NoUndefinedVariablesRule", "NoUnusedFragmentsRule", "NoUnusedVariablesRule", "OverlappingFieldsCanBeMergedRule", "PossibleFragmentSpreadsRule", + "PossibleTypeExtensionsRule", "ProvidedRequiredArgumentsRule", + "SDLValidationContext", + "SDLValidationRule", "ScalarLeafsRule", "SingleFieldSubscriptionsRule", "StreamDirectiveOnListField", + "UniqueArgumentDefinitionNamesRule", "UniqueArgumentNamesRule", + "UniqueDirectiveNamesRule", "UniqueDirectivesPerLocationRule", + "UniqueEnumValueNamesRule", + "UniqueFieldDefinitionNamesRule", "UniqueFragmentNamesRule", "UniqueInputFieldNamesRule", "UniqueOperationNamesRule", + "UniqueOperationTypesRule", + "UniqueTypeNamesRule", "UniqueVariableNamesRule", + "ValidationContext", + "ValidationRule", "ValuesOfCorrectTypeRule", "VariablesAreInputTypesRule", "VariablesInAllowedPositionRule", - "LoneSchemaDefinitionRule", - "UniqueOperationTypesRule", - "UniqueTypeNamesRule", - "UniqueEnumValueNamesRule", - "UniqueFieldDefinitionNamesRule", - "UniqueArgumentDefinitionNamesRule", - "UniqueDirectiveNamesRule", - "PossibleTypeExtensionsRule", - "NoDeprecatedCustomRule", - "NoSchemaIntrospectionCustomRule", + "specified_rules", + "validate", ] diff --git a/src/graphql/validation/rules/defer_stream_directive_on_root_field.py b/src/graphql/validation/rules/defer_stream_directive_on_root_field.py index 7a73a990..023fc2b2 100644 --- a/src/graphql/validation/rules/defer_stream_directive_on_root_field.py +++ b/src/graphql/validation/rules/defer_stream_directive_on_root_field.py @@ -29,7 +29,7 @@ def enter_directive( _path: Any, _ancestors: list[Node], ) -> None: - context = cast(ValidationContext, self.context) + context = cast("ValidationContext", self.context) parent_type = context.get_parent_type() if not parent_type: return diff --git a/src/graphql/validation/rules/defer_stream_directive_on_valid_operations_rule.py b/src/graphql/validation/rules/defer_stream_directive_on_valid_operations_rule.py index c412b89e..0159715d 100644 --- a/src/graphql/validation/rules/defer_stream_directive_on_valid_operations_rule.py +++ b/src/graphql/validation/rules/defer_stream_directive_on_valid_operations_rule.py @@ -66,7 +66,8 @@ def enter_directive( if ( isinstance(definition_node, FragmentDefinitionNode) and definition_node.name.value in self.fragments_used_on_subscriptions - or isinstance(definition_node, OperationDefinitionNode) + ) or ( + isinstance(definition_node, OperationDefinitionNode) and definition_node.operation == OperationType.SUBSCRIPTION ): if node.name.value == GraphQLDeferDirective.name: diff --git 
a/src/graphql/validation/rules/executable_definitions.py b/src/graphql/validation/rules/executable_definitions.py index 1f702210..6ca01a9d 100644 --- a/src/graphql/validation/rules/executable_definitions.py +++ b/src/graphql/validation/rules/executable_definitions.py @@ -39,7 +39,7 @@ def enter_document(self, node: DocumentNode, *_args: Any) -> VisitorAction: ) else "'{}'".format( cast( - Union[DirectiveDefinitionNode, TypeDefinitionNode], + "Union[DirectiveDefinitionNode, TypeDefinitionNode]", definition, ).name.value ) diff --git a/src/graphql/validation/rules/known_argument_names.py b/src/graphql/validation/rules/known_argument_names.py index dadfd34a..643300d0 100644 --- a/src/graphql/validation/rules/known_argument_names.py +++ b/src/graphql/validation/rules/known_argument_names.py @@ -16,7 +16,7 @@ from ...type import specified_directives from . import ASTValidationRule, SDLValidationContext, ValidationContext -__all__ = ["KnownArgumentNamesRule", "KnownArgumentNamesOnDirectivesRule"] +__all__ = ["KnownArgumentNamesOnDirectivesRule", "KnownArgumentNamesRule"] class KnownArgumentNamesOnDirectivesRule(ASTValidationRule): @@ -35,7 +35,7 @@ def __init__(self, context: ValidationContext | SDLValidationContext) -> None: schema = context.schema defined_directives = schema.directives if schema else specified_directives - for directive in cast(List, defined_directives): + for directive in cast("List", defined_directives): directive_args[directive.name] = list(directive.args) ast_definitions = context.document.definitions diff --git a/src/graphql/validation/rules/known_directives.py b/src/graphql/validation/rules/known_directives.py index 8a0c76c4..da31730b 100644 --- a/src/graphql/validation/rules/known_directives.py +++ b/src/graphql/validation/rules/known_directives.py @@ -35,7 +35,7 @@ def __init__(self, context: ValidationContext | SDLValidationContext) -> None: schema = context.schema defined_directives = ( - schema.directives if schema else cast(List, specified_directives) + schema.directives if schema else cast("List", specified_directives) ) for directive in defined_directives: locations_map[directive.name] = directive.locations @@ -111,7 +111,7 @@ def get_directive_location_for_ast_path( raise TypeError(msg) kind = applied_to.kind if kind == "operation_definition": - applied_to = cast(OperationDefinitionNode, applied_to) + applied_to = cast("OperationDefinitionNode", applied_to) return _operation_location[applied_to.operation.value] if kind == "input_value_definition": parent_node = ancestors[-3] diff --git a/src/graphql/validation/rules/known_type_names.py b/src/graphql/validation/rules/known_type_names.py index 118d7c0e..5dbac00b 100644 --- a/src/graphql/validation/rules/known_type_names.py +++ b/src/graphql/validation/rules/known_type_names.py @@ -94,7 +94,7 @@ def is_sdl_node( value is not None and not isinstance(value, list) and ( - is_type_system_definition_node(cast(Node, value)) - or is_type_system_extension_node(cast(Node, value)) + is_type_system_definition_node(cast("Node", value)) + or is_type_system_extension_node(cast("Node", value)) ) ) diff --git a/src/graphql/validation/rules/overlapping_fields_can_be_merged.py b/src/graphql/validation/rules/overlapping_fields_can_be_merged.py index b077958b..97939e56 100644 --- a/src/graphql/validation/rules/overlapping_fields_can_be_merged.py +++ b/src/graphql/validation/rules/overlapping_fields_can_be_merged.py @@ -44,8 +44,7 @@ def reason_message(reason: ConflictReasonMessage) -> str: if isinstance(reason, list): return " and 
".join( - f"subfields '{response_name}' conflict" - f" because {reason_message(sub_reason)}" + f"subfields '{response_name}' conflict because {reason_message(sub_reason)}" for response_name, sub_reason in reason ) return reason @@ -539,8 +538,8 @@ def find_conflict( ) # The return type for each field. - type1 = cast(Optional[GraphQLOutputType], def1 and def1.type) - type2 = cast(Optional[GraphQLOutputType], def2 and def2.type) + type1 = cast("Optional[GraphQLOutputType]", def1 and def1.type) + type2 = cast("Optional[GraphQLOutputType]", def2 and def2.type) if not are_mutually_exclusive: # Two aliases must refer to the same field. @@ -740,7 +739,7 @@ def collect_fields_and_fragment_names( if not node_and_defs.get(response_name): node_and_defs[response_name] = [] node_and_defs[response_name].append( - cast(NodeAndDef, (parent_type, selection, field_def)) + cast("NodeAndDef", (parent_type, selection, field_def)) ) elif isinstance(selection, FragmentSpreadNode): fragment_names[selection.name.value] = True diff --git a/src/graphql/validation/rules/provided_required_arguments.py b/src/graphql/validation/rules/provided_required_arguments.py index a9313273..9c98065e 100644 --- a/src/graphql/validation/rules/provided_required_arguments.py +++ b/src/graphql/validation/rules/provided_required_arguments.py @@ -19,7 +19,7 @@ from ...type import GraphQLArgument, is_required_argument, is_type, specified_directives from . import ASTValidationRule, SDLValidationContext, ValidationContext -__all__ = ["ProvidedRequiredArgumentsRule", "ProvidedRequiredArgumentsOnDirectivesRule"] +__all__ = ["ProvidedRequiredArgumentsOnDirectivesRule", "ProvidedRequiredArgumentsRule"] class ProvidedRequiredArgumentsOnDirectivesRule(ASTValidationRule): @@ -41,7 +41,7 @@ def __init__(self, context: ValidationContext | SDLValidationContext) -> None: schema = context.schema defined_directives = schema.directives if schema else specified_directives - for directive in cast(List, defined_directives): + for directive in cast("List", defined_directives): required_args_map[directive.name] = { name: arg for name, arg in directive.args.items() @@ -71,7 +71,7 @@ def leave_directive(self, directive_node: DirectiveNode, *_args: Any) -> None: arg_type_str = ( str(arg_type) if is_type(arg_type) - else print_ast(cast(TypeNode, arg_type)) + else print_ast(cast("TypeNode", arg_type)) ) self.report_error( GraphQLError( diff --git a/src/graphql/validation/rules/single_field_subscriptions.py b/src/graphql/validation/rules/single_field_subscriptions.py index 9a689809..5ebbc1b3 100644 --- a/src/graphql/validation/rules/single_field_subscriptions.py +++ b/src/graphql/validation/rules/single_field_subscriptions.py @@ -2,10 +2,10 @@ from __future__ import annotations -from typing import Any, cast +from typing import Any from ...error import GraphQLError -from ...execution.collect_fields import collect_fields +from ...execution.collect_fields import FieldDetails, collect_fields from ...language import ( FieldNode, FragmentDefinitionNode, @@ -17,6 +17,10 @@ __all__ = ["SingleFieldSubscriptionsRule"] +def to_nodes(field_details_list: list[FieldDetails]) -> list[FieldNode]: + return [field_details.node for field_details in field_details_list] + + class SingleFieldSubscriptionsRule(ValidationRule): """Subscriptions must only include a single non-introspection field. 
@@ -42,24 +46,20 @@ def enter_operation_definition( for definition in document.definitions if isinstance(definition, FragmentDefinitionNode) } - grouped_field_set = collect_fields( + fields = collect_fields( schema, fragments, variable_values, subscription_type, node, - ).grouped_field_set - if len(grouped_field_set) > 1: - field_selection_lists = list(grouped_field_set.values()) - extra_field_selection_lists = field_selection_lists[1:] + ).fields + if len(fields) > 1: + field_groups = list(fields.values()) + extra_field_groups = field_groups[1:] extra_field_selection = [ - field - for fields in extra_field_selection_lists - for field in ( - fields - if isinstance(fields, list) - else [cast(FieldNode, fields)] - ) + node + for field_group in extra_field_groups + for node in to_nodes(field_group) ] self.report_error( GraphQLError( @@ -72,8 +72,8 @@ def enter_operation_definition( extra_field_selection, ) ) - for field_group in grouped_field_set.values(): - field_name = field_group[0].name.value + for field_group in fields.values(): + field_name = to_nodes(field_group)[0].name.value if field_name.startswith("__"): self.report_error( GraphQLError( @@ -83,6 +83,6 @@ def enter_operation_definition( else f"Subscription '{operation_name}'" ) + " must not select an introspection top level field.", - field_group, + to_nodes(field_group), ) ) diff --git a/src/graphql/validation/rules/stream_directive_on_list_field.py b/src/graphql/validation/rules/stream_directive_on_list_field.py index 141984c2..03015cd0 100644 --- a/src/graphql/validation/rules/stream_directive_on_list_field.py +++ b/src/graphql/validation/rules/stream_directive_on_list_field.py @@ -28,7 +28,7 @@ def enter_directive( _path: Any, _ancestors: list[Node], ) -> None: - context = cast(ValidationContext, self.context) + context = cast("ValidationContext", self.context) field_def = context.get_field_def() parent_type = context.get_parent_type() if ( diff --git a/src/graphql/validation/rules/unique_directives_per_location.py b/src/graphql/validation/rules/unique_directives_per_location.py index de9a05d0..daab2935 100644 --- a/src/graphql/validation/rules/unique_directives_per_location.py +++ b/src/graphql/validation/rules/unique_directives_per_location.py @@ -38,7 +38,7 @@ def __init__(self, context: ValidationContext | SDLValidationContext) -> None: schema = context.schema defined_directives = ( - schema.directives if schema else cast(List, specified_directives) + schema.directives if schema else cast("List", specified_directives) ) for directive in defined_directives: unique_directive_map[directive.name] = not directive.is_repeatable @@ -60,7 +60,7 @@ def enter(self, node: Node, *_args: Any) -> None: directives = getattr(node, "directives", None) if not directives: return - directives = cast(List[DirectiveNode], directives) + directives = cast("List[DirectiveNode]", directives) if isinstance(node, (SchemaDefinitionNode, SchemaExtensionNode)): seen_directives = self.schema_directives diff --git a/src/graphql/validation/rules/unique_field_definition_names.py b/src/graphql/validation/rules/unique_field_definition_names.py index 8451bc27..39df7203 100644 --- a/src/graphql/validation/rules/unique_field_definition_names.py +++ b/src/graphql/validation/rules/unique_field_definition_names.py @@ -47,8 +47,7 @@ def check_field_uniqueness( elif field_name in field_names: self.report_error( GraphQLError( - f"Field '{type_name}.{field_name}'" - " can only be defined once.", + f"Field '{type_name}.{field_name}' can only be defined once.", 
[field_names[field_name], field_def.name], ) ) diff --git a/src/graphql/validation/rules/values_of_correct_type.py b/src/graphql/validation/rules/values_of_correct_type.py index 8951a2d9..ea4c4a3c 100644 --- a/src/graphql/validation/rules/values_of_correct_type.py +++ b/src/graphql/validation/rules/values_of_correct_type.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import Any, cast +from typing import Any, Mapping, cast from ...error import GraphQLError from ...language import ( @@ -12,16 +12,20 @@ FloatValueNode, IntValueNode, ListValueNode, + NonNullTypeNode, NullValueNode, ObjectFieldNode, ObjectValueNode, StringValueNode, ValueNode, + VariableDefinitionNode, + VariableNode, VisitorAction, print_ast, ) from ...pyutils import Undefined, did_you_mean, suggestion_list from ...type import ( + GraphQLInputObjectType, GraphQLScalarType, get_named_type, get_nullable_type, @@ -31,7 +35,7 @@ is_non_null_type, is_required_input_field, ) -from . import ValidationRule +from . import ValidationContext, ValidationRule __all__ = ["ValuesOfCorrectTypeRule"] @@ -45,6 +49,18 @@ class ValuesOfCorrectTypeRule(ValidationRule): See https://spec.graphql.org/draft/#sec-Values-of-Correct-Type """ + def __init__(self, context: ValidationContext) -> None: + super().__init__(context) + self.variable_definitions: dict[str, VariableDefinitionNode] = {} + + def enter_operation_definition(self, *_args: Any) -> None: + self.variable_definitions.clear() + + def enter_variable_definition( + self, definition: VariableDefinitionNode, *_args: Any + ) -> None: + self.variable_definitions[definition.variable.name.value] = definition + def enter_list_value(self, node: ListValueNode, *_args: Any) -> VisitorAction: # Note: TypeInfo will traverse into a list's item type, so look to the parent # input type to check if it is a list. @@ -72,6 +88,10 @@ def enter_object_value(self, node: ObjectValueNode, *_args: Any) -> VisitorActio node, ) ) + if type_.is_one_of: + validate_one_of_input_object( + self.context, node, type_, field_node_map, self.variable_definitions + ) return None def enter_object_field(self, node: ObjectFieldNode, *_args: Any) -> None: @@ -137,7 +157,7 @@ def is_valid_value_node(self, node: ValueNode) -> None: # Scalars determine if a literal value is valid via `parse_literal()` which may # throw or return an invalid value to indicate failure. 
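Together with the variable tracking added to ValuesOfCorrectTypeRule above, the validate_one_of_input_object helper (defined just below) reports OneOf misuse in executable documents. A sketch of both error paths, again assuming @oneOf is available as a predefined SDL directive:

from graphql import build_schema, parse, validate

schema = build_schema("""
    type Query { locate(point: Point): String }
    input Point @oneOf { x: Int, y: Int }
""")

for error in validate(schema, parse("{ locate(point: { x: 1, y: 2 }) }")):
    print(error.message)
# OneOf Input Object 'Point' must specify exactly one key.

for error in validate(schema, parse("query ($x: Int) { locate(point: { x: $x }) }")):
    print(error.message)
# Variable 'x' must be non-nullable to be used for OneOf Input Object 'Point'.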
- type_ = cast(GraphQLScalarType, type_) + type_ = cast("GraphQLScalarType", type_) try: parse_result = type_.parse_literal(node) if parse_result is Undefined: @@ -162,3 +182,51 @@ def is_valid_value_node(self, node: ValueNode) -> None: ) return + + +def validate_one_of_input_object( + context: ValidationContext, + node: ObjectValueNode, + type_: GraphQLInputObjectType, + field_node_map: Mapping[str, ObjectFieldNode], + variable_definitions: dict[str, VariableDefinitionNode], +) -> None: + keys = list(field_node_map) + is_not_exactly_one_field = len(keys) != 1 + + if is_not_exactly_one_field: + context.report_error( + GraphQLError( + f"OneOf Input Object '{type_.name}' must specify exactly one key.", + node, + ) + ) + return + + object_field_node = field_node_map.get(keys[0]) + value = object_field_node.value if object_field_node else None + is_null_literal = not value or isinstance(value, NullValueNode) + + if is_null_literal: + context.report_error( + GraphQLError( + f"Field '{type_.name}.{keys[0]}' must be non-null.", + node, + ) + ) + return + + is_variable = value and isinstance(value, VariableNode) + if is_variable: + variable_name = cast("VariableNode", value).name.value + definition = variable_definitions[variable_name] + is_nullable_variable = not isinstance(definition.type, NonNullTypeNode) + + if is_nullable_variable: + context.report_error( + GraphQLError( + f"Variable '{variable_name}' must be non-nullable" + f" to be used for OneOf Input Object '{type_.name}'.", + node, + ) + ) diff --git a/src/graphql/validation/validate.py b/src/graphql/validation/validate.py index 1439f7e4..8e59821c 100644 --- a/src/graphql/validation/validate.py +++ b/src/graphql/validation/validate.py @@ -14,7 +14,13 @@ if TYPE_CHECKING: from .rules import ASTValidationRule -__all__ = ["assert_valid_sdl", "assert_valid_sdl_extension", "validate", "validate_sdl"] +__all__ = [ + "ValidationAbortedError", + "assert_valid_sdl", + "assert_valid_sdl_extension", + "validate", + "validate_sdl", +] class ValidationAbortedError(GraphQLError): diff --git a/src/graphql/validation/validation_context.py b/src/graphql/validation/validation_context.py index dec21042..055b4231 100644 --- a/src/graphql/validation/validation_context.py +++ b/src/graphql/validation/validation_context.py @@ -143,7 +143,7 @@ def get_fragment_spreads(self, node: SelectionSetNode) -> list[FragmentSpreadNod append_spread(selection) else: set_to_visit = cast( - NodeWithSelectionSet, selection + "NodeWithSelectionSet", selection ).selection_set if set_to_visit: append_set(set_to_visit) diff --git a/src/graphql/version.py b/src/graphql/version.py index 29166e49..fa63f8af 100644 --- a/src/graphql/version.py +++ b/src/graphql/version.py @@ -5,12 +5,12 @@ import re from typing import NamedTuple -__all__ = ["version", "version_info", "version_js", "version_info_js"] +__all__ = ["version", "version_info", "version_info_js", "version_js"] -version = "3.3.0a6" +version = "3.3.0a9" -version_js = "17.0.0a2" +version_js = "17.0.0a3" _re_version = re.compile(r"(\d+)\.(\d+)\.(\d+)(\D*)(\d*)") diff --git a/tests/benchmarks/test_visit.py b/tests/benchmarks/test_visit.py index 53bfc98e..4e7a85a2 100644 --- a/tests/benchmarks/test_visit.py +++ b/tests/benchmarks/test_visit.py @@ -23,5 +23,5 @@ def test_visit_all_ast_nodes(benchmark, big_schema_sdl): # noqa: F811 def test_visit_all_ast_nodes_in_parallel(benchmark, big_schema_sdl): # noqa: F811 document_ast = parse(big_schema_sdl) visitor = DummyVisitor() - parallel_visitor = ParallelVisitor([visitor] * 50) + 
parallel_visitor = ParallelVisitor([visitor] * 25) benchmark(lambda: visit(document_ast, parallel_visitor)) diff --git a/tests/error/test_graphql_error.py b/tests/error/test_graphql_error.py index d01e1e8a..03b85dcf 100644 --- a/tests/error/test_graphql_error.py +++ b/tests/error/test_graphql_error.py @@ -25,7 +25,7 @@ ast = parse(source) operation_node = ast.definitions[0] -operation_node = cast(OperationDefinitionNode, operation_node) +operation_node = cast("OperationDefinitionNode", operation_node) assert operation_node assert operation_node.kind == "operation_definition" field_node = operation_node.selection_set.selections[0] @@ -224,7 +224,7 @@ def serializes_to_include_all_standard_fields(): extensions = {"foo": "bar "} e_full = GraphQLError("msg", field_node, None, None, path, None, extensions) assert str(e_full) == ( - "msg\n\nGraphQL request:2:3\n" "1 | {\n2 | field\n | ^\n3 | }" + "msg\n\nGraphQL request:2:3\n1 | {\n2 | field\n | ^\n3 | }" ) assert repr(e_full) == ( "GraphQLError('msg', locations=[SourceLocation(line=2, column=3)]," @@ -299,7 +299,7 @@ def prints_an_error_with_nodes_from_different_sources(): ) ) op_a = doc_a.definitions[0] - op_a = cast(ObjectTypeDefinitionNode, op_a) + op_a = cast("ObjectTypeDefinitionNode", op_a) assert op_a assert op_a.kind == "object_type_definition" assert op_a.fields @@ -317,7 +317,7 @@ def prints_an_error_with_nodes_from_different_sources(): ) ) op_b = doc_b.definitions[0] - op_b = cast(ObjectTypeDefinitionNode, op_b) + op_b = cast("ObjectTypeDefinitionNode", op_b) assert op_b assert op_b.kind == "object_type_definition" assert op_b.fields diff --git a/tests/error/test_located_error.py b/tests/error/test_located_error.py index 593b24ad..f22f6fd4 100644 --- a/tests/error/test_located_error.py +++ b/tests/error/test_located_error.py @@ -11,7 +11,7 @@ def throws_without_an_original_error(): def passes_graphql_error_through(): path = ["path", 3, "to", "field"] - e = GraphQLError("msg", None, None, None, cast(Any, path)) + e = GraphQLError("msg", None, None, None, cast("Any", path)) assert located_error(e, [], []) == e def passes_graphql_error_ish_through(): @@ -21,7 +21,7 @@ def passes_graphql_error_ish_through(): def does_not_pass_through_elasticsearch_like_errors(): e = Exception("I am from elasticsearch") - cast(Any, e).path = "/something/feed/_search" + cast("Any", e).path = "/something/feed/_search" assert located_error(e, [], []) is not e def handles_lazy_error_messages(): diff --git a/tests/execution/test_abstract.py b/tests/execution/test_abstract.py index b5ebc45b..ddb01345 100644 --- a/tests/execution/test_abstract.py +++ b/tests/execution/test_abstract.py @@ -3,6 +3,7 @@ from typing import Any, NamedTuple import pytest + from graphql.execution import ExecutionResult, execute, execute_sync from graphql.language import parse from graphql.pyutils import is_awaitable @@ -22,14 +23,14 @@ def sync_and_async(spec): """Decorator for running a test synchronously and asynchronously.""" return pytest.mark.asyncio( - pytest.mark.parametrize("sync", (True, False), ids=("sync", "async"))(spec) + pytest.mark.parametrize("sync", [True, False], ids=("sync", "async"))(spec) ) def access_variants(spec): """Decorator for tests with dict and object access, including inheritance.""" return pytest.mark.asyncio( - pytest.mark.parametrize("access", ("dict", "object", "inheritance"))(spec) + pytest.mark.parametrize("access", ["dict", "object", "inheritance"])(spec) ) @@ -41,7 +42,7 @@ async def execute_query( assert isinstance(schema, GraphQLSchema) assert 
isinstance(query, str) document = parse(query) - result = (execute_sync if sync else execute)(schema, document, root_value) # type: ignore + result = (execute_sync if sync else execute)(schema, document, root_value) if not sync and is_awaitable(result): result = await result assert isinstance(result, ExecutionResult) diff --git a/tests/execution/test_customize.py b/tests/execution/test_customize.py index 23740237..bf1859a2 100644 --- a/tests/execution/test_customize.py +++ b/tests/execution/test_customize.py @@ -1,6 +1,7 @@ from inspect import isasyncgen import pytest + from graphql.execution import ExecutionContext, execute, subscribe from graphql.language import parse from graphql.type import GraphQLField, GraphQLObjectType, GraphQLSchema, GraphQLString @@ -9,7 +10,7 @@ anext # noqa: B018 except NameError: # pragma: no cover (Python < 3.10) # noinspection PyShadowingBuiltins - async def anext(iterator): # noqa: A001 + async def anext(iterator): """Return the next item from an async iterator.""" return await iterator.__anext__() @@ -42,27 +43,42 @@ def uses_a_custom_execution_context_class(): ) class TestExecutionContext(ExecutionContext): + def __init__(self, *args, **kwargs): + assert kwargs.pop("custom_arg", None) == "baz" + super().__init__(*args, **kwargs) + def execute_field( self, parent_type, source, field_group, path, - incremental_data_record=None, + incremental_data_record, + defer_map, ): result = super().execute_field( - parent_type, source, field_group, path, incremental_data_record + parent_type, + source, + field_group, + path, + incremental_data_record, + defer_map, ) return result * 2 # type: ignore - assert execute(schema, query, execution_context_class=TestExecutionContext) == ( + assert execute( + schema, + query, + execution_context_class=TestExecutionContext, + custom_arg="baz", + ) == ( {"foo": "barbar"}, None, ) def describe_customize_subscription(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def uses_a_custom_subscribe_field_resolver(): schema = GraphQLSchema( query=GraphQLObjectType("Query", {"foo": GraphQLField(GraphQLString)}), @@ -91,9 +107,13 @@ async def custom_foo(): await subscription.aclose() - @pytest.mark.asyncio() + @pytest.mark.asyncio async def uses_a_custom_execution_context_class(): class TestExecutionContext(ExecutionContext): + def __init__(self, *args, **kwargs): + assert kwargs.pop("custom_arg", None) == "baz" + super().__init__(*args, **kwargs) + def build_resolve_info(self, *args, **kwargs): resolve_info = super().build_resolve_info(*args, **kwargs) resolve_info.context["foo"] = "bar" @@ -125,6 +145,7 @@ def resolve_foo(message, _info): document, context_value={}, execution_context_class=TestExecutionContext, + custom_arg="baz", ) assert isasyncgen(subscription) diff --git a/tests/execution/test_defer.py b/tests/execution/test_defer.py index 6b39f74e..1fcfa25c 100644 --- a/tests/execution/test_defer.py +++ b/tests/execution/test_defer.py @@ -1,20 +1,29 @@ from __future__ import annotations from asyncio import sleep -from typing import Any, AsyncGenerator, NamedTuple +from typing import Any, AsyncGenerator, NamedTuple, cast import pytest + from graphql.error import GraphQLError from graphql.execution import ( ExecutionResult, ExperimentalIncrementalExecutionResults, IncrementalDeferResult, + IncrementalResult, InitialIncrementalExecutionResult, SubsequentIncrementalExecutionResult, execute, experimental_execute_incrementally, ) -from graphql.execution.incremental_publisher import DeferredFragmentRecord +from 
graphql.execution.incremental_publisher import ( + CompletedResult, + DeferredFragmentRecord, + DeferredGroupedFieldSetRecord, + PendingResult, + StreamItemsRecord, + StreamRecord, +) from graphql.language import DocumentNode, parse from graphql.pyutils import Path, is_awaitable from graphql.type import ( @@ -36,6 +45,79 @@ }, ) + +class Friend(NamedTuple): + id: int + name: str + + +friends = [Friend(2, "Han"), Friend(3, "Leia"), Friend(4, "C-3PO")] + +deeper_object = GraphQLObjectType( + "DeeperObject", + { + "foo": GraphQLField(GraphQLString), + "bar": GraphQLField(GraphQLString), + "baz": GraphQLField(GraphQLString), + "bak": GraphQLField(GraphQLString), + }, +) + +nested_object = GraphQLObjectType( + "NestedObject", + {"deeperObject": GraphQLField(deeper_object), "name": GraphQLField(GraphQLString)}, +) + +another_nested_object = GraphQLObjectType( + "AnotherNestedObject", {"deeperObject": GraphQLField(deeper_object)} +) + +hero = { + "name": "Luke", + "id": 1, + "friends": friends, + "nestedObject": nested_object, + "AnotherNestedObject": another_nested_object, +} + +c = GraphQLObjectType( + "c", + { + "d": GraphQLField(GraphQLString), + "nonNullErrorField": GraphQLField(GraphQLNonNull(GraphQLString)), + }, +) + +e = GraphQLObjectType( + "e", + { + "f": GraphQLField(GraphQLString), + }, +) + +b = GraphQLObjectType( + "b", + { + "c": GraphQLField(c), + "e": GraphQLField(e), + }, +) + +a = GraphQLObjectType( + "a", + { + "b": GraphQLField(b), + "someField": GraphQLField(GraphQLString), + }, +) + +g = GraphQLObjectType( + "g", + { + "h": GraphQLField(GraphQLString), + }, +) + hero_type = GraphQLObjectType( "Hero", { @@ -43,24 +125,19 @@ "name": GraphQLField(GraphQLString), "nonNullName": GraphQLField(GraphQLNonNull(GraphQLString)), "friends": GraphQLField(GraphQLList(friend_type)), + "nestedObject": GraphQLField(nested_object), + "anotherNestedObject": GraphQLField(another_nested_object), }, ) -query = GraphQLObjectType("Query", {"hero": GraphQLField(hero_type)}) +query = GraphQLObjectType( + "Query", + {"hero": GraphQLField(hero_type), "a": GraphQLField(a), "g": GraphQLField(g)}, +) schema = GraphQLSchema(query) -class Friend(NamedTuple): - id: int - name: str - - -friends = [Friend(2, "Han"), Friend(3, "Leia"), Friend(4, "C-3PO")] - -hero = {"id": 1, "name": "Luke", "friends": friends} - - class Resolvers: """Various resolver functions for testing.""" @@ -76,19 +153,23 @@ async def null_async(_info) -> None: @staticmethod async def slow(_info) -> str: - """Simulate a slow async resolver returning a value.""" + """Simulate a slow async resolver returning a non-null value.""" await sleep(0) return "slow" + @staticmethod + async def slow_null(_info) -> None: + """Simulate a slow async resolver returning a null value.""" + await sleep(0) + @staticmethod def bad(_info) -> str: """Simulate a bad resolver raising an error.""" raise RuntimeError("bad") @staticmethod - async def friends(_info) -> AsyncGenerator[Friend, None]: - """A slow async generator yielding the first friend.""" - await sleep(0) + async def first_friend(_info) -> AsyncGenerator[Friend, None]: + """An async generator yielding the first friend.""" yield friends[0] @@ -114,28 +195,77 @@ def modified_args(args: dict[str, Any], **modifications: Any) -> dict[str, Any]: def describe_execute_defer_directive(): + def can_format_and_print_pending_result(): + result = PendingResult("foo", []) + assert result.formatted == {"id": "foo", "path": []} + assert str(result) == "PendingResult(id='foo', path=[])" + + result = 
PendingResult(id="foo", path=["bar", 1], label="baz") + assert result.formatted == {"id": "foo", "path": ["bar", 1], "label": "baz"} + assert str(result) == "PendingResult(id='foo', path=['bar', 1], label='baz')" + + def can_compare_pending_result(): + args: dict[str, Any] = {"id": "foo", "path": ["bar", 1], "label": "baz"} + result = PendingResult(**args) + assert result == PendingResult(**args) + assert result != PendingResult(**modified_args(args, id="bar")) + assert result != PendingResult(**modified_args(args, path=["bar", 2])) + assert result != PendingResult(**modified_args(args, label="bar")) + assert result == tuple(args.values()) + assert result == tuple(args.values())[:2] + assert result != tuple(args.values())[:1] + assert result != (*tuple(args.values())[:1], ["bar", 2]) + assert result == args + assert result != {**args, "id": "bar"} + assert result != {**args, "path": ["bar", 2]} + assert result != {**args, "label": "bar"} + + def can_format_and_print_completed_result(): + result = CompletedResult("foo") + assert result.formatted == {"id": "foo"} + assert str(result) == "CompletedResult(id='foo')" + + result = CompletedResult(id="foo", errors=[GraphQLError("oops")]) + assert result.formatted == {"id": "foo", "errors": [{"message": "oops"}]} + assert str(result) == "CompletedResult(id='foo', errors=[GraphQLError('oops')])" + + def can_compare_completed_result(): + args: dict[str, Any] = {"id": "foo", "errors": []} + result = CompletedResult(**args) + assert result == CompletedResult(**args) + assert result != CompletedResult(**modified_args(args, id="bar")) + assert result != CompletedResult( + **modified_args(args, errors=[GraphQLError("oops")]) + ) + assert result == tuple(args.values()) + assert result != tuple(args.values())[:1] + assert result != (*tuple(args.values())[:1], [GraphQLError("oops")]) + assert result == args + assert result != {**args, "id": "bar"} + assert result != {**args, "errors": [{"message": "oops"}]} + def can_format_and_print_incremental_defer_result(): - result = IncrementalDeferResult() - assert result.formatted == {"data": None} - assert str(result) == "IncrementalDeferResult(data=None, errors=None)" + result = IncrementalDeferResult(data={}, id="foo") + assert result.formatted == {"data": {}, "id": "foo"} + assert str(result) == "IncrementalDeferResult(data={}, id='foo')" result = IncrementalDeferResult( data={"hello": "world"}, - errors=[GraphQLError("msg")], - path=["foo", 1], - label="bar", + id="foo", + sub_path=["bar", 1], + errors=[GraphQLError("oops")], extensions={"baz": 2}, ) assert result.formatted == { "data": {"hello": "world"}, - "errors": [{"message": "msg"}], + "id": "foo", + "subPath": ["bar", 1], + "errors": [{"message": "oops"}], "extensions": {"baz": 2}, - "label": "bar", - "path": ["foo", 1], } assert ( str(result) == "IncrementalDeferResult(data={'hello': 'world'}," - " errors=[GraphQLError('msg')], path=['foo', 1], label='bar'," + " id='foo', sub_path=['bar', 1], errors=[GraphQLError('oops')]," " extensions={'baz': 2})" ) @@ -143,9 +273,9 @@ def can_format_and_print_incremental_defer_result(): def can_compare_incremental_defer_result(): args: dict[str, Any] = { "data": {"hello": "world"}, - "errors": [GraphQLError("msg")], - "path": ["foo", 1], - "label": "bar", + "id": "foo", + "sub_path": ["bar", 1], + "errors": [GraphQLError("oops")], "extensions": {"baz": 2}, } result = IncrementalDeferResult(**args) @@ -153,9 +283,11 @@ def can_compare_incremental_defer_result(): assert result != IncrementalDeferResult( 
**modified_args(args, data={"hello": "foo"}) ) + assert result != IncrementalDeferResult(**modified_args(args, id="bar")) + assert result != IncrementalDeferResult( + **modified_args(args, sub_path=["bar", 2]) + ) assert result != IncrementalDeferResult(**modified_args(args, errors=[])) - assert result != IncrementalDeferResult(**modified_args(args, path=["foo", 2])) - assert result != IncrementalDeferResult(**modified_args(args, label="baz")) assert result != IncrementalDeferResult( **modified_args(args, extensions={"baz": 1}) ) @@ -164,54 +296,50 @@ def can_compare_incremental_defer_result(): assert result == tuple(args.values())[:3] assert result == tuple(args.values())[:2] assert result != tuple(args.values())[:1] - assert result != ({"hello": "world"}, []) + assert result != ({"hello": "world"}, "bar") + args["subPath"] = args.pop("sub_path") assert result == args - assert result == dict(list(args.items())[:2]) - assert result == dict(list(args.items())[:3]) - assert result != dict(list(args.items())[:2] + [("path", ["foo", 2])]) - assert result != {**args, "label": "baz"} + assert result != {**args, "data": {"hello": "foo"}} + assert result != {**args, "id": "bar"} + assert result != {**args, "subPath": ["bar", 2]} + assert result != {**args, "errors": []} + assert result != {**args, "extensions": {"baz": 1}} def can_format_and_print_initial_incremental_execution_result(): result = InitialIncrementalExecutionResult() - assert result.formatted == {"data": None, "hasNext": False} - assert ( - str(result) == "InitialIncrementalExecutionResult(data=None, errors=None)" - ) + assert result.formatted == {"data": None, "hasNext": False, "pending": []} + assert str(result) == "InitialIncrementalExecutionResult(data=None)" result = InitialIncrementalExecutionResult(has_next=True) - assert result.formatted == {"data": None, "hasNext": True} - assert ( - str(result) - == "InitialIncrementalExecutionResult(data=None, errors=None, has_next)" - ) + assert result.formatted == {"data": None, "hasNext": True, "pending": []} + assert str(result) == "InitialIncrementalExecutionResult(data=None, has_next)" - incremental = [IncrementalDeferResult(label="foo")] result = InitialIncrementalExecutionResult( data={"hello": "world"}, errors=[GraphQLError("msg")], - incremental=incremental, + pending=[PendingResult("foo", ["bar"])], has_next=True, extensions={"baz": 2}, ) assert result.formatted == { "data": {"hello": "world"}, - "errors": [GraphQLError("msg")], - "incremental": [{"data": None, "label": "foo"}], + "errors": [{"message": "msg"}], + "pending": [{"id": "foo", "path": ["bar"]}], "hasNext": True, "extensions": {"baz": 2}, } assert ( str(result) == "InitialIncrementalExecutionResult(" - "data={'hello': 'world'}, errors=[GraphQLError('msg')], incremental[1]," - " has_next, extensions={'baz': 2})" + "data={'hello': 'world'}, errors=[GraphQLError('msg')]," + " pending=[PendingResult(id='foo', path=['bar'])], has_next," + " extensions={'baz': 2})" ) def can_compare_initial_incremental_execution_result(): - incremental = [IncrementalDeferResult(label="foo")] args: dict[str, Any] = { "data": {"hello": "world"}, "errors": [GraphQLError("msg")], - "incremental": incremental, + "pending": [PendingResult("foo", ["bar"])], "has_next": True, "extensions": {"baz": 2}, } @@ -224,7 +352,7 @@ def can_compare_initial_incremental_execution_result(): **modified_args(args, errors=[]) ) assert result != InitialIncrementalExecutionResult( - **modified_args(args, incremental=[]) + **modified_args(args, pending=[]) ) assert 
result != InitialIncrementalExecutionResult( **modified_args(args, has_next=False) @@ -233,6 +361,7 @@ def can_compare_initial_incremental_execution_result(): **modified_args(args, extensions={"baz": 1}) ) assert result == tuple(args.values()) + assert result == tuple(args.values())[:5] assert result == tuple(args.values())[:4] assert result == tuple(args.values())[:3] assert result == tuple(args.values())[:2] @@ -242,23 +371,40 @@ def can_compare_initial_incremental_execution_result(): assert result == { "data": {"hello": "world"}, "errors": [GraphQLError("msg")], - "incremental": incremental, + "pending": [PendingResult("foo", ["bar"])], "hasNext": True, "extensions": {"baz": 2}, } - assert result == { + assert result != { + "errors": [GraphQLError("msg")], + "pending": [PendingResult("foo", ["bar"])], + "hasNext": True, + "extensions": {"baz": 2}, + } + assert result != { + "data": {"hello": "world"}, + "pending": [PendingResult("foo", ["bar"])], + "hasNext": True, + "extensions": {"baz": 2}, + } + assert result != { "data": {"hello": "world"}, "errors": [GraphQLError("msg")], - "incremental": incremental, "hasNext": True, + "extensions": {"baz": 2}, } assert result != { "data": {"hello": "world"}, "errors": [GraphQLError("msg")], - "incremental": incremental, - "hasNext": False, + "pending": [PendingResult("foo", ["bar"])], "extensions": {"baz": 2}, } + assert result != { + "data": {"hello": "world"}, + "errors": [GraphQLError("msg")], + "pending": [PendingResult("foo", ["bar"])], + "hasNext": True, + } def can_format_and_print_subsequent_incremental_execution_result(): result = SubsequentIncrementalExecutionResult() @@ -269,31 +415,48 @@ def can_format_and_print_subsequent_incremental_execution_result(): assert result.formatted == {"hasNext": True} assert str(result) == "SubsequentIncrementalExecutionResult(has_next)" - incremental = [IncrementalDeferResult(label="foo")] + pending = [PendingResult("foo", ["bar"])] + incremental = [ + cast("IncrementalResult", IncrementalDeferResult({"foo": 1}, "bar")) + ] + completed = [CompletedResult("foo")] result = SubsequentIncrementalExecutionResult( - incremental=incremental, has_next=True, + pending=pending, + incremental=incremental, + completed=completed, extensions={"baz": 2}, ) assert result.formatted == { - "incremental": [{"data": None, "label": "foo"}], "hasNext": True, + "pending": [{"id": "foo", "path": ["bar"]}], + "incremental": [{"data": {"foo": 1}, "id": "bar"}], + "completed": [{"id": "foo"}], "extensions": {"baz": 2}, } assert ( - str(result) == "SubsequentIncrementalExecutionResult(incremental[1]," - " has_next, extensions={'baz': 2})" + str(result) == "SubsequentIncrementalExecutionResult(has_next," + " pending[1], incremental[1], completed[1], extensions={'baz': 2})" ) def can_compare_subsequent_incremental_execution_result(): - incremental = [IncrementalDeferResult(label="foo")] + pending = [PendingResult("foo", ["bar"])] + incremental = [ + cast("IncrementalResult", IncrementalDeferResult({"foo": 1}, "bar")) + ] + completed = [CompletedResult("foo")] args: dict[str, Any] = { - "incremental": incremental, "has_next": True, + "pending": pending, + "incremental": incremental, + "completed": completed, "extensions": {"baz": 2}, } result = SubsequentIncrementalExecutionResult(**args) assert result == SubsequentIncrementalExecutionResult(**args) + assert result != SubsequentIncrementalExecutionResult( + **modified_args(args, pending=[]) + ) assert result != SubsequentIncrementalExecutionResult( **modified_args(args, 
incremental=[]) ) @@ -304,36 +467,89 @@ def can_compare_subsequent_incremental_execution_result(): **modified_args(args, extensions={"baz": 1}) ) assert result == tuple(args.values()) + assert result == tuple(args.values())[:3] assert result == tuple(args.values())[:2] assert result != tuple(args.values())[:1] assert result != (incremental, False) assert result == { + "hasNext": True, + "pending": pending, + "incremental": incremental, + "completed": completed, + "extensions": {"baz": 2}, + } + assert result != { + "pending": pending, "incremental": incremental, + "completed": completed, + "extensions": {"baz": 2}, + } + assert result != { + "hasNext": True, + "incremental": incremental, + "completed": completed, + "extensions": {"baz": 2}, + } + assert result != { "hasNext": True, + "pending": pending, + "completed": completed, "extensions": {"baz": 2}, } - assert result == {"incremental": incremental, "hasNext": True} assert result != { + "hasNext": True, + "pending": pending, "incremental": incremental, - "hasNext": False, "extensions": {"baz": 2}, } + assert result != { + "hasNext": True, + "pending": pending, + "incremental": incremental, + "completed": completed, + } + + def can_print_deferred_grouped_field_set_record(): + record = DeferredGroupedFieldSetRecord([], {}, False) + assert ( + str(record) == "DeferredGroupedFieldSetRecord(" + "deferred_fragment_records=[], grouped_field_set={})" + ) + record = DeferredGroupedFieldSetRecord([], {}, True, Path(None, "foo", "Foo")) + assert ( + str(record) == "DeferredGroupedFieldSetRecord(" + "deferred_fragment_records=[], grouped_field_set={}, path=['foo'])" + ) def can_print_deferred_fragment_record(): - record = DeferredFragmentRecord(None, None, None) - assert str(record) == "DeferredFragmentRecord(path=[])" - record = DeferredFragmentRecord("foo", Path(None, "bar", "Bar"), record) + record = DeferredFragmentRecord(None, None) + assert str(record) == "DeferredFragmentRecord()" + record = DeferredFragmentRecord(Path(None, "bar", "Bar"), "foo") + assert str(record) == "DeferredFragmentRecord(path=['bar'], label='foo')" + + def can_print_stream_record(): + record = StreamRecord(Path(None, "bar", "Bar"), "foo") + assert str(record) == "StreamRecord(path=['bar'], label='foo')" + record.path = [] + assert str(record) == "StreamRecord(label='foo')" + record.label = None + assert str(record) == "StreamRecord()" + + def can_print_stream_items_record(): + record = StreamItemsRecord( + StreamRecord(Path(None, "bar", "Bar"), "foo"), + Path(None, "baz", "Baz"), + ) assert ( - str(record) == "DeferredFragmentRecord(" - "path=['bar'], label='foo', parent_context)" + str(record) == "StreamItemsRecord(stream_record=StreamRecord(" + "path=['bar'], label='foo'), path=['baz'])" ) - record.data = {"hello": "world"} + record = StreamItemsRecord(StreamRecord(Path(None, "bar", "Bar"))) assert ( - str(record) == "DeferredFragmentRecord(" - "path=['bar'], label='foo', parent_context, data)" + str(record) == "StreamItemsRecord(stream_record=StreamRecord(path=['bar']))" ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_defer_fragments_containing_scalar_types(): document = parse( """ @@ -351,14 +567,19 @@ async def can_defer_fragments_containing_scalar_types(): result = await complete(document) assert result == [ - {"data": {"hero": {"id": "1"}}, "hasNext": True}, { - "incremental": [{"data": {"name": "Luke"}, "path": ["hero"]}], + "data": {"hero": {"id": "1"}}, + "pending": [{"id": "0", "path": ["hero"]}], + "hasNext": True, + }, + { + "incremental": 
[{"data": {"name": "Luke"}, "id": "0"}], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_disable_defer_using_if_argument(): document = parse( """ @@ -375,16 +596,9 @@ async def can_disable_defer_using_if_argument(): ) result = await complete(document) - assert result == { - "data": { - "hero": { - "id": "1", - "name": "Luke", - }, - }, - } + assert result == {"data": {"hero": {"id": "1", "name": "Luke"}}} - @pytest.mark.asyncio() + @pytest.mark.asyncio async def does_not_disable_defer_with_null_if_argument(): document = parse( """ @@ -402,14 +616,19 @@ async def does_not_disable_defer_with_null_if_argument(): result = await complete(document) assert result == [ - {"data": {"hero": {"id": "1"}}, "hasNext": True}, { - "incremental": [{"data": {"name": "Luke"}, "path": ["hero"]}], + "data": {"hero": {"id": "1"}}, + "pending": [{"id": "0", "path": ["hero"]}], + "hasNext": True, + }, + { + "incremental": [{"data": {"name": "Luke"}, "id": "0"}], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def throws_an_error_for_defer_directive_with_non_string_label(): document = parse( """ @@ -430,7 +649,7 @@ async def throws_an_error_for_defer_directive_with_non_string_label(): ], } - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_defer_fragments_on_the_top_level_query_field(): document = parse( """ @@ -447,16 +666,19 @@ async def can_defer_fragments_on_the_top_level_query_field(): result = await complete(document) assert result == [ - {"data": {}, "hasNext": True}, { - "incremental": [ - {"data": {"hero": {"id": "1"}}, "path": [], "label": "DeferQuery"} - ], + "data": {}, + "pending": [{"id": "0", "path": [], "label": "DeferQuery"}], + "hasNext": True, + }, + { + "incremental": [{"data": {"hero": {"id": "1"}}, "id": "0"}], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_defer_fragments_with_errors_on_the_top_level_query_field(): document = parse( """ @@ -473,7 +695,11 @@ async def can_defer_fragments_with_errors_on_the_top_level_query_field(): result = await complete(document, {"hero": {**hero, "name": Resolvers.bad}}) assert result == [ - {"data": {}, "hasNext": True}, + { + "data": {}, + "pending": [{"id": "0", "path": [], "label": "DeferQuery"}], + "hasNext": True, + }, { "incremental": [ { @@ -485,15 +711,15 @@ async def can_defer_fragments_with_errors_on_the_top_level_query_field(): "path": ["hero", "name"], } ], - "path": [], - "label": "DeferQuery", + "id": "0", } ], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_defer_a_fragment_within_an_already_deferred_fragment(): document = parse( """ @@ -516,7 +742,17 @@ async def can_defer_a_fragment_within_an_already_deferred_fragment(): result = await complete(document) assert result == [ - {"data": {"hero": {}}, "hasNext": True}, + { + "data": {"hero": {}}, + "pending": [{"id": "0", "path": ["hero"], "label": "DeferTop"}], + "hasNext": True, + }, + { + "pending": [{"id": "1", "path": ["hero"], "label": "DeferNested"}], + "incremental": [{"data": {"id": "1"}, "id": "0"}], + "completed": [{"id": "0"}], + "hasNext": True, + }, { "incremental": [ { @@ -527,20 +763,15 @@ async def can_defer_a_fragment_within_an_already_deferred_fragment(): {"name": "C-3PO"}, ] }, - "path": ["hero"], - "label": "DeferNested", - }, - { - "data": {"id": "1"}, - "path": ["hero"], - "label": "DeferTop", + "id": 
"1", }, ], + "completed": [{"id": "1"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_defer_a_fragment_that_is_also_not_deferred_with_deferred_first(): document = parse( """ @@ -557,21 +788,9 @@ async def can_defer_a_fragment_that_is_also_not_deferred_with_deferred_first(): ) result = await complete(document) - assert result == [ - {"data": {"hero": {"name": "Luke"}}, "hasNext": True}, - { - "incremental": [ - { - "data": {"name": "Luke"}, - "path": ["hero"], - "label": "DeferTop", - }, - ], - "hasNext": False, - }, - ] + assert result == {"data": {"hero": {"name": "Luke"}}} - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_defer_a_fragment_that_is_also_not_deferred_with_non_deferred_first(): document = parse( """ @@ -588,21 +807,9 @@ async def can_defer_a_fragment_that_is_also_not_deferred_with_non_deferred_first ) result = await complete(document) - assert result == [ - {"data": {"hero": {"name": "Luke"}}, "hasNext": True}, - { - "incremental": [ - { - "data": {"name": "Luke"}, - "path": ["hero"], - "label": "DeferTop", - }, - ], - "hasNext": False, - }, - ] + assert result == {"data": {"hero": {"name": "Luke"}}} - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_defer_an_inline_fragment(): document = parse( """ @@ -619,108 +826,1206 @@ async def can_defer_an_inline_fragment(): result = await complete(document) assert result == [ - {"data": {"hero": {"id": "1"}}, "hasNext": True}, { - "incremental": [ - { - "data": {"name": "Luke"}, - "path": ["hero"], - "label": "InlineDeferred", - }, - ], + "data": {"hero": {"id": "1"}}, + "pending": [{"id": "0", "path": ["hero"], "label": "InlineDeferred"}], + "hasNext": True, + }, + { + "incremental": [{"data": {"name": "Luke"}, "id": "0"}], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() - async def handles_errors_thrown_in_deferred_fragments(): + @pytest.mark.asyncio + async def does_not_emit_empty_defer_fragments(): document = parse( """ query HeroNameQuery { hero { - id - ...NameFragment @defer + ... @defer { + name @skip(if: true) + } } } - fragment NameFragment on Hero { + fragment TopFragment on Hero { name } """ ) - result = await complete(document, {"hero": {**hero, "name": Resolvers.bad}}) + result = await complete(document) + + assert result == {"data": {"hero": {}}} + + @pytest.mark.asyncio + async def emits_children_of_empty_defer_fragments(): + document = parse( + """ + query HeroNameQuery { + hero { + ... @defer { + ... @defer { + name + } + } + } + } + """ + ) + result = await complete(document) assert result == [ - {"data": {"hero": {"id": "1"}}, "hasNext": True}, { - "incremental": [ - { - "data": {"name": None}, - "path": ["hero"], - "errors": [ - { - "message": "bad", - "locations": [{"line": 9, "column": 15}], - "path": ["hero", "name"], - } - ], - }, - ], + "data": {"hero": {}}, + "pending": [{"id": "0", "path": ["hero"]}], + "hasNext": True, + }, + { + "incremental": [{"data": {"name": "Luke"}, "id": "0"}], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() - async def handles_non_nullable_errors_thrown_in_deferred_fragments(): + @pytest.mark.asyncio + async def separately_emits_defer_fragments_different_labels_varying_fields(): document = parse( """ query HeroNameQuery { hero { - id - ...NameFragment @defer + ... @defer(label: "DeferID") { + id + } + ... 
@defer(label: "DeferName") { + name + } } } - fragment NameFragment on Hero { - nonNullName - } """ ) - result = await complete( - document, {"hero": {**hero, "nonNullName": Resolvers.null}} - ) + result = await complete(document) assert result == [ - {"data": {"hero": {"id": "1"}}, "hasNext": True}, + { + "data": {"hero": {}}, + "pending": [ + {"id": "0", "path": ["hero"], "label": "DeferID"}, + {"id": "1", "path": ["hero"], "label": "DeferName"}, + ], + "hasNext": True, + }, { "incremental": [ - { - "data": None, - "path": ["hero"], - "errors": [ - { - "message": "Cannot return null for non-nullable field" - " Hero.nonNullName.", - "locations": [{"line": 9, "column": 15}], - "path": ["hero", "nonNullName"], - } - ], - }, + {"data": {"id": "1"}, "id": "0"}, + {"data": {"name": "Luke"}, "id": "1"}, ], + "completed": [{"id": "0"}, {"id": "1"}], "hasNext": False, }, ] - @pytest.mark.asyncio() - async def handles_non_nullable_errors_thrown_outside_deferred_fragments(): + @pytest.mark.asyncio + async def separately_emits_defer_fragments_different_labels_varying_subfields(): document = parse( """ query HeroNameQuery { - hero { - nonNullName - ...NameFragment @defer + ... @defer(label: "DeferID") { + hero { + id + } + } + ... @defer(label: "DeferName") { + hero { + name + } } } - fragment NameFragment on Hero { - id + """ + ) + result = await complete(document) + + assert result == [ + { + "data": {}, + "pending": [ + {"id": "0", "path": [], "label": "DeferID"}, + {"id": "1", "path": [], "label": "DeferName"}, + ], + "hasNext": True, + }, + { + "incremental": [ + {"data": {"hero": {}}, "id": "0"}, + {"data": {"id": "1"}, "id": "0", "subPath": ["hero"]}, + {"data": {"name": "Luke"}, "id": "1", "subPath": ["hero"]}, + ], + "completed": [{"id": "0"}, {"id": "1"}], + "hasNext": False, + }, + ] + + @pytest.mark.asyncio + async def separately_emits_defer_fragments_different_labels_var_subfields_async(): + document = parse( + """ + query HeroNameQuery { + ... @defer(label: "DeferID") { + hero { + id + } + } + ... @defer(label: "DeferName") { + hero { + name + } + } + } + """ + ) + + async def resolve(value): + return value + + result = await complete( + document, + { + "hero": { + "id": lambda _info: resolve(1), + "name": lambda _info: resolve("Luke"), + } + }, + ) + + assert result == [ + { + "data": {}, + "pending": [ + {"id": "0", "path": [], "label": "DeferID"}, + {"id": "1", "path": [], "label": "DeferName"}, + ], + "hasNext": True, + }, + { + "incremental": [ + {"data": {"hero": {}}, "id": "0"}, + {"data": {"id": "1"}, "id": "0", "subPath": ["hero"]}, + {"data": {"name": "Luke"}, "id": "1", "subPath": ["hero"]}, + ], + "completed": [{"id": "0"}, {"id": "1"}], + "hasNext": False, + }, + ] + + @pytest.mark.asyncio + async def separately_emits_defer_fragments_var_subfields_same_prio_diff_level(): + document = parse( + """ + query HeroNameQuery { + hero { + ... @defer(label: "DeferID") { + id + } + } + ... 
@defer(label: "DeferName") { + hero { + name + } + } + } + """ + ) + result = await complete(document) + + assert result == [ + { + "data": {"hero": {}}, + "pending": [ + {"id": "0", "path": [], "label": "DeferName"}, + {"id": "1", "path": ["hero"], "label": "DeferID"}, + ], + "hasNext": True, + }, + { + "incremental": [ + {"data": {"id": "1"}, "id": "1"}, + {"data": {"name": "Luke"}, "id": "0", "subPath": ["hero"]}, + ], + "completed": [{"id": "1"}, {"id": "0"}], + "hasNext": False, + }, + ] + + @pytest.mark.asyncio + async def separately_emits_nested_defer_frags_var_subfields_same_prio_diff_level(): + document = parse( + """ + query HeroNameQuery { + ... @defer(label: "DeferName") { + hero { + name + ... @defer(label: "DeferID") { + id + } + } + } + } + """ + ) + result = await complete(document) + + assert result == [ + { + "data": {}, + "pending": [{"id": "0", "path": [], "label": "DeferName"}], + "hasNext": True, + }, + { + "pending": [{"id": "1", "path": ["hero"], "label": "DeferID"}], + "incremental": [{"data": {"hero": {"name": "Luke"}}, "id": "0"}], + "completed": [{"id": "0"}], + "hasNext": True, + }, + { + "incremental": [{"data": {"id": "1"}, "id": "1"}], + "completed": [{"id": "1"}], + "hasNext": False, + }, + ] + + @pytest.mark.asyncio + async def can_deduplicate_multiple_defers_on_the_same_object(): + document = parse( + """ + query { + hero { + friends { + ... @defer { + ...FriendFrag + ... @defer { + ...FriendFrag + ... @defer { + ...FriendFrag + ... @defer { + ...FriendFrag + } + } + } + } + } + } + } + + fragment FriendFrag on Friend { + id + name + } + """ + ) + result = await complete(document) + + assert result == [ + { + "data": {"hero": {"friends": [{}, {}, {}]}}, + "pending": [ + {"id": "0", "path": ["hero", "friends", 0]}, + {"id": "1", "path": ["hero", "friends", 1]}, + {"id": "2", "path": ["hero", "friends", 2]}, + ], + "hasNext": True, + }, + { + "incremental": [ + {"data": {"id": "2", "name": "Han"}, "id": "0"}, + {"data": {"id": "3", "name": "Leia"}, "id": "1"}, + {"data": {"id": "4", "name": "C-3PO"}, "id": "2"}, + ], + "completed": [{"id": "0"}, {"id": "1"}, {"id": "2"}], + "hasNext": False, + }, + ] + + @pytest.mark.asyncio + async def deduplicates_fields_present_in_the_initial_payload(): + document = parse( + """ + query { + hero { + nestedObject { + deeperObject { + foo + } + } + anotherNestedObject { + deeperObject { + foo + } + } + ... @defer { + nestedObject { + deeperObject { + bar + } + } + anotherNestedObject { + deeperObject { + foo + } + } + } + } + } + """ + ) + result = await complete( + document, + { + "hero": { + "nestedObject": {"deeperObject": {"foo": "foo", "bar": "bar"}}, + "anotherNestedObject": {"deeperObject": {"foo": "foo"}}, + } + }, + ) + + assert result == [ + { + "data": { + "hero": { + "nestedObject": {"deeperObject": {"foo": "foo"}}, + "anotherNestedObject": {"deeperObject": {"foo": "foo"}}, + } + }, + "pending": [{"id": "0", "path": ["hero"]}], + "hasNext": True, + }, + { + "incremental": [ + { + "data": {"bar": "bar"}, + "id": "0", + "subPath": ["nestedObject", "deeperObject"], + }, + ], + "completed": [{"id": "0"}], + "hasNext": False, + }, + ] + + @pytest.mark.asyncio + async def deduplicates_fields_present_in_a_parent_defer_payload(): + document = parse( + """ + query { + hero { + ... @defer { + nestedObject { + deeperObject { + foo + ... 
@defer { + foo + bar + } + } + } + } + } + } + """ + ) + result = await complete( + document, + {"hero": {"nestedObject": {"deeperObject": {"foo": "foo", "bar": "bar"}}}}, + ) + + assert result == [ + { + "data": {"hero": {}}, + "pending": [{"id": "0", "path": ["hero"]}], + "hasNext": True, + }, + { + "pending": [ + {"id": "1", "path": ["hero", "nestedObject", "deeperObject"]} + ], + "incremental": [ + { + "data": {"nestedObject": {"deeperObject": {"foo": "foo"}}}, + "id": "0", + }, + ], + "completed": [{"id": "0"}], + "hasNext": True, + }, + { + "incremental": [{"data": {"bar": "bar"}, "id": "1"}], + "completed": [{"id": "1"}], + "hasNext": False, + }, + ] + + @pytest.mark.asyncio + async def deduplicates_fields_with_deferred_fragments_at_multiple_levels(): + document = parse( + """ + query { + hero { + nestedObject { + deeperObject { + foo + } + } + ... @defer { + nestedObject { + deeperObject { + foo + bar + } + ... @defer { + deeperObject { + foo + bar + baz + ... @defer { + foo + bar + baz + bak + } + } + } + } + } + } + } + """ + ) + result = await complete( + document, + { + "hero": { + "nestedObject": { + "deeperObject": { + "foo": "foo", + "bar": "bar", + "baz": "baz", + "bak": "bak", + } + } + } + }, + ) + + assert result == [ + { + "data": { + "hero": { + "nestedObject": { + "deeperObject": { + "foo": "foo", + }, + }, + }, + }, + "pending": [{"id": "0", "path": ["hero"]}], + "hasNext": True, + }, + { + "pending": [{"id": "1", "path": ["hero", "nestedObject"]}], + "incremental": [ + { + "data": {"bar": "bar"}, + "id": "0", + "subPath": ["nestedObject", "deeperObject"], + }, + ], + "completed": [{"id": "0"}], + "hasNext": True, + }, + { + "pending": [ + {"id": "2", "path": ["hero", "nestedObject", "deeperObject"]} + ], + "incremental": [ + {"data": {"baz": "baz"}, "id": "1", "subPath": ["deeperObject"]}, + ], + "hasNext": True, + "completed": [{"id": "1"}], + }, + { + "incremental": [{"data": {"bak": "bak"}, "id": "2"}], + "completed": [{"id": "2"}], + "hasNext": False, + }, + ] + + @pytest.mark.asyncio + async def deduplicates_fields_from_deferred_fragments_branches_same_level(): + document = parse( + """ + query { + hero { + nestedObject { + deeperObject { + ... @defer { + foo + } + } + } + ... @defer { + nestedObject { + deeperObject { + ... @defer { + foo + bar + } + } + } + } + } + } + """ + ) + result = await complete( + document, + {"hero": {"nestedObject": {"deeperObject": {"foo": "foo", "bar": "bar"}}}}, + ) + + assert result == [ + { + "data": {"hero": {"nestedObject": {"deeperObject": {}}}}, + "pending": [ + {"id": "0", "path": ["hero", "nestedObject", "deeperObject"]}, + {"id": "1", "path": ["hero", "nestedObject", "deeperObject"]}, + ], + "hasNext": True, + }, + { + "incremental": [ + {"data": {"foo": "foo"}, "id": "0"}, + {"data": {"bar": "bar"}, "id": "1"}, + ], + "completed": [{"id": "0"}, {"id": "1"}], + "hasNext": False, + }, + ] + + @pytest.mark.asyncio + async def deduplicates_fields_from_deferred_fragments_branches_multi_levels(): + document = parse( + """ + query { + a { + b { + c { + d + } + ... @defer { + e { + f + } + } + } + } + ... 
@defer {
+                a {
+                  b {
+                    e {
+                      f
+                    }
+                  }
+                }
+                g {
+                  h
+                }
+              }
+            }
+            """
+        )
+        result = await complete(
+            document,
+            {"a": {"b": {"c": {"d": "d"}, "e": {"f": "f"}}}, "g": {"h": "h"}},
+        )
+
+        assert result == [
+            {
+                "data": {"a": {"b": {"c": {"d": "d"}}}},
+                "pending": [{"id": "0", "path": []}, {"id": "1", "path": ["a", "b"]}],
+                "hasNext": True,
+            },
+            {
+                "incremental": [
+                    {"data": {"e": {"f": "f"}}, "id": "1"},
+                    {"data": {"g": {"h": "h"}}, "id": "0"},
+                ],
+                "completed": [{"id": "1"}, {"id": "0"}],
+                "hasNext": False,
+            },
+        ]
+
+    @pytest.mark.asyncio
+    async def nulls_cross_defer_boundaries_null_first():
+        document = parse(
+            """
+            query {
+              ... @defer {
+                a {
+                  someField
+                  b {
+                    c {
+                      nonNullErrorField
+                    }
+                  }
+                }
+              }
+              a {
+                ... @defer {
+                  b {
+                    c {
+                      d
+                    }
+                  }
+                }
+              }
+            }
+            """
+        )
+        result = await complete(
+            document,
+            {"a": {"b": {"c": {"d": "d"}}, "someField": "someField"}},
+        )
+
+        assert result == [
+            {
+                "data": {"a": {}},
+                "pending": [{"id": "0", "path": []}, {"id": "1", "path": ["a"]}],
+                "hasNext": True,
+            },
+            {
+                "incremental": [
+                    {"data": {"b": {"c": {}}}, "id": "1"},
+                    {"data": {"d": "d"}, "id": "1", "subPath": ["b", "c"]},
+                ],
+                "completed": [
+                    {
+                        "id": "0",
+                        "errors": [
+                            {
+                                "message": "Cannot return null"
+                                " for non-nullable field c.nonNullErrorField.",
+                                "locations": [{"line": 8, "column": 23}],
+                                "path": ["a", "b", "c", "nonNullErrorField"],
+                            },
+                        ],
+                    },
+                    {"id": "1"},
+                ],
+                "hasNext": False,
+            },
+        ]
+
+    @pytest.mark.asyncio
+    async def nulls_cross_defer_boundaries_value_first():
+        document = parse(
+            """
+            query {
+              ... @defer {
+                a {
+                  b {
+                    c {
+                      d
+                    }
+                  }
+                }
+              }
+              a {
+                ... @defer {
+                  someField
+                  b {
+                    c {
+                      nonNullErrorField
+                    }
+                  }
+                }
+              }
+            }
+            """
+        )
+        result = await complete(
+            document,
+            {
+                "a": {
+                    "b": {"c": {"d": "d"}, "nonNullErrorField": None},
+                    "someField": "someField",
+                }
+            },
+        )
+
+        assert result == [
+            {
+                "data": {"a": {}},
+                "pending": [{"id": "0", "path": []}, {"id": "1", "path": ["a"]}],
+                "hasNext": True,
+            },
+            {
+                "incremental": [
+                    {"data": {"b": {"c": {}}}, "id": "1"},
+                    {"data": {"d": "d"}, "id": "0", "subPath": ["a", "b", "c"]},
+                ],
+                "completed": [
+                    {
+                        "id": "1",
+                        "errors": [
+                            {
+                                "message": "Cannot return null"
+                                " for non-nullable field c.nonNullErrorField.",
+                                "locations": [{"line": 17, "column": 23}],
+                                "path": ["a", "b", "c", "nonNullErrorField"],
+                            },
+                        ],
+                    },
+                    {"id": "0"},
+                ],
+                "hasNext": False,
+            },
+        ]
+
+    @pytest.mark.asyncio
+    async def filters_a_payload_with_a_null_that_cannot_be_merged():
+        document = parse(
+            """
+            query {
+              ... @defer {
+                a {
+                  someField
+                  b {
+                    c {
+                      nonNullErrorField
+                    }
+                  }
+                }
+              }
+              a {
+                ... 
@defer {
+                  b {
+                    c {
+                      d
+                    }
+                  }
+                }
+              }
+            }
+            """
+        )
+
+        result = await complete(
+            document,
+            {
+                "a": {
+                    "b": {"c": {"d": "d", "nonNullErrorField": Resolvers.slow_null}},
+                    "someField": "someField",
+                }
+            },
+        )
+
+        assert result == [
+            {
+                "data": {"a": {}},
+                "pending": [{"id": "0", "path": []}, {"id": "1", "path": ["a"]}],
+                "hasNext": True,
+            },
+            {
+                "incremental": [
+                    {"data": {"b": {"c": {}}}, "id": "1"},
+                    {"data": {"d": "d"}, "id": "1", "subPath": ["b", "c"]},
+                ],
+                "completed": [{"id": "1"}],
+                "hasNext": True,
+            },
+            {
+                "completed": [
+                    {
+                        "id": "0",
+                        "errors": [
+                            {
+                                "message": "Cannot return null"
+                                " for non-nullable field c.nonNullErrorField.",
+                                "locations": [{"line": 8, "column": 23}],
+                                "path": ["a", "b", "c", "nonNullErrorField"],
+                            },
+                        ],
+                    },
+                ],
+                "hasNext": False,
+            },
+        ]
+
+    @pytest.mark.asyncio
+    async def cancels_deferred_fields_when_initial_result_exhibits_null_bubbling():
+        document = parse(
+            """
+            query {
+              hero {
+                nonNullName
+              }
+              ... @defer {
+                hero {
+                  name
+                }
+              }
+            }
+            """
+        )
+        result = await complete(
+            document, {"hero": {**hero, "nonNullName": lambda _info: None}}
+        )
+
+        assert result == {
+            "data": {"hero": None},
+            "errors": [
+                {
+                    "message": "Cannot return null"
+                    " for non-nullable field Hero.nonNullName.",
+                    "locations": [{"line": 4, "column": 17}],
+                    "path": ["hero", "nonNullName"],
+                },
+            ],
+        }
+
+    @pytest.mark.asyncio
+    async def cancels_deferred_fields_when_deferred_result_exhibits_null_bubbling():
+        document = parse(
+            """
+            query {
+              ... @defer {
+                hero {
+                  nonNullName
+                  name
+                }
+              }
+            }
+            """
+        )
+        result = await complete(
+            document, {"hero": {**hero, "nonNullName": lambda _info: None}}
+        )
+
+        assert result == [
+            {
+                "data": {},
+                "pending": [{"id": "0", "path": []}],
+                "hasNext": True,
+            },
+            {
+                "incremental": [
+                    {
+                        "data": {"hero": None},
+                        "id": "0",
+                        "errors": [
+                            {
+                                "message": "Cannot return null"
+                                " for non-nullable field Hero.nonNullName.",
+                                "locations": [{"line": 5, "column": 19}],
+                                "path": ["hero", "nonNullName"],
+                            },
+                        ],
+                    },
+                ],
+                "completed": [{"id": "0"}],
+                "hasNext": False,
+            },
+        ]
+
+    @pytest.mark.asyncio
+    async def deduplicates_list_fields():
+        document = parse(
+            """
+            query {
+              hero {
+                friends {
+                  name
+                }
+                ... @defer {
+                  friends {
+                    name
+                  }
+                }
+              }
+            }
+            """
+        )
+
+        result = await complete(document)
+
+        assert result == {
+            "data": {
+                "hero": {
+                    "friends": [{"name": "Han"}, {"name": "Leia"}, {"name": "C-3PO"}]
+                }
+            },
+        }
+
+    @pytest.mark.asyncio
+    async def deduplicates_async_iterable_list_fields():
+        document = parse(
+            """
+            query {
+              hero {
+                friends {
+                  name
+                }
+                ... @defer {
+                  friends {
+                    name
+                  }
+                }
+              }
+            }
+            """
+        )
+
+        result = await complete(
+            document, {"hero": {**hero, "friends": Resolvers.first_friend}}
+        )
+
+        assert result == {"data": {"hero": {"friends": [{"name": "Han"}]}}}
+
+    @pytest.mark.asyncio
+    async def deduplicates_empty_async_iterable_list_fields():
+        document = parse(
+            """
+            query {
+              hero {
+                friends {
+                  name
+                }
+                ... @defer {
+                  friends {
+                    name
+                  }
+                }
+              }
+            }
+            """
+        )
+
+        async def resolve_friends(_info):
+            await sleep(0)
+            for friend in []:  # type: ignore
+                yield friend  # pragma: no cover
+
+        result = await complete(
+            document, {"hero": {**hero, "friends": resolve_friends}}
+        )
+
+        assert result == {"data": {"hero": {"friends": []}}}
+
+    @pytest.mark.asyncio
+    async def does_not_deduplicate_list_fields_with_non_overlapping_fields():
+        document = parse(
+            """
+            query {
+              hero {
+                friends {
+                  name
+                }
+                ... 
@defer {
+                  friends {
+                    id
+                  }
+                }
+              }
+            }
+            """
+        )
+        result = await complete(document)
+
+        assert result == [
+            {
+                "data": {
+                    "hero": {
+                        "friends": [
+                            {"name": "Han"},
+                            {"name": "Leia"},
+                            {"name": "C-3PO"},
+                        ]
+                    }
+                },
+                "pending": [{"id": "0", "path": ["hero"]}],
+                "hasNext": True,
+            },
+            {
+                "incremental": [
+                    {"data": {"id": "2"}, "id": "0", "subPath": ["friends", 0]},
+                    {"data": {"id": "3"}, "id": "0", "subPath": ["friends", 1]},
+                    {"data": {"id": "4"}, "id": "0", "subPath": ["friends", 2]},
+                ],
+                "completed": [{"id": "0"}],
+                "hasNext": False,
+            },
+        ]
+
+    @pytest.mark.asyncio
+    async def deduplicates_list_fields_that_return_empty_lists():
+        document = parse(
+            """
+            query {
+              hero {
+                friends {
+                  name
+                }
+                ... @defer {
+                  friends {
+                    name
+                  }
+                }
+              }
+            }
+            """
+        )
+        result = await complete(
+            document, {"hero": {**hero, "friends": lambda _info: []}}
+        )
+
+        assert result == {"data": {"hero": {"friends": []}}}
+
+    @pytest.mark.asyncio
+    async def deduplicates_null_object_fields():
+        document = parse(
+            """
+            query {
+              hero {
+                nestedObject {
+                  name
+                }
+                ... @defer {
+                  nestedObject {
+                    name
+                  }
+                }
+              }
+            }
+            """
+        )
+        result = await complete(
+            document, {"hero": {**hero, "nestedObject": lambda _info: None}}
+        )
+
+        assert result == {"data": {"hero": {"nestedObject": None}}}
+
+    @pytest.mark.asyncio
+    async def deduplicates_async_object_fields():
+        document = parse(
+            """
+            query {
+              hero {
+                nestedObject {
+                  name
+                }
+                ... @defer {
+                  nestedObject {
+                    name
+                  }
+                }
+              }
+            }
+            """
+        )
+
+        async def resolve_nested_object(_info):
+            return {"name": "foo"}
+
+        result = await complete(
+            document, {"hero": {"nestedObject": resolve_nested_object}}
+        )
+
+        assert result == {"data": {"hero": {"nestedObject": {"name": "foo"}}}}
+
+    @pytest.mark.asyncio
+    async def handles_errors_thrown_in_deferred_fragments():
+        document = parse(
+            """
+            query HeroNameQuery {
+              hero {
+                id
+                ...NameFragment @defer
+              }
+            }
+            fragment NameFragment on Hero {
+              name
+            }
+            """
+        )
+        result = await complete(document, {"hero": {**hero, "name": Resolvers.bad}})
+
+        assert result == [
+            {
+                "data": {"hero": {"id": "1"}},
+                "pending": [{"id": "0", "path": ["hero"]}],
+                "hasNext": True,
+            },
+            {
+                "incremental": [
+                    {
+                        "data": {"name": None},
+                        "id": "0",
+                        "errors": [
+                            {
+                                "message": "bad",
+                                "locations": [{"line": 9, "column": 15}],
+                                "path": ["hero", "name"],
+                            }
+                        ],
+                    },
+                ],
+                "completed": [{"id": "0"}],
+                "hasNext": False,
+            },
+        ]
+
+    @pytest.mark.asyncio
+    async def handles_non_nullable_errors_thrown_in_deferred_fragments():
+        document = parse(
+            """
+            query HeroNameQuery {
+              hero {
+                id
+                ...NameFragment @defer
+              }
+            }
+            fragment NameFragment on Hero {
+              nonNullName
+            }
+            """
+        )
+        result = await complete(
+            document, {"hero": {**hero, "nonNullName": Resolvers.null}}
+        )
+
+        assert result == [
+            {
+                "data": {"hero": {"id": "1"}},
+                "pending": [{"id": "0", "path": ["hero"]}],
+                "hasNext": True,
+            },
+            {
+                "completed": [
+                    {
+                        "id": "0",
+                        "errors": [
+                            {
+                                "message": "Cannot return null for non-nullable field"
+                                " Hero.nonNullName.",
+                                "locations": [{"line": 9, "column": 15}],
+                                "path": ["hero", "nonNullName"],
+                            }
+                        ],
+                    },
+                ],
+                "hasNext": False,
+            },
+        ]
+
+    @pytest.mark.asyncio
+    async def handles_non_nullable_errors_thrown_outside_deferred_fragments():
+        document = parse(
+            """
+            query HeroNameQuery {
+              hero {
+                nonNullName
+                ...NameFragment @defer
+              }
+            }
+            fragment NameFragment on Hero {
+              id
             }
             """
         )
@@ -740,7 +2045,7 @@ async def handles_non_nullable_errors_thrown_outside_deferred_fragments():
             ],
         }
 
-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def 
handles_async_non_nullable_errors_thrown_in_deferred_fragments(): document = parse( """ @@ -760,12 +2065,15 @@ async def handles_async_non_nullable_errors_thrown_in_deferred_fragments(): ) assert result == [ - {"data": {"hero": {"id": "1"}}, "hasNext": True}, { - "incremental": [ + "data": {"hero": {"id": "1"}}, + "pending": [{"id": "0", "path": ["hero"]}], + "hasNext": True, + }, + { + "completed": [ { - "data": None, - "path": ["hero"], + "id": "0", "errors": [ { "message": "Cannot return null for non-nullable field" @@ -780,7 +2088,7 @@ async def handles_async_non_nullable_errors_thrown_in_deferred_fragments(): }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def returns_payloads_in_correct_order(): document = parse( """ @@ -804,36 +2112,35 @@ async def returns_payloads_in_correct_order(): result = await complete(document, {"hero": {**hero, "name": Resolvers.slow}}) assert result == [ - {"data": {"hero": {"id": "1"}}, "hasNext": True}, { + "data": {"hero": {"id": "1"}}, + "pending": [{"id": "0", "path": ["hero"]}], + "hasNext": True, + }, + { + "pending": [ + {"id": "1", "path": ["hero", "friends", 0]}, + {"id": "2", "path": ["hero", "friends", 1]}, + {"id": "3", "path": ["hero", "friends", 2]}, + ], "incremental": [ - { - "data": {"name": "slow", "friends": [{}, {}, {}]}, - "path": ["hero"], - } + {"data": {"name": "slow", "friends": [{}, {}, {}]}, "id": "0"} ], + "completed": [{"id": "0"}], "hasNext": True, }, { "incremental": [ - { - "data": {"name": "Han"}, - "path": ["hero", "friends", 0], - }, - { - "data": {"name": "Leia"}, - "path": ["hero", "friends", 1], - }, - { - "data": {"name": "C-3PO"}, - "path": ["hero", "friends", 2], - }, + {"data": {"name": "Han"}, "id": "1"}, + {"data": {"name": "Leia"}, "id": "2"}, + {"data": {"name": "C-3PO"}, "id": "3"}, ], + "completed": [{"id": "1"}, {"id": "2"}, {"id": "3"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def returns_payloads_from_synchronous_data_in_correct_order(): document = parse( """ @@ -857,36 +2164,35 @@ async def returns_payloads_from_synchronous_data_in_correct_order(): result = await complete(document) assert result == [ - {"data": {"hero": {"id": "1"}}, "hasNext": True}, { + "data": {"hero": {"id": "1"}}, + "pending": [{"id": "0", "path": ["hero"]}], + "hasNext": True, + }, + { + "pending": [ + {"id": "1", "path": ["hero", "friends", 0]}, + {"id": "2", "path": ["hero", "friends", 1]}, + {"id": "3", "path": ["hero", "friends", 2]}, + ], "incremental": [ - { - "data": {"name": "Luke", "friends": [{}, {}, {}]}, - "path": ["hero"], - }, + {"data": {"name": "Luke", "friends": [{}, {}, {}]}, "id": "0"} ], + "completed": [{"id": "0"}], "hasNext": True, }, { "incremental": [ - { - "data": {"name": "Han"}, - "path": ["hero", "friends", 0], - }, - { - "data": {"name": "Leia"}, - "path": ["hero", "friends", 1], - }, - { - "data": {"name": "C-3PO"}, - "path": ["hero", "friends", 2], - }, + {"data": {"name": "Han"}, "id": "1"}, + {"data": {"name": "Leia"}, "id": "2"}, + {"data": {"name": "C-3PO"}, "id": "3"}, ], + "completed": [{"id": "1"}, {"id": "2"}, {"id": "3"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def filters_deferred_payloads_when_list_item_from_async_iterable_nulled(): document = parse( """ @@ -905,7 +2211,7 @@ async def filters_deferred_payloads_when_list_item_from_async_iterable_nulled(): ) result = await complete( - document, {"hero": {**hero, "friends": Resolvers.friends}} + document, {"hero": {**hero, "friends": Resolvers.first_friend}} ) 
assert result == { @@ -920,7 +2226,7 @@ async def filters_deferred_payloads_when_list_item_from_async_iterable_nulled(): ], } - @pytest.mark.asyncio() + @pytest.mark.asyncio async def original_execute_function_throws_error_if_deferred_and_all_is_sync(): document = parse( """ @@ -938,7 +2244,7 @@ async def original_execute_function_throws_error_if_deferred_and_all_is_sync(): " multiple payloads (due to @defer or @stream directive)" ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def original_execute_function_throws_error_if_deferred_and_not_all_is_sync(): document = parse( """ diff --git a/tests/execution/test_execution_result.py b/tests/execution/test_execution_result.py index 28ba17af..96935d99 100644 --- a/tests/execution/test_execution_result.py +++ b/tests/execution/test_execution_result.py @@ -1,4 +1,5 @@ import pytest + from graphql.error import GraphQLError from graphql.execution import ExecutionResult @@ -54,15 +55,15 @@ def compares_to_dict(): res = ExecutionResult(data, errors) assert res == {"data": data, "errors": errors} assert res == {"data": data, "errors": errors, "extensions": None} - assert res != {"data": data, "errors": None} - assert res != {"data": None, "errors": errors} + assert res == {"data": data, "errors": errors, "extensions": {}} + assert res != {"errors": errors} + assert res != {"data": data} assert res != {"data": data, "errors": errors, "extensions": extensions} res = ExecutionResult(data, errors, extensions) - assert res == {"data": data, "errors": errors} assert res == {"data": data, "errors": errors, "extensions": extensions} - assert res != {"data": data, "errors": None} - assert res != {"data": None, "errors": errors} - assert res != {"data": data, "errors": errors, "extensions": None} + assert res != {"errors": errors, "extensions": extensions} + assert res != {"data": data, "extensions": extensions} + assert res != {"data": data, "errors": errors} def compares_to_tuple(): res = ExecutionResult(data, errors) diff --git a/tests/execution/test_executor.py b/tests/execution/test_executor.py index 5ea1f25b..e2d2db1f 100644 --- a/tests/execution/test_executor.py +++ b/tests/execution/test_executor.py @@ -4,6 +4,7 @@ from typing import Any, Awaitable, cast import pytest + from graphql.error import GraphQLError from graphql.execution import execute, execute_sync from graphql.language import FieldNode, OperationDefinitionNode, parse @@ -41,7 +42,7 @@ def accepts_positional_arguments(): assert result == ({"a": "rootValue"}, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def executes_arbitrary_code(): # noinspection PyMethodMayBeStatic,PyMethodMayBeStatic class Data: @@ -244,16 +245,16 @@ def resolve(_obj, info): execute_sync(schema, document, root_value, variable_values=variable_values) assert len(resolved_infos) == 1 - operation = cast(OperationDefinitionNode, document.definitions[0]) + operation = cast("OperationDefinitionNode", document.definitions[0]) assert operation assert operation.kind == "operation_definition" - field = cast(FieldNode, operation.selection_set.selections[0]) + field = cast("FieldNode", operation.selection_set.selections[0]) assert resolved_infos[0] == GraphQLResolveInfo( field_name="test", field_nodes=[field], return_type=GraphQLString, - parent_type=cast(GraphQLObjectType, schema.query_type), + parent_type=cast("GraphQLObjectType", schema.query_type), path=ResponsePath(None, "result", "Test"), schema=schema, fragments={}, @@ -375,7 +376,7 @@ def resolve(_obj, _info, **args): assert len(resolved_args) == 1 assert 
resolved_args[0] == {"numArg": 123, "stringArg": "foo"} - @pytest.mark.asyncio() + @pytest.mark.asyncio async def nulls_out_error_subtrees(): document = parse( """ @@ -520,7 +521,6 @@ async def asyncReturnErrorWithExtensions(self, _info): ], ) - @pytest.mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning") def handles_sync_errors_combined_with_async_ones(): is_async_resolver_finished = False @@ -617,7 +617,6 @@ def resolve_error(*_args): ], ) - @pytest.mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning") def uses_the_inline_operation_if_no_operation_name_is_provided(): schema = GraphQLSchema( GraphQLObjectType("Type", {"a": GraphQLField(GraphQLString)}) @@ -631,7 +630,6 @@ class Data: result = execute_sync(schema, document, Data()) assert result == ({"a": "b"}, None) - @pytest.mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning") def uses_the_only_operation_if_no_operation_name_is_provided(): schema = GraphQLSchema( GraphQLObjectType("Type", {"a": GraphQLField(GraphQLString)}) @@ -645,7 +643,6 @@ class Data: result = execute_sync(schema, document, Data()) assert result == ({"a": "b"}, None) - @pytest.mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning") def uses_the_named_operation_if_operation_name_is_provided(): schema = GraphQLSchema( GraphQLObjectType("Type", {"a": GraphQLField(GraphQLString)}) @@ -868,7 +865,7 @@ def resolves_to_an_error_if_schema_does_not_support_operation(): ], ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def correct_field_ordering_despite_execution_order(): schema = GraphQLSchema( GraphQLObjectType( @@ -984,7 +981,7 @@ def does_not_include_arguments_that_were_not_set(): None, ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def fails_when_is_type_of_check_is_not_met(): class Special: value: str diff --git a/tests/execution/test_lists.py b/tests/execution/test_lists.py index 3d2bb8fa..083c437c 100644 --- a/tests/execution/test_lists.py +++ b/tests/execution/test_lists.py @@ -1,6 +1,7 @@ from typing import Any, AsyncGenerator import pytest + from graphql.execution import ExecutionResult, execute, execute_sync from graphql.language import parse from graphql.pyutils import is_awaitable @@ -171,7 +172,7 @@ async def _list_field( assert is_awaitable(result) return await result - @pytest.mark.asyncio() + @pytest.mark.asyncio async def accepts_an_async_generator_as_a_list_value(): async def list_field(): yield "two" @@ -183,7 +184,7 @@ async def list_field(): None, ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def accepts_a_custom_async_iterable_as_a_list_value(): class ListField: def __aiter__(self): @@ -202,7 +203,7 @@ async def __anext__(self): None, ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def handles_an_async_generator_that_throws(): async def list_field(): yield "two" @@ -214,7 +215,7 @@ async def list_field(): [{"message": "bad", "locations": [(1, 3)], "path": ["listField"]}], ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def handles_an_async_generator_where_intermediate_value_triggers_an_error(): async def list_field(): yield "two" @@ -232,7 +233,8 @@ async def list_field(): ], ) - @pytest.mark.asyncio() + @pytest.mark.asyncio + @pytest.mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning") async def handles_errors_from_complete_value_in_async_iterables(): async def list_field(): yield "two" @@ -249,7 +251,7 @@ async def list_field(): ], ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def 
handles_async_functions_from_complete_value_in_async_iterables(): async def resolve(data: _IndexData, _info: GraphQLResolveInfo) -> int: return data.index @@ -259,7 +261,7 @@ async def resolve(data: _IndexData, _info: GraphQLResolveInfo) -> int: None, ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def handles_single_async_functions_from_complete_value_in_async_iterables(): async def resolve(data: _IndexData, _info: GraphQLResolveInfo) -> int: return data.index @@ -269,7 +271,7 @@ async def resolve(data: _IndexData, _info: GraphQLResolveInfo) -> int: None, ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def handles_async_errors_from_complete_value_in_async_iterables(): async def resolve(data: _IndexData, _info: GraphQLResolveInfo) -> int: index = data.index @@ -288,7 +290,7 @@ async def resolve(data: _IndexData, _info: GraphQLResolveInfo) -> int: ], ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def handles_nulls_yielded_by_async_generator(): async def list_field(): yield 1 @@ -322,7 +324,7 @@ def execute_query(list_value: Any) -> Any: return result - @pytest.mark.asyncio() + @pytest.mark.asyncio async def contains_values(): list_field = [1, 2] assert await _complete(list_field, "[Int]") == ({"listField": [1, 2]}, None) @@ -330,7 +332,7 @@ async def contains_values(): assert await _complete(list_field, "[Int!]") == ({"listField": [1, 2]}, None) assert await _complete(list_field, "[Int!]!") == ({"listField": [1, 2]}, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def contains_null(): list_field = [1, None, 2] errors = [ @@ -351,7 +353,7 @@ async def contains_null(): assert await _complete(list_field, "[Int!]") == ({"listField": None}, errors) assert await _complete(list_field, "[Int!]!") == (None, errors) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def returns_null(): list_field = None errors = [ @@ -366,7 +368,7 @@ async def returns_null(): assert await _complete(list_field, "[Int!]") == ({"listField": None}, None) assert await _complete(list_field, "[Int!]!") == (None, errors) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def contains_error(): list_field = [1, RuntimeError("bad"), 2] errors = [ @@ -393,7 +395,7 @@ async def contains_error(): errors, ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def results_in_errors(): list_field = RuntimeError("bad") errors = [ diff --git a/tests/execution/test_map_async_iterable.py b/tests/execution/test_map_async_iterable.py index 055a61bc..eb3cddb8 100644 --- a/tests/execution/test_map_async_iterable.py +++ b/tests/execution/test_map_async_iterable.py @@ -1,11 +1,12 @@ import pytest + from graphql.execution import map_async_iterable try: # pragma: no cover anext # noqa: B018 except NameError: # pragma: no cover (Python < 3.10) # noinspection PyShadowingBuiltins - async def anext(iterator): # noqa: A001 + async def anext(iterator): """Return the next item from an async iterator.""" return await iterator.__anext__() @@ -21,7 +22,7 @@ async def throw(_x: int) -> int: def describe_map_async_iterable(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def maps_over_async_generator(): async def source(): yield 1 @@ -36,7 +37,7 @@ async def source(): with pytest.raises(StopAsyncIteration): assert await anext(doubles) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def maps_over_async_iterable(): items = [1, 2, 3] @@ -57,7 +58,7 @@ async def __anext__(self): assert not items assert values == [2, 4, 6] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def 
compatible_with_async_for(): async def source(): yield 1 @@ -70,7 +71,7 @@ async def source(): assert values == [2, 4, 6] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def allows_returning_early_from_mapped_async_generator(): async def source(): yield 1 @@ -91,7 +92,7 @@ async def source(): with pytest.raises(StopAsyncIteration): await anext(doubles) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def allows_returning_early_from_mapped_async_iterable(): items = [1, 2, 3] @@ -119,7 +120,7 @@ async def __anext__(self): with pytest.raises(StopAsyncIteration): await anext(doubles) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def allows_throwing_errors_through_async_iterable(): items = [1, 2, 3] @@ -150,7 +151,7 @@ async def __anext__(self): with pytest.raises(StopAsyncIteration): await anext(doubles) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def allows_throwing_errors_with_traceback_through_async_iterables(): class Iterable: def __aiter__(self): @@ -177,7 +178,7 @@ async def __anext__(self): with pytest.raises(StopAsyncIteration): await anext(one) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def does_not_map_over_thrown_errors(): async def source(): yield 1 @@ -192,7 +193,7 @@ async def source(): assert str(exc_info.value) == "Goodbye" - @pytest.mark.asyncio() + @pytest.mark.asyncio async def does_not_map_over_externally_thrown_errors(): async def source(): yield 1 @@ -206,7 +207,7 @@ async def source(): assert str(exc_info.value) == "Goodbye" - @pytest.mark.asyncio() + @pytest.mark.asyncio async def iterable_is_closed_when_mapped_iterable_is_closed(): class Iterable: def __init__(self): @@ -230,7 +231,7 @@ async def aclose(self): with pytest.raises(StopAsyncIteration): await anext(doubles) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def iterable_is_closed_on_callback_error(): class Iterable: def __init__(self): @@ -253,7 +254,7 @@ async def aclose(self): with pytest.raises(StopAsyncIteration): await anext(doubles) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def iterable_exits_on_callback_error(): exited = False @@ -272,7 +273,7 @@ async def iterable(): with pytest.raises(StopAsyncIteration): await anext(doubles) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def mapped_iterable_is_closed_when_iterable_cannot_be_closed(): class Iterable: def __aiter__(self): @@ -287,7 +288,7 @@ async def __anext__(self): with pytest.raises(StopAsyncIteration): await anext(doubles) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def ignores_that_iterable_cannot_be_closed_on_callback_error(): class Iterable: def __aiter__(self): diff --git a/tests/execution/test_middleware.py b/tests/execution/test_middleware.py index d4abba95..50159995 100644 --- a/tests/execution/test_middleware.py +++ b/tests/execution/test_middleware.py @@ -2,6 +2,7 @@ from typing import Awaitable, cast import pytest + from graphql.execution import Middleware, MiddlewareManager, execute, subscribe from graphql.language.parser import parse from graphql.type import GraphQLField, GraphQLObjectType, GraphQLSchema, GraphQLString @@ -90,7 +91,7 @@ def capitalize_middleware(next_, *args, **kwargs): assert result.data == {"first": "Eno", "second": "Owt"} # type: ignore - @pytest.mark.asyncio() + @pytest.mark.asyncio async def single_async_function(): doc = parse("{ first second }") @@ -200,7 +201,7 @@ def resolve(self, next_, *args, **kwargs): ) assert result.data == {"field": "devloseR"} # type: ignore - @pytest.mark.asyncio() + @pytest.mark.asyncio async def 
with_async_function_and_object(): doc = parse("{ field }") @@ -237,7 +238,7 @@ async def resolve(self, next_, *args, **kwargs): result = await awaitable_result assert result.data == {"field": "devloseR"} - @pytest.mark.asyncio() + @pytest.mark.asyncio async def subscription_simple(): async def bar_resolve(_obj, _info): yield "bar" @@ -322,7 +323,7 @@ def bad_middleware_object(): GraphQLSchema(test_type), doc, None, - middleware=cast(Middleware, {"bad": "value"}), + middleware=cast("Middleware", {"bad": "value"}), ) assert str(exc_info.value) == ( diff --git a/tests/execution/test_mutations.py b/tests/execution/test_mutations.py index 20ee1c97..b03004de 100644 --- a/tests/execution/test_mutations.py +++ b/tests/execution/test_mutations.py @@ -4,6 +4,7 @@ from typing import Any, Awaitable import pytest + from graphql.execution import ( ExperimentalIncrementalExecutionResults, execute, @@ -106,7 +107,7 @@ async def promise_to_get_the_number(holder: NumberHolder, _info) -> int: def describe_execute_handles_mutation_execution_ordering(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def evaluates_mutations_serially(): document = parse( """ @@ -154,7 +155,7 @@ def does_not_include_illegal_mutation_fields_in_output(): result = execute_sync(schema=schema, document=document) assert result == ({}, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def evaluates_mutations_correctly_in_presence_of_a_failed_mutation(): document = parse( """ @@ -211,7 +212,7 @@ async def evaluates_mutations_correctly_in_presence_of_a_failed_mutation(): ], ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def mutation_fields_with_defer_do_not_block_next_mutation(): document = parse( """ @@ -241,22 +242,19 @@ async def mutation_fields_with_defer_do_not_block_next_mutation(): patches.append(patch.formatted) assert patches == [ - {"data": {"first": {}, "second": {"theNumber": 2}}, "hasNext": True}, { - "incremental": [ - { - "label": "defer-label", - "path": ["first"], - "data": { - "promiseToGetTheNumber": 2, - }, - }, - ], + "data": {"first": {}, "second": {"theNumber": 2}}, + "pending": [{"id": "0", "path": ["first"], "label": "defer-label"}], + "hasNext": True, + }, + { + "incremental": [{"id": "0", "data": {"promiseToGetTheNumber": 2}}], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def mutation_inside_of_a_fragment(): document = parse( """ @@ -282,7 +280,7 @@ async def mutation_inside_of_a_fragment(): None, ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def mutation_with_defer_is_not_executed_serially(): document = parse( """ @@ -312,17 +310,14 @@ async def mutation_with_defer_is_not_executed_serially(): patches.append(patch.formatted) assert patches == [ - {"data": {"second": {"theNumber": 2}}, "hasNext": True}, { - "incremental": [ - { - "label": "defer-label", - "path": [], - "data": { - "first": {"theNumber": 1}, - }, - }, - ], + "data": {"second": {"theNumber": 2}}, + "pending": [{"id": "0", "path": [], "label": "defer-label"}], + "hasNext": True, + }, + { + "incremental": [{"id": "0", "data": {"first": {"theNumber": 1}}}], + "completed": [{"id": "0"}], "hasNext": False, }, ] diff --git a/tests/execution/test_nonnull.py b/tests/execution/test_nonnull.py index 053009a9..6c98eb67 100644 --- a/tests/execution/test_nonnull.py +++ b/tests/execution/test_nonnull.py @@ -3,6 +3,7 @@ from typing import Any, Awaitable, cast import pytest + from graphql.execution import ExecutionResult, execute, execute_sync from graphql.language 
diff --git a/tests/execution/test_nonnull.py b/tests/execution/test_nonnull.py
index 053009a9..6c98eb67 100644
--- a/tests/execution/test_nonnull.py
+++ b/tests/execution/test_nonnull.py
@@ -3,6 +3,7 @@
 from typing import Any, Awaitable, cast

 import pytest
+
 from graphql.execution import ExecutionResult, execute, execute_sync
 from graphql.language import parse
 from graphql.pyutils import AwaitableOrValue
@@ -110,7 +111,7 @@ def patch(data: str) -> str:
 async def execute_sync_and_async(query: str, root_value: Any) -> ExecutionResult:
     sync_result = execute_sync(schema, parse(query), root_value)
     async_result = await cast(
-        Awaitable[ExecutionResult], execute(schema, parse(patch(query)), root_value)
+        "Awaitable[ExecutionResult]", execute(schema, parse(patch(query)), root_value)
     )

     assert repr(async_result) == patch(repr(sync_result))
@@ -125,12 +126,12 @@ def describe_nulls_a_nullable_field():
             }
         """

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def returns_null():
         result = await execute_sync_and_async(query, NullingData())
         assert result == ({"sync": None}, None)

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def throws():
         result = await execute_sync_and_async(query, ThrowingData())
         assert result == (
@@ -153,7 +154,7 @@ def describe_nulls_a_returned_object_that_contains_a_non_null_field():
             }
         """

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def that_returns_null():
         result = await execute_sync_and_async(query, NullingData())
         assert result == (
@@ -168,7 +169,7 @@ async def that_returns_null():
             ],
         )

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def that_throws():
         result = await execute_sync_and_async(query, ThrowingData())
         assert result == (
@@ -214,17 +215,17 @@ def describe_nulls_a_complex_tree_of_nullable_fields_each():
             },
         }

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def returns_null():
         result = await cast(
-            Awaitable[ExecutionResult], execute_query(query, NullingData())
+            "Awaitable[ExecutionResult]", execute_query(query, NullingData())
         )
         assert result == (data, None)

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def throws():
         result = await cast(
-            Awaitable[ExecutionResult], execute_query(query, ThrowingData())
+            "Awaitable[ExecutionResult]", execute_query(query, ThrowingData())
         )
         assert result == (
             data,
@@ -348,10 +349,10 @@ def describe_nulls_first_nullable_after_long_chain_of_non_null_fields():
             "anotherPromiseNest": None,
         }

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def returns_null():
         result = await cast(
-            Awaitable[ExecutionResult], execute_query(query, NullingData())
+            "Awaitable[ExecutionResult]", execute_query(query, NullingData())
         )
         assert result == (
             data,
@@ -411,10 +412,10 @@ async def returns_null():
             ],
         )

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def throws():
         result = await cast(
-            Awaitable[ExecutionResult], execute_query(query, ThrowingData())
+            "Awaitable[ExecutionResult]", execute_query(query, ThrowingData())
         )
         assert result == (
             data,
@@ -477,7 +478,7 @@ def describe_nulls_the_top_level_if_non_nullable_field():
         }
     """

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def returns_null():
         result = await execute_sync_and_async(query, NullingData())
         await asyncio.sleep(0)  # strangely needed to get coverage on Python 3.11
@@ -493,7 +494,7 @@ async def returns_null():
             ],
         )

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def throws():
         result = await execute_sync_and_async(query, ThrowingData())
         await asyncio.sleep(0)  # strangely needed to get coverage on Python 3.11
diff --git a/tests/execution/test_oneof.py b/tests/execution/test_oneof.py
new file mode 100644
index 00000000..2040b1a7
--- /dev/null
+++ b/tests/execution/test_oneof.py
@@ -0,0 +1,151 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any
+
+from graphql.execution import ExecutionResult, execute
+from graphql.language import parse
+from graphql.utilities import build_schema
+
+if TYPE_CHECKING:
+    from graphql.pyutils import AwaitableOrValue
+
+schema = build_schema("""
+    type Query {
+      test(input: TestInputObject!): TestObject
+    }
+
+    input TestInputObject @oneOf {
+      a: String
+      b: Int
+    }
+
+    type TestObject {
+      a: String
+      b: Int
+    }
+    """)
+
+
+def execute_query(
+    query: str, root_value: Any, variable_values: dict[str, Any] | None = None
+) -> AwaitableOrValue[ExecutionResult]:
+    return execute(schema, parse(query), root_value, variable_values=variable_values)
+
+
+def describe_execute_handles_one_of_input_objects():
+    def describe_one_of_input_objects():
+        root_value = {
+            "test": lambda _info, input: input,  # noqa: A006
+        }
+
+        def accepts_a_good_default_value():
+            query = """
+                query ($input: TestInputObject! = {a: "abc"}) {
+                  test(input: $input) {
+                    a
+                    b
+                  }
+                }
+                """
+            result = execute_query(query, root_value)
+
+            assert result == ({"test": {"a": "abc", "b": None}}, None)
+
+        def rejects_a_bad_default_value():
+            query = """
+                query ($input: TestInputObject! = {a: "abc", b: 123}) {
+                  test(input: $input) {
+                    a
+                    b
+                  }
+                }
+                """
+            result = execute_query(query, root_value)
+
+            assert result == (
+                {"test": None},
+                [
+                    {
+                        # This type of error would be caught at validation-time
+                        # hence the vague error message here.
+                        "message": "Argument 'input' of non-null type"
+                        " 'TestInputObject!' must not be null.",
+                        "locations": [(3, 31)],
+                        "path": ["test"],
+                    }
+                ],
+            )
+
+        def accepts_a_good_variable():
+            query = """
+                query ($input: TestInputObject!) {
+                  test(input: $input) {
+                    a
+                    b
+                  }
+                }
+                """
+            result = execute_query(query, root_value, {"input": {"a": "abc"}})
+
+            assert result == ({"test": {"a": "abc", "b": None}}, None)
+
+        def accepts_a_good_variable_with_an_undefined_key():
+            query = """
+                query ($input: TestInputObject!) {
+                  test(input: $input) {
+                    a
+                    b
+                  }
+                }
+                """
+            result = execute_query(query, root_value, {"input": {"a": "abc"}})
+
+            assert result == ({"test": {"a": "abc", "b": None}}, None)
+
+        def rejects_a_variable_with_multiple_non_null_keys():
+            query = """
+                query ($input: TestInputObject!) {
+                  test(input: $input) {
+                    a
+                    b
+                  }
+                }
+                """
+            result = execute_query(query, root_value, {"input": {"a": "abc", "b": 123}})
+
+            assert result == (
+                None,
+                [
+                    {
+                        "message": "Variable '$input' got invalid value"
+                        " {'a': 'abc', 'b': 123}; Exactly one key must be specified"
+                        " for OneOf type 'TestInputObject'.",
+                        "locations": [(2, 24)],
+                    }
+                ],
+            )
+
+        def rejects_a_variable_with_multiple_nullable_keys():
+            query = """
+                query ($input: TestInputObject!) {
+                  test(input: $input) {
+                    a
+                    b
+                  }
+                }
+                """
+            result = execute_query(
+                query, root_value, {"input": {"a": "abc", "b": None}}
+            )
+
+            assert result == (
+                None,
+                [
+                    {
+                        "message": "Variable '$input' got invalid value"
+                        " {'a': 'abc', 'b': None}; Exactly one key must be specified"
+                        " for OneOf type 'TestInputObject'.",
+                        "locations": [(2, 24)],
+                    }
+                ],
+            )
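The new test_oneof.py exercises the @oneOf directive on input objects: exactly one field must be provided with a non-null value, whether it arrives via literal, variable, or default value. A minimal sketch of the same semantics outside the test harness (schema shape borrowed from the test above; graphql_sync is the regular synchronous entry point, and the root value follows the same callable convention as the test's root_value):

from graphql import build_schema, graphql_sync

schema = build_schema("""
    type Query {
      test(input: TestInputObject!): String
    }

    input TestInputObject @oneOf {
      a: String
      b: Int
    }
""")

root = {"test": lambda _info, input: repr(input)}  # noqa: A006

# A single non-null key coerces fine.
ok = graphql_sync(schema, '{ test(input: {a: "abc"}) }', root)
assert not ok.errors

# Two keys at once are rejected during variable coercion.
bad = graphql_sync(
    schema,
    "query ($input: TestInputObject!) { test(input: $input) }",
    root,
    variable_values={"input": {"a": "abc", "b": 123}},
)
assert bad.errors  # exactly one key must be specified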
diff --git a/tests/execution/test_parallel.py b/tests/execution/test_parallel.py
index faacd0c4..82b23855 100644
--- a/tests/execution/test_parallel.py
+++ b/tests/execution/test_parallel.py
@@ -2,6 +2,7 @@
 from typing import Awaitable

 import pytest
+
 from graphql.execution import execute
 from graphql.language import parse
 from graphql.type import (
@@ -10,6 +11,7 @@
     GraphQLInt,
     GraphQLInterfaceType,
     GraphQLList,
+    GraphQLNonNull,
     GraphQLObjectType,
     GraphQLSchema,
     GraphQLString,
@@ -31,7 +33,7 @@ async def wait(self) -> bool:


 def describe_parallel_execution():
-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def resolve_single_field():
         # make sure that the special case of resolving a single field works
         async def resolve(*_args):
@@ -52,7 +54,7 @@ async def resolve(*_args):

         assert result == ({"foo": True}, None)

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def resolve_fields_in_parallel():
         barrier = Barrier(2)

@@ -74,11 +76,11 @@ async def resolve(*_args):
         # raises TimeoutError if not parallel
         awaitable_result = execute(schema, ast)
         assert isinstance(awaitable_result, Awaitable)
-        result = await asyncio.wait_for(awaitable_result, 1.0)
+        result = await asyncio.wait_for(awaitable_result, 1)

         assert result == ({"foo": True, "bar": True}, None)

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def resolve_single_element_list():
         # make sure that the special case of resolving a single element list works
         async def resolve(*_args):
@@ -97,7 +99,7 @@ async def resolve(*_args):

         assert result == ({"foo": [True]}, None)

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def resolve_list_in_parallel():
         barrier = Barrier(2)

@@ -123,11 +125,11 @@ async def resolve_list(*args):
         # raises TimeoutError if not parallel
         awaitable_result = execute(schema, ast)
         assert isinstance(awaitable_result, Awaitable)
-        result = await asyncio.wait_for(awaitable_result, 1.0)
+        result = await asyncio.wait_for(awaitable_result, 1)

         assert result == ({"foo": [True, True]}, None)

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def resolve_is_type_of_in_parallel():
         FooType = GraphQLInterfaceType("Foo", {"foo": GraphQLField(GraphQLString)})

@@ -186,9 +188,232 @@ async def is_type_of_baz(obj, *_args):
         # raises TimeoutError if not parallel
         awaitable_result = execute(schema, ast)
         assert isinstance(awaitable_result, Awaitable)
-        result = await asyncio.wait_for(awaitable_result, 1.0)
+        result = await asyncio.wait_for(awaitable_result, 1)

         assert result == (
             {"foo": [{"foo": "bar", "foobar": 1}, {"foo": "baz", "foobaz": 2}]},
             None,
         )
+
+    def describe_cancel_on_exception():
+        """Tests for cancellation of parallel execution on exception.
+
+        These tests are specifically targeted at the Python asyncio implementation.
+ """ + + @pytest.mark.asyncio + async def cancel_selection_sets(): + barrier = Barrier(2) + completed = False + + async def succeed(*_args): + nonlocal completed + await barrier.wait() + completed = True # pragma: no cover + + async def fail(*_args): + raise RuntimeError("Oops") + + schema = GraphQLSchema( + GraphQLObjectType( + "Query", + { + "foo": GraphQLField( + GraphQLNonNull(GraphQLBoolean), resolve=fail + ), + "bar": GraphQLField(GraphQLBoolean, resolve=succeed), + }, + ) + ) + + ast = parse("{foo, bar}") + + awaitable_result = execute(schema, ast) + assert isinstance(awaitable_result, Awaitable) + result = await asyncio.wait_for(awaitable_result, 1) + + assert result == ( + None, + [{"message": "Oops", "locations": [(1, 2)], "path": ["foo"]}], + ) + + assert not completed + + # Unblock succeed() and check that it does not complete + await barrier.wait() + await asyncio.sleep(0) + assert not completed + + @pytest.mark.asyncio + async def cancel_lists(): + barrier = Barrier(2) + completed = False + + async def succeed(*_args): + nonlocal completed + await barrier.wait() + completed = True # pragma: no cover + + async def fail(*_args): + raise RuntimeError("Oops") + + async def resolve_list(*args): + return [fail(*args), succeed(*args)] + + schema = GraphQLSchema( + GraphQLObjectType( + "Query", + { + "foo": GraphQLField( + GraphQLList(GraphQLNonNull(GraphQLBoolean)), + resolve=resolve_list, + ) + }, + ) + ) + + ast = parse("{foo}") + + awaitable_result = execute(schema, ast) + assert isinstance(awaitable_result, Awaitable) + result = await asyncio.wait_for(awaitable_result, 1) + + assert result == ( + {"foo": None}, + [{"message": "Oops", "locations": [(1, 2)], "path": ["foo", 0]}], + ) + + assert not completed + + # Unblock succeed() and check that it does not complete + await barrier.wait() + await asyncio.sleep(0) + assert not completed + + @pytest.mark.asyncio + async def cancel_async_iterators(): + barrier = Barrier(2) + completed = False + + async def succeed(*_args): + nonlocal completed + await barrier.wait() + completed = True # pragma: no cover + + async def fail(*_args): + raise RuntimeError("Oops") + + async def resolve_iterator(*args): + yield fail(*args) + yield succeed(*args) + + schema = GraphQLSchema( + GraphQLObjectType( + "Query", + { + "foo": GraphQLField( + GraphQLList(GraphQLNonNull(GraphQLBoolean)), + resolve=resolve_iterator, + ) + }, + ) + ) + + ast = parse("{foo}") + + awaitable_result = execute(schema, ast) + assert isinstance(awaitable_result, Awaitable) + result = await asyncio.wait_for(awaitable_result, 1) + + assert result == ( + {"foo": None}, + [{"message": "Oops", "locations": [(1, 2)], "path": ["foo", 0]}], + ) + + assert not completed + + # Unblock succeed() and check that it does not complete + await barrier.wait() + await asyncio.sleep(0) + assert not completed + + @pytest.mark.asyncio + async def cancel_type_resolver(): + FooType = GraphQLInterfaceType("Foo", {"foo": GraphQLField(GraphQLString)}) + + barrier = Barrier(3) + completed = False + + async def is_type_of_bar(*_args): + raise RuntimeError("Oops") + + BarType = GraphQLObjectType( + "Bar", + { + "foo": GraphQLField(GraphQLString), + }, + interfaces=[FooType], + is_type_of=is_type_of_bar, + ) + + async def is_type_of_baz(*_args): + nonlocal completed + await barrier.wait() + completed = True # pragma: no cover + + BazType = GraphQLObjectType( + "Baz", + { + "foo": GraphQLField(GraphQLString), + }, + interfaces=[FooType], + is_type_of=is_type_of_baz, + ) + + schema = GraphQLSchema( + 
+                GraphQLObjectType(
+                    "Query",
+                    {
+                        "foo": GraphQLField(
+                            GraphQLList(FooType),
+                            resolve=lambda *_args: [
+                                {"foo": "bar"},
+                                {"foo": "baz"},
+                            ],
+                        )
+                    },
+                ),
+                types=[BarType, BazType],
+            )
+
+            ast = parse(
+                """
+                {
+                    foo {
+                        foo
+                        ... on Bar { foobar }
+                        ... on Baz { foobaz }
+                    }
+                }
+                """
+            )
+
+            # raises TimeoutError if not parallel
+            awaitable_result = execute(schema, ast)
+            assert isinstance(awaitable_result, Awaitable)
+            result = await asyncio.wait_for(awaitable_result, 1)
+
+            assert result == (
+                {"foo": [None, None]},
+                [
+                    {"message": "Oops", "locations": [(3, 17)], "path": ["foo", 0]},
+                    {"message": "Oops", "locations": [(3, 17)], "path": ["foo", 1]},
+                ],
+            )
+
+            assert not completed
+
+            # Unblock succeed() and check that it does not complete
+            await barrier.wait()
+            await asyncio.sleep(0)
+            assert not completed
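The new describe_cancel_on_exception group asserts that once one resolver raises, its sibling awaitables are cancelled rather than run to completion (otherwise the Barrier would eventually let succeed() finish and flip the completed flag). The tests target graphql-core's asyncio executor; as a plain-asyncio illustration of the same cancellation semantics (not the library's internal code), a TaskGroup cancels its remaining tasks as soon as one of them raises:

import asyncio


async def fail():
    raise RuntimeError("Oops")


async def slow():
    await asyncio.sleep(3600)  # cancelled as soon as fail() raises


async def main():
    try:
        async with asyncio.TaskGroup() as tg:  # Python 3.11+
            tg.create_task(fail())
            tg.create_task(slow())
    except* RuntimeError as eg:
        print(f"caught: {eg.exceptions[0]}")


asyncio.run(main())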
diff --git a/tests/execution/test_resolve.py b/tests/execution/test_resolve.py
index 1c77af8b..db52d638 100644
--- a/tests/execution/test_resolve.py
+++ b/tests/execution/test_resolve.py
@@ -7,9 +7,11 @@
 from graphql.type import (
     GraphQLArgument,
     GraphQLField,
+    GraphQLID,
     GraphQLInputField,
     GraphQLInputObjectType,
     GraphQLInt,
+    GraphQLList,
     GraphQLObjectType,
     GraphQLSchema,
     GraphQLString,
@@ -213,6 +215,91 @@ def execute_query(query: str, root_value: Any = None) -> ExecutionResult:
             None,
         )

+    def transforms_default_values_using_out_names():
+        # This is an extension of GraphQL.js.
+        resolver_kwargs: Any
+
+        def search_resolver(_obj: None, _info, **kwargs):
+            nonlocal resolver_kwargs
+            resolver_kwargs = kwargs
+            return [{"id": "42"}]
+
+        filters_type = GraphQLInputObjectType(
+            "SearchFilters",
+            {"pageSize": GraphQLInputField(GraphQLInt, out_name="page_size")},
+        )
+        result_type = GraphQLObjectType("SearchResult", {"id": GraphQLField(GraphQLID)})
+        query = GraphQLObjectType(
+            "Query",
+            {
+                "search": GraphQLField(
+                    GraphQLList(result_type),
+                    {
+                        "searchFilters": GraphQLArgument(
+                            filters_type, {"pageSize": 10}, out_name="search_filters"
+                        )
+                    },
+                    resolve=search_resolver,
+                )
+            },
+        )
+        schema = GraphQLSchema(query)
+
+        resolver_kwargs = None
+        result = execute_sync(schema, parse("{ search { id } }"))
+        assert result == ({"search": [{"id": "42"}]}, None)
+        assert resolver_kwargs == {"search_filters": {"page_size": 10}}
+
+        resolver_kwargs = None
+        result = execute_sync(
+            schema, parse("{ search(searchFilters:{pageSize: 25}) { id } }")
+        )
+        assert result == ({"search": [{"id": "42"}]}, None)
+        assert resolver_kwargs == {"search_filters": {"page_size": 25}}
+
+        resolver_kwargs = None
+        result = execute_sync(
+            schema,
+            parse(
+                """
+                query ($searchFilters: SearchFilters) {
+                    search(searchFilters: $searchFilters) { id }
+                }
+                """
+            ),
+        )
+        assert result == ({"search": [{"id": "42"}]}, None)
+        assert resolver_kwargs == {"search_filters": {"page_size": 10}}
+
+        resolver_kwargs = None
+        result = execute_sync(
+            schema,
+            parse(
+                """
+                query ($searchFilters: SearchFilters) {
+                    search(searchFilters: $searchFilters) { id }
+                }
+                """
+            ),
+            variable_values={"searchFilters": {"pageSize": 25}},
+        )
+        assert result == ({"search": [{"id": "42"}]}, None)
+        assert resolver_kwargs == {"search_filters": {"page_size": 25}}
+
+        resolver_kwargs = None
+        result = execute_sync(
+            schema,
+            parse(
+                """
+                query ($searchFilters: SearchFilters = {pageSize: 25}) {
+                    search(searchFilters: $searchFilters) { id }
+                }
+                """
+            ),
+        )
+        assert result == ({"search": [{"id": "42"}]}, None)
+        assert resolver_kwargs == {"search_filters": {"page_size": 25}}
+
     def pass_error_from_resolver_wrapped_as_located_graphql_error():
         def resolve(_obj, _info):
             raise ValueError("Some error")
diff --git a/tests/execution/test_schema.py b/tests/execution/test_schema.py
index a3448d89..7096c5fb 100644
--- a/tests/execution/test_schema.py
+++ b/tests/execution/test_schema.py
@@ -78,7 +78,7 @@ def __init__(self, id: int):  # noqa: A002
             "article": GraphQLField(
                 BlogArticle,
                 args={"id": GraphQLArgument(GraphQLID)},
-                resolve=lambda _obj, _info, id: Article(id),  # noqa: A002
+                resolve=lambda _obj, _info, id: Article(id),  # noqa: A006
             ),
             "feed": GraphQLField(
                 GraphQLList(BlogArticle),
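The test_stream.py changes below adapt the @stream tests to the same id-based protocol: the initial payload lists each pending stream, "incremental" entries then carry only "items" plus the correlating "id", and "completed" closes the stream. A hedged sketch of driving a streamed list field end to end; the schema and root value here are our own stand-ins for the tests' fixtures, and we assume the root value may be passed as the third positional argument, as in the tests' complete() helper:

import asyncio
from inspect import isawaitable

from graphql import build_schema, parse
from graphql.execution import (
    ExperimentalIncrementalExecutionResults,
    experimental_execute_incrementally,
)

schema = build_schema("type Query { scalarList: [String] }")
document = parse("{ scalarList @stream(initialCount: 1) }")


async def main():
    # One item arrives in the initial payload, the rest incrementally.
    result = experimental_execute_incrementally(
        schema, document, {"scalarList": ["apple", "banana", "coconut"]}
    )
    if isawaitable(result):
        result = await result
    assert isinstance(result, ExperimentalIncrementalExecutionResults)
    print(result.initial_result.formatted)
    async for payload in result.subsequent_results:
        print(payload.formatted)


asyncio.run(main())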
+ "errors": [GraphQLError("oops")], "extensions": {"baz": 2}, } result = IncrementalStreamResult(**args) @@ -199,9 +188,11 @@ def can_compare_incremental_stream_result(): assert result != IncrementalStreamResult( **modified_args(args, items=["hello", "foo"]) ) + assert result != IncrementalStreamResult(**modified_args(args, id="bar")) + assert result != IncrementalStreamResult( + **modified_args(args, sub_path=["bar", 2]) + ) assert result != IncrementalStreamResult(**modified_args(args, errors=[])) - assert result != IncrementalStreamResult(**modified_args(args, path=["foo", 2])) - assert result != IncrementalStreamResult(**modified_args(args, label="baz")) assert result != IncrementalStreamResult( **modified_args(args, extensions={"baz": 1}) ) @@ -210,14 +201,22 @@ def can_compare_incremental_stream_result(): assert result == tuple(args.values())[:3] assert result == tuple(args.values())[:2] assert result != tuple(args.values())[:1] - assert result != (["hello", "world"], []) + assert result != (["hello", "world"], "bar") + args["subPath"] = args.pop("sub_path") assert result == args - assert result == dict(list(args.items())[:2]) - assert result == dict(list(args.items())[:3]) - assert result != dict(list(args.items())[:2] + [("path", ["foo", 2])]) - assert result != {**args, "label": "baz"} + assert result != {**args, "items": ["hello", "foo"]} + assert result != {**args, "id": "bar"} + assert result != {**args, "subPath": ["bar", 2]} + assert result != {**args, "errors": []} + assert result != {**args, "extensions": {"baz": 1}} + + def can_print_stream_record(): + record = StreamRecord(Path(None, 0, None)) + assert str(record) == "StreamRecord(path=[0])" + record = StreamRecord(Path(None, "bar", "Bar"), "foo") + assert str(record) == "StreamRecord(path=['bar'], label='foo')" - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_stream_a_list_field(): document = parse("{ scalarList @stream(initialCount: 1) }") result = await complete( @@ -225,22 +224,19 @@ async def can_stream_a_list_field(): ) assert result == [ { - "data": { - "scalarList": ["apple"], - }, - "hasNext": True, - }, - { - "incremental": [{"items": ["banana"], "path": ["scalarList", 1]}], + "data": {"scalarList": ["apple"]}, + "pending": [{"id": "0", "path": ["scalarList"]}], "hasNext": True, }, + {"incremental": [{"items": ["banana"], "id": "0"}], "hasNext": True}, { - "incremental": [{"items": ["coconut"], "path": ["scalarList", 2]}], + "incremental": [{"items": ["coconut"], "id": "0"}], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_use_default_value_of_initial_count(): document = parse("{ scalarList @stream }") result = await complete( @@ -248,35 +244,27 @@ async def can_use_default_value_of_initial_count(): ) assert result == [ { - "data": { - "scalarList": [], - }, - "hasNext": True, - }, - { - "incremental": [{"items": ["apple"], "path": ["scalarList", 0]}], + "data": {"scalarList": []}, + "pending": [{"id": "0", "path": ["scalarList"]}], "hasNext": True, }, + {"incremental": [{"items": ["apple"], "id": "0"}], "hasNext": True}, + {"incremental": [{"items": ["banana"], "id": "0"}], "hasNext": True}, { - "incremental": [{"items": ["banana"], "path": ["scalarList", 1]}], - "hasNext": True, - }, - { - "incremental": [{"items": ["coconut"], "path": ["scalarList", 2]}], + "incremental": [{"items": ["coconut"], "id": "0"}], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def 
     async def negative_values_of_initial_count_throw_field_errors():
         document = parse("{ scalarList @stream(initialCount: -2) }")
         result = await complete(
             document, {"scalarList": ["apple", "banana", "coconut"]}
         )
         assert result == {
-            "data": {
-                "scalarList": None,
-            },
+            "data": {"scalarList": None},
             "errors": [
                 {
                     "message": "initialCount must be a positive integer",
@@ -286,14 +274,12 @@ async def negative_values_of_initial_count_throw_field_errors():
             ],
         }

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def non_integer_values_of_initial_count_throw_field_errors():
         document = parse("{ scalarList @stream(initialCount: 1.5) }")
         result = await complete(document, {"scalarList": ["apple", "half of a banana"]})
         assert result == {
-            "data": {
-                "scalarList": None,
-            },
+            "data": {"scalarList": None},
             "errors": [
                 {
                     "message": "Argument 'initialCount' has invalid value 1.5.",
@@ -303,7 +289,7 @@ async def non_integer_values_of_initial_count_throw_field_errors():
             ],
         }

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def returns_label_from_stream_directive():
         document = parse(
             '{ scalarList @stream(initialCount: 1, label: "scalar-stream") }'
@@ -313,34 +299,21 @@ async def returns_label_from_stream_directive():
         )
         assert result == [
             {
-                "data": {
-                    "scalarList": ["apple"],
-                },
-                "hasNext": True,
-            },
-            {
-                "incremental": [
-                    {
-                        "items": ["banana"],
-                        "path": ["scalarList", 1],
-                        "label": "scalar-stream",
-                    }
+                "data": {"scalarList": ["apple"]},
+                "pending": [
+                    {"id": "0", "path": ["scalarList"], "label": "scalar-stream"}
                 ],
                 "hasNext": True,
             },
+            {"incremental": [{"items": ["banana"], "id": "0"}], "hasNext": True},
             {
-                "incremental": [
-                    {
-                        "items": ["coconut"],
-                        "path": ["scalarList", 2],
-                        "label": "scalar-stream",
-                    }
-                ],
+                "incremental": [{"items": ["coconut"], "id": "0"}],
+                "completed": [{"id": "0"}],
                 "hasNext": False,
             },
         ]

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def throws_an_error_for_stream_directive_with_non_string_label():
         document = parse("{ scalarList @stream(initialCount: 1, label: 42) }")
         result = await complete(document, {"scalarList": ["some apples"]})
@@ -348,32 +321,22 @@ async def throws_an_error_for_stream_directive_with_non_string_label():
             "data": {"scalarList": None},
             "errors": [
                 {
-                    "locations": [
-                        {
-                            "line": 1,
-                            "column": 46,
-                        }
-                    ],
+                    "locations": [{"line": 1, "column": 46}],
                     "message": "Argument 'label' has invalid value 42.",
                     "path": ["scalarList"],
                 }
             ],
         }

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def can_disable_stream_using_if_argument():
         document = parse("{ scalarList @stream(initialCount: 0, if: false) }")
         result = await complete(
             document, {"scalarList": ["apple", "banana", "coconut"]}
         )
-        assert result == {
-            "data": {
-                "scalarList": ["apple", "banana", "coconut"],
-            },
-        }
+        assert result == {"data": {"scalarList": ["apple", "banana", "coconut"]}}

-    @pytest.mark.asyncio()
-    @pytest.mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning")
+    @pytest.mark.asyncio
     async def does_not_disable_stream_with_null_if_argument():
         document = parse(
             "query ($shouldStream: Boolean)"
@@ -384,23 +347,18 @@ async def does_not_disable_stream_with_null_if_argument():
         )
         assert result == [
             {
-                "data": {
-                    "scalarList": ["apple", "banana"],
-                },
+                "data": {"scalarList": ["apple", "banana"]},
+                "pending": [{"id": "0", "path": ["scalarList"]}],
                 "hasNext": True,
             },
             {
-                "incremental": [
-                    {
-                        "items": ["coconut"],
-                        "path": ["scalarList", 2],
-                    }
-                ],
+                "incremental": [{"items": ["coconut"], "id": "0"}],
+                "completed": [{"id": "0"}],
                 "hasNext": False,
             },
         ]

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def can_stream_multi_dimensional_lists():
         document = parse("{ scalarListList @stream(initialCount: 1) }")
         result = await complete(
@@ -415,32 +373,24 @@ async def can_stream_multi_dimensional_lists():
         )
         assert result == [
             {
-                "data": {
-                    "scalarListList": [["apple", "apple", "apple"]],
-                },
+                "data": {"scalarListList": [["apple", "apple", "apple"]]},
+                "pending": [{"id": "0", "path": ["scalarListList"]}],
                 "hasNext": True,
             },
             {
-                "incremental": [
-                    {
-                        "items": [["banana", "banana", "banana"]],
-                        "path": ["scalarListList", 1],
-                    }
-                ],
+                "incremental": [{"items": [["banana", "banana", "banana"]], "id": "0"}],
                 "hasNext": True,
             },
             {
                 "incremental": [
-                    {
-                        "items": [["coconut", "coconut", "coconut"]],
-                        "path": ["scalarListList", 2],
-                    }
+                    {"items": [["coconut", "coconut", "coconut"]], "id": "0"}
                 ],
+                "completed": [{"id": "0"}],
                 "hasNext": False,
             },
         ]

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def can_stream_a_field_that_returns_a_list_of_awaitables():
         document = parse(
             """
@@ -454,7 +404,6 @@ async def can_stream_a_field_that_returns_a_list_of_awaitables():
         )

         async def await_friend(f):
-            await sleep(0)
             return f

         result = await complete(
@@ -469,20 +418,17 @@ async def await_friend(f):
                         {"name": "Han", "id": "2"},
                     ],
                 },
+                "pending": [{"id": "0", "path": ["friendList"]}],
                 "hasNext": True,
             },
             {
-                "incremental": [
-                    {
-                        "items": [{"name": "Leia", "id": "3"}],
-                        "path": ["friendList", 2],
-                    }
-                ],
+                "incremental": [{"items": [{"name": "Leia", "id": "3"}], "id": "0"}],
+                "completed": [{"id": "0"}],
                 "hasNext": False,
             },
         ]

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def can_stream_in_correct_order_with_list_of_awaitables():
         document = parse(
             """
@@ -496,7 +442,6 @@ async def can_stream_in_correct_order_with_list_of_awaitables():
         )

         async def await_friend(f):
-            await sleep(0)
             return f

         result = await complete(
@@ -506,38 +451,25 @@ async def await_friend(f):
         assert result == [
             {
                 "data": {"friendList": []},
+                "pending": [{"id": "0", "path": ["friendList"]}],
                 "hasNext": True,
             },
             {
-                "incremental": [
-                    {
-                        "items": [{"name": "Luke", "id": "1"}],
-                        "path": ["friendList", 0],
-                    }
-                ],
+                "incremental": [{"items": [{"name": "Luke", "id": "1"}], "id": "0"}],
                 "hasNext": True,
             },
             {
-                "incremental": [
-                    {
-                        "items": [{"name": "Han", "id": "2"}],
-                        "path": ["friendList", 1],
-                    }
-                ],
+                "incremental": [{"items": [{"name": "Han", "id": "2"}], "id": "0"}],
                 "hasNext": True,
             },
             {
-                "incremental": [
-                    {
-                        "items": [{"name": "Leia", "id": "3"}],
-                        "path": ["friendList", 2],
-                    }
-                ],
+                "incremental": [{"items": [{"name": "Leia", "id": "3"}], "id": "0"}],
+                "completed": [{"id": "0"}],
                 "hasNext": False,
             },
         ]

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def can_stream_a_field_that_returns_a_list_with_nested_async_fields():
         document = parse(
             """
@@ -572,20 +504,17 @@ async def get_id(f):
                         {"name": "Han", "id": "2"},
                     ]
                 },
+                "pending": [{"id": "0", "path": ["friendList"]}],
                 "hasNext": True,
             },
             {
-                "incremental": [
-                    {
-                        "items": [{"name": "Leia", "id": "3"}],
-                        "path": ["friendList", 2],
-                    }
-                ],
+                "incremental": [{"items": [{"name": "Leia", "id": "3"}], "id": "0"}],
+                "completed": [{"id": "0"}],
                 "hasNext": False,
             },
         ]

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def handles_error_in_list_of_awaitables_before_initial_count_reached():
         document = parse(
             """
@@ -599,7 +528,6 @@ async def handles_error_in_list_of_awaitables_before_initial_count_reached():
         )

         async def await_friend(f, i):
-            await sleep(0)
             if i == 1:
RuntimeError("bad") return f @@ -622,20 +550,17 @@ async def await_friend(f, i): "path": ["friendList", 1], } ], + "pending": [{"id": "0", "path": ["friendList"]}], "hasNext": True, }, { - "incremental": [ - { - "items": [{"name": "Leia", "id": "3"}], - "path": ["friendList", 2], - } - ], + "incremental": [{"items": [{"name": "Leia", "id": "3"}], "id": "0"}], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def handles_error_in_list_of_awaitables_after_initial_count_reached(): document = parse( """ @@ -649,7 +574,6 @@ async def handles_error_in_list_of_awaitables_after_initial_count_reached(): ) async def await_friend(f, i): - await sleep(0) if i == 1: raise RuntimeError("bad") return f @@ -665,13 +589,14 @@ async def await_friend(f, i): assert result == [ { "data": {"friendList": [{"name": "Luke", "id": "1"}]}, + "pending": [{"id": "0", "path": ["friendList"]}], "hasNext": True, }, { "incremental": [ { "items": [None], - "path": ["friendList", 1], + "id": "0", "errors": [ { "message": "bad", @@ -684,17 +609,13 @@ async def await_friend(f, i): "hasNext": True, }, { - "incremental": [ - { - "items": [{"name": "Leia", "id": "3"}], - "path": ["friendList", 2], - } - ], + "incremental": [{"items": [{"name": "Leia", "id": "3"}], "id": "0"}], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_stream_a_field_that_returns_an_async_iterable(): document = parse( """ @@ -709,48 +630,31 @@ async def can_stream_a_field_that_returns_an_async_iterable(): async def friend_list(_info): for i in range(3): - await sleep(0) yield friends[i] result = await complete(document, {"friendList": friend_list}) assert result == [ { "data": {"friendList": []}, + "pending": [{"id": "0", "path": ["friendList"]}], "hasNext": True, }, { - "incremental": [ - { - "items": [{"name": "Luke", "id": "1"}], - "path": ["friendList", 0], - } - ], + "incremental": [{"items": [{"name": "Luke", "id": "1"}], "id": "0"}], "hasNext": True, }, { - "incremental": [ - { - "items": [{"name": "Han", "id": "2"}], - "path": ["friendList", 1], - } - ], + "incremental": [{"items": [{"name": "Han", "id": "2"}], "id": "0"}], "hasNext": True, }, { - "incremental": [ - { - "items": [{"name": "Leia", "id": "3"}], - "path": ["friendList", 2], - } - ], + "incremental": [{"items": [{"name": "Leia", "id": "3"}], "id": "0"}], "hasNext": True, }, - { - "hasNext": False, - }, + {"completed": [{"id": "0"}], "hasNext": False}, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_stream_a_field_that_returns_an_async_iterable_with_initial_count(): document = parse( """ @@ -765,7 +669,6 @@ async def can_stream_a_field_that_returns_an_async_iterable_with_initial_count() async def friend_list(_info): for i in range(3): - await sleep(0) yield friends[i] result = await complete(document, {"friendList": friend_list}) @@ -777,23 +680,17 @@ async def friend_list(_info): {"name": "Han", "id": "2"}, ] }, + "pending": [{"id": "0", "path": ["friendList"]}], "hasNext": True, }, { - "incremental": [ - { - "items": [{"name": "Leia", "id": "3"}], - "path": ["friendList", 2], - } - ], + "incremental": [{"items": [{"name": "Leia", "id": "3"}], "id": "0"}], "hasNext": True, }, - { - "hasNext": False, - }, + {"completed": [{"id": "0"}], "hasNext": False}, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def negative_initial_count_throw_error_on_field_returning_async_iterable(): document = parse( """ @@ -821,7 +718,7 @@ async def 
@@ -821,7 +718,7 @@ async def friend_list(_info):
             "data": {"friendList": None},
         }

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def can_handle_concurrent_calls_to_next_without_waiting():
         document = parse(
             """
@@ -836,7 +733,6 @@ async def can_handle_concurrent_calls_to_next_without_waiting():

         async def friend_list(_info):
             for i in range(3):
-                await sleep(0)
                 yield friends[i]

         result = await complete_async(document, 3, {"friendList": friend_list})
@@ -850,6 +746,7 @@ async def friend_list(_info):
                         {"name": "Han", "id": "2"},
                     ]
                 },
+                "pending": [{"id": "0", "path": ["friendList"]}],
                 "hasNext": True,
             },
         },
@@ -857,19 +754,19 @@ async def friend_list(_info):
             "done": False,
             "value": {
                 "incremental": [
-                    {
-                        "items": [{"name": "Leia", "id": "3"}],
-                        "path": ["friendList", 2],
-                    }
+                    {"items": [{"name": "Leia", "id": "3"}], "id": "0"}
                 ],
                 "hasNext": True,
             },
         },
-        {"done": False, "value": {"hasNext": False}},
+        {
+            "done": False,
+            "value": {"completed": [{"id": "0"}], "hasNext": False},
+        },
         {"done": True, "value": None},
     ]

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def handles_error_in_async_iterable_before_initial_count_is_reached():
         document = parse(
             """
@@ -883,9 +780,7 @@ async def handles_error_in_async_iterable_before_initial_count_is_reached():
         )

         async def friend_list(_info):
-            await sleep(0)
             yield friends[0]
-            await sleep(0)
             raise RuntimeError("bad")

         result = await complete(document, {"friendList": friend_list})
@@ -900,7 +795,7 @@ async def friend_list(_info):
             "data": {"friendList": None},
         }

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def handles_error_in_async_iterable_after_initial_count_is_reached():
         document = parse(
             """
@@ -914,24 +809,20 @@ async def handles_error_in_async_iterable_after_initial_count_is_reached():
         )

         async def friend_list(_info):
-            await sleep(0)
             yield friends[0]
-            await sleep(0)
             raise RuntimeError("bad")

         result = await complete(document, {"friendList": friend_list})
         assert result == [
             {
-                "data": {
-                    "friendList": [{"name": "Luke", "id": "1"}],
-                },
+                "data": {"friendList": [{"name": "Luke", "id": "1"}]},
+                "pending": [{"id": "0", "path": ["friendList"]}],
                 "hasNext": True,
             },
             {
-                "incremental": [
+                "completed": [
                     {
-                        "items": None,
-                        "path": ["friendList", 1],
+                        "id": "0",
                         "errors": [
                             {
                                 "message": "bad",
@@ -945,7 +836,7 @@ async def friend_list(_info):
             },
         ]

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def handles_null_for_non_null_list_items_after_initial_count_is_reached():
         document = parse(
             """
@@ -962,16 +853,14 @@ async def handles_null_for_non_null_list_items_after_initial_count_is_reached():
         )
         assert result == [
             {
-                "data": {
-                    "nonNullFriendList": [{"name": "Luke"}],
-                },
+                "data": {"nonNullFriendList": [{"name": "Luke"}]},
+                "pending": [{"id": "0", "path": ["nonNullFriendList"]}],
                 "hasNext": True,
             },
             {
-                "incremental": [
+                "completed": [
                     {
-                        "items": None,
-                        "path": ["nonNullFriendList", 1],
+                        "id": "0",
                         "errors": [
                             {
                                 "message": "Cannot return null for non-nullable field"
@@ -986,7 +875,7 @@ async def handles_null_for_non_null_list_items_after_initial_count_is_reached():
             },
         ]

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def handles_null_for_non_null_async_items_after_initial_count_is_reached():
         document = parse(
             """
@@ -1000,9 +889,7 @@ async def handles_null_for_non_null_async_items_after_initial_count_is_reached():

         async def friend_list(_info):
             try:
-                await sleep(0)
                 yield friends[0]
-                await sleep(0)
                 yield None
             finally:
                 raise RuntimeError("Oops")

@@ -1010,16 +897,14 @@ async def friend_list(_info):
{"nonNullFriendList": friend_list}) assert result == [ { - "data": { - "nonNullFriendList": [{"name": "Luke"}], - }, + "data": {"nonNullFriendList": [{"name": "Luke"}]}, + "pending": [{"id": "0", "path": ["nonNullFriendList"]}], "hasNext": True, }, { - "incremental": [ + "completed": [ { - "items": None, - "path": ["nonNullFriendList", 1], + "id": "0", "errors": [ { "message": "Cannot return null for non-nullable field" @@ -1034,7 +919,7 @@ async def friend_list(_info): }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def handles_error_thrown_in_complete_value_after_initial_count_is_reached(): document = parse( """ @@ -1050,16 +935,15 @@ async def scalar_list(_info): result = await complete(document, {"scalarList": scalar_list}) assert result == [ { - "data": { - "scalarList": ["Luke"], - }, + "data": {"scalarList": ["Luke"]}, + "pending": [{"id": "0", "path": ["scalarList"]}], "hasNext": True, }, { "incremental": [ { "items": [None], - "path": ["scalarList", 1], + "id": "0", "errors": [ { "message": "String cannot represent value: {}", @@ -1069,11 +953,12 @@ async def scalar_list(_info): ], }, ], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def handles_async_error_in_complete_value_after_initial_count_is_reached(): document = parse( """ @@ -1089,7 +974,6 @@ async def throw(): raise RuntimeError("Oops") async def get_friend(i): - await sleep(0) return {"nonNullName": throw() if i < 0 else friends[i].name} def get_friends(_info): @@ -1103,16 +987,15 @@ def get_friends(_info): ) assert result == [ { - "data": { - "friendList": [{"nonNullName": "Luke"}], - }, + "data": {"friendList": [{"nonNullName": "Luke"}]}, + "pending": [{"id": "0", "path": ["friendList"]}], "hasNext": True, }, { "incremental": [ { "items": [None], - "path": ["friendList", 1], + "id": "0", "errors": [ { "message": "Oops", @@ -1125,17 +1008,13 @@ def get_friends(_info): "hasNext": True, }, { - "incremental": [ - { - "items": [{"nonNullName": "Han"}], - "path": ["friendList", 2], - }, - ], + "incremental": [{"items": [{"nonNullName": "Han"}], "id": "0"}], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def handles_nested_async_error_in_complete_value_after_initial_count(): document = parse( """ @@ -1148,7 +1027,6 @@ async def handles_nested_async_error_in_complete_value_after_initial_count(): ) async def get_friend_name(i): - await sleep(0) if i < 0: raise RuntimeError("Oops") return friends[i].name @@ -1164,16 +1042,15 @@ def get_friends(_info): ) assert result == [ { - "data": { - "friendList": [{"nonNullName": "Luke"}], - }, + "data": {"friendList": [{"nonNullName": "Luke"}]}, + "pending": [{"id": "0", "path": ["friendList"]}], "hasNext": True, }, { "incremental": [ { "items": [None], - "path": ["friendList", 1], + "id": "0", "errors": [ { "message": "Oops", @@ -1186,17 +1063,13 @@ def get_friends(_info): "hasNext": True, }, { - "incremental": [ - { - "items": [{"nonNullName": "Han"}], - "path": ["friendList", 2], - } - ], + "incremental": [{"items": [{"nonNullName": "Han"}], "id": "0"}], + "completed": [{"id": "0"}], "hasNext": False, }, ] - @pytest.mark.asyncio() + @pytest.mark.asyncio async def handles_async_error_in_complete_value_after_initial_count_non_null(): document = parse( """ @@ -1212,7 +1085,6 @@ async def throw(): raise RuntimeError("Oops") async def get_friend(i): - await sleep(0) return {"nonNullName": throw() if i < 0 else friends[i].name} def get_friends(_info): @@ -1226,16 
@@ -1226,16 +1098,14 @@ def get_friends(_info):
         )
         assert result == [
             {
-                "data": {
-                    "nonNullFriendList": [{"nonNullName": "Luke"}],
-                },
+                "data": {"nonNullFriendList": [{"nonNullName": "Luke"}]},
+                "pending": [{"id": "0", "path": ["nonNullFriendList"]}],
                 "hasNext": True,
             },
             {
-                "incremental": [
+                "completed": [
                     {
-                        "items": None,
-                        "path": ["nonNullFriendList", 1],
+                        "id": "0",
                         "errors": [
                             {
                                 "message": "Oops",
@@ -1249,7 +1119,7 @@ def get_friends(_info):
             },
         ]

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def handles_nested_async_error_in_complete_value_after_initial_non_null():
         document = parse(
             """
@@ -1262,7 +1132,6 @@ async def handles_nested_async_error_in_complete_value_after_initial_non_null():
         )

         async def get_friend_name(i):
-            await sleep(0)
             if i < 0:
                 raise RuntimeError("Oops")
             return friends[i].name
@@ -1281,13 +1150,13 @@ def get_friends(_info):
             "data": {
                 "nonNullFriendList": [{"nonNullName": "Luke"}],
             },
+            "pending": [{"id": "0", "path": ["nonNullFriendList"]}],
             "hasNext": True,
         },
         {
-            "incremental": [
+            "completed": [
                 {
-                    "items": None,
-                    "path": ["nonNullFriendList", 1],
+                    "id": "0",
                     "errors": [
                         {
                             "message": "Oops",
@@ -1301,7 +1170,7 @@ def get_friends(_info):
             },
         ]

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def handles_async_error_in_complete_value_after_initial_from_async_iterable():
         document = parse(
             """
@@ -1317,7 +1186,6 @@ async def throw():
             raise RuntimeError("Oops")

         async def get_friend(i):
-            await sleep(0)
             return {"nonNullName": throw() if i < 0 else friends[i].name}

         async def get_friends(_info):
@@ -1332,16 +1200,15 @@ async def get_friends(_info):
         )
         assert result == [
             {
-                "data": {
-                    "friendList": [{"nonNullName": "Luke"}],
-                },
+                "data": {"friendList": [{"nonNullName": "Luke"}]},
+                "pending": [{"id": "0", "path": ["friendList"]}],
                 "hasNext": True,
             },
             {
                 "incremental": [
                     {
                         "items": [None],
-                        "path": ["friendList", 1],
+                        "id": "0",
                         "errors": [
                             {
                                 "message": "Oops",
@@ -1354,21 +1221,14 @@ async def get_friends(_info):
                 "hasNext": True,
             },
             {
-                "incremental": [
-                    {
-                        "items": [{"nonNullName": "Han"}],
-                        "path": ["friendList", 2],
-                    },
-                ],
+                "incremental": [{"items": [{"nonNullName": "Han"}], "id": "0"}],
                 "hasNext": True,
             },
-            {
-                "hasNext": False,
-            },
+            {"completed": [{"id": "0"}], "hasNext": False},
         ]

-    @pytest.mark.asyncio()
-    async def handles_async_error_in_complete_value_from_async_iterable_non_null():
+    @pytest.mark.asyncio
+    async def handles_async_error_in_complete_value_from_async_generator_non_null():
         document = parse(
             """
             query {
@@ -1383,7 +1243,6 @@ async def throw():
             raise RuntimeError("Oops")

         async def get_friend(i):
-            await sleep(0)
             return {"nonNullName": throw() if i < 0 else friends[i].name}

         async def get_friends(_info):
@@ -1392,22 +1251,79 @@ async def get_friends(_info):

         result = await complete(
             document,
+            {"nonNullFriendList": get_friends},
+        )
+        assert result == [
             {
-                "nonNullFriendList": get_friends,
+                "data": {"nonNullFriendList": [{"nonNullName": "Luke"}]},
+                "pending": [{"id": "0", "path": ["nonNullFriendList"]}],
+                "hasNext": True,
             },
+            {
+                "completed": [
+                    {
+                        "id": "0",
+                        "errors": [
+                            {
+                                "message": "Oops",
+                                "locations": [{"line": 4, "column": 17}],
+                                "path": ["nonNullFriendList", 1, "nonNullName"],
+                            },
+                        ],
+                    },
+                ],
+                "hasNext": False,
+            },
+        ]
+
+    @pytest.mark.asyncio
+    async def handles_async_errors_in_complete_value_after_initial_count_no_aclose():
+        # Handles async errors thrown by complete_value after initialCount is reached
+        # from async iterable for a non-nullable list when the async iterable does
+        # not provide an aclose method.
+        document = parse(
+            """
+            query {
+              nonNullFriendList @stream(initialCount: 1) {
+                nonNullName
+              }
+            }
+            """
+        )
+
+        async def throw():
+            raise RuntimeError("Oops")
+
+        class AsyncIterableWithoutAclose:
+            def __init__(self):
+                self.count = 0
+
+            def __aiter__(self):
+                return self
+
+            async def __anext__(self):
+                count = self.count
+                self.count += 1
+                if count == 1:
+                    name = throw()
+                else:
+                    if count:
+                        count -= 1  # pragma: no cover
+                    name = friends[count].name
+                return {"nonNullName": name}
+
+        async_iterable = AsyncIterableWithoutAclose()
+        result = await complete(document, {"nonNullFriendList": async_iterable})
         assert result == [
             {
-                "data": {
-                    "nonNullFriendList": [{"nonNullName": "Luke"}],
-                },
+                "data": {"nonNullFriendList": [{"nonNullName": "Luke"}]},
+                "pending": [{"id": "0", "path": ["nonNullFriendList"]}],
                 "hasNext": True,
             },
             {
-                "incremental": [
+                "completed": [
                     {
-                        "items": None,
-                        "path": ["nonNullFriendList", 1],
+                        "id": "0",
                         "errors": [
                             {
                                 "message": "Oops",
@@ -1421,7 +1337,76 @@ async def get_friends(_info):
             },
         ]

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
+    async def handles_async_errors_in_complete_value_after_initial_count_slow_aclose():
+        # Handles async errors thrown by completeValue after initialCount is reached
+        # from async iterable for a non-nullable list when the async iterable provides
+        # concurrent next/return methods and has a slow aclose()
+        document = parse(
+            """
+            query {
+              nonNullFriendList @stream(initialCount: 1) {
+                nonNullName
+              }
+            }
+            """
+        )
+
+        async def throw():
+            raise RuntimeError("Oops")
+
+        class AsyncIterableWithSlowAclose:
+            def __init__(self):
+                self.count = 0
+                self.finished = False
+
+            def __aiter__(self):
+                return self
+
+            async def __anext__(self):
+                if self.finished:
+                    raise StopAsyncIteration  # pragma: no cover
+                count = self.count
+                self.count += 1
+                if count == 1:
+                    name = throw()
+                else:
+                    if count:
+                        count -= 1  # pragma: no cover
+                    name = friends[count].name
+                return {"nonNullName": name}
+
+            async def aclose(self):
+                await sleep(0)
+                self.finished = True
+
+        async_iterable = AsyncIterableWithSlowAclose()
+        result = await complete(document, {"nonNullFriendList": async_iterable})
+        assert result == [
+            {
+                "data": {"nonNullFriendList": [{"nonNullName": "Luke"}]},
+                "pending": [{"id": "0", "path": ["nonNullFriendList"]}],
+                "hasNext": True,
+            },
+            {
+                "completed": [
+                    {
+                        "id": "0",
+                        "errors": [
+                            {
+                                "message": "Oops",
+                                "locations": [{"line": 4, "column": 17}],
+                                "path": ["nonNullFriendList", 1, "nonNullName"],
+                            },
+                        ],
+                    },
+                ],
+                "hasNext": False,
+            },
+        ]
+        assert async_iterable.finished
+
+    @pytest.mark.asyncio
     async def filters_payloads_that_are_nulled():
         document = parse(
             """
@@ -1437,10 +1422,9 @@ async def filters_payloads_that_are_nulled():
         )

         async def resolve_null(_info):
-            await sleep(0)
+            return None

         async def friend_list(_info):
-            await sleep(0)
             yield friends[0]

         result = await complete(
@@ -1458,21 +1442,14 @@ async def friend_list(_info):
                 {
                     "message": "Cannot return null for non-nullable field"
                     " NestedObject.nonNullScalarField.",
-                    "locations": [
-                        {
-                            "line": 4,
-                            "column": 17,
-                        }
-                    ],
+                    "locations": [{"line": 4, "column": 17}],
                     "path": ["nestedObject", "nonNullScalarField"],
                 },
             ],
-            "data": {
-                "nestedObject": None,
-            },
+            "data": {"nestedObject": None},
         }

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def filters_payloads_that_are_nulled_by_a_later_synchronous_error():
         document = parse(
             """
@@ -1488,7 +1465,6 @@ async def filters_payloads_that_are_nulled_by_a_later_synchronous_error():
         )

         async def friend_list(_info):
-            await sleep(0)  # pragma: no cover
             yield friends[0]  # pragma: no cover

         result = await complete(
@@ -1510,12 +1486,10 @@ async def friend_list(_info):
                     "path": ["nestedObject", "nonNullScalarField"],
                 },
             ],
-            "data": {
-                "nestedObject": None,
-            },
+            "data": {"nestedObject": None},
         }

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     @pytest.mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning")
     async def does_not_filter_payloads_when_null_error_is_in_a_different_path():
         document = parse(
@@ -1536,11 +1510,9 @@ async def does_not_filter_payloads_when_null_error_is_in_a_different_path():
         )

         async def error_field(_info):
-            await sleep(0)
             raise RuntimeError("Oops")

         async def friend_list(_info):
-            await sleep(0)
             yield friends[0]

         result = await complete(
@@ -1559,13 +1531,17 @@ async def friend_list(_info):
                     "otherNestedObject": {},
                     "nestedObject": {"nestedFriendList": []},
                 },
+                "pending": [
+                    {"id": "0", "path": ["otherNestedObject"]},
+                    {"id": "1", "path": ["nestedObject", "nestedFriendList"]},
+                ],
                 "hasNext": True,
             },
             {
                 "incremental": [
                     {
                         "data": {"scalarField": None},
-                        "path": ["otherNestedObject"],
+                        "id": "0",
                         "errors": [
                             {
                                 "message": "Oops",
@@ -1574,17 +1550,15 @@ async def friend_list(_info):
                             },
                         ],
                     },
-                    {
-                        "items": [{"name": "Luke"}],
-                        "path": ["nestedObject", "nestedFriendList", 0],
-                    },
+                    {"items": [{"name": "Luke"}], "id": "1"},
                 ],
+                "completed": [{"id": "0"}],
                 "hasNext": True,
             },
-            {"hasNext": False},
+            {"completed": [{"id": "1"}], "hasNext": False},
         ]

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     @pytest.mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning")
     async def filters_stream_payloads_that_are_nulled_in_a_deferred_payload():
         document = parse(
@@ -1605,10 +1579,9 @@ async def filters_stream_payloads_that_are_nulled_in_a_deferred_payload():
         )

         async def resolve_null(_info):
-            await sleep(0)
+            return None

         async def friend_list(_info):
-            await sleep(0)
             yield friends[0]

         result = await complete(
@@ -1625,18 +1598,15 @@ async def friend_list(_info):

         assert result == [
             {
-                "data": {
-                    "nestedObject": {},
-                },
+                "data": {"nestedObject": {}},
+                "pending": [{"id": "0", "path": ["nestedObject"]}],
                 "hasNext": True,
             },
             {
                 "incremental": [
                     {
-                        "data": {
-                            "deeperNestedObject": None,
-                        },
-                        "path": ["nestedObject"],
+                        "data": {"deeperNestedObject": None},
+                        "id": "0",
                         "errors": [
                             {
                                 "message": "Cannot return null for non-nullable field"
@@ -1651,11 +1621,12 @@ async def friend_list(_info):
                         ],
                     },
                 ],
+                "completed": [{"id": "0"}],
                 "hasNext": False,
             },
         ]

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def filters_defer_payloads_that_are_nulled_in_a_stream_response():
         document = parse(
             """
@@ -1671,33 +1642,30 @@ async def filters_defer_payloads_that_are_nulled_in_a_stream_response():
         )

         async def resolve_null(_info):
-            await sleep(0)
+            return None

         async def friend():
-            await sleep(0)
             return {
                 "name": friends[0].name,
                 "nonNullName": resolve_null,
             }

         async def friend_list(_info):
-            await sleep(0)
             yield await friend()

         result = await complete(document, {"friendList": friend_list})
         assert result == [
             {
-                "data": {
-                    "friendList": [],
-                },
+                "data": {"friendList": []},
+                "pending": [{"id": "0", "path": ["friendList"]}],
                 "hasNext": True,
             },
             {
                 "incremental": [
                     {
                         "items": [None],
-                        "path": ["friendList", 0],
+                        "id": "0",
                         "errors": [
                             {
                                 "message": "Cannot return null for non-nullable field"
@@ -1710,26 +1678,23 @@ async def friend_list(_info):
                 ],
                 "hasNext": True,
             },
-            {
-                "hasNext": False,
-            },
+            {"completed": [{"id": "0"}], "hasNext": False},
         ]

     @pytest.mark.timeout(1)
-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def returns_iterator_and_ignores_error_when_stream_payloads_are_filtered():
         finished = False

         async def resolve_null(_info):
-            await sleep(0)
+            return None

         async def iterable(_info):
             nonlocal finished
             for i in range(3):
-                await sleep(0)
                 friend = friends[i]
                 yield {"name": friend.name, "nonNullName": None}
-            finished = True  # pragma: no cover
+            finished = True

         document = parse(
             """
@@ -1765,14 +1730,20 @@ async def iterable(_info):
         iterator = execute_result.subsequent_results

         result1 = execute_result.initial_result
-        assert result1 == {"data": {"nestedObject": {}}, "hasNext": True}
+        assert result1 == {
+            "data": {"nestedObject": {}},
+            "pending": [{"id": "0", "path": ["nestedObject"]}],
+            "hasNext": True,
+        }
+
+        assert not finished

         result2 = await anext(iterator)
         assert result2.formatted == {
             "incremental": [
                 {
                     "data": {"deeperNestedObject": None},
-                    "path": ["nestedObject"],
+                    "id": "0",
                     "errors": [
                         {
                             "message": "Cannot return null for non-nullable field"
@@ -1787,15 +1758,16 @@ async def iterable(_info):
                     ],
                 },
             ],
+            "completed": [{"id": "0"}],
             "hasNext": False,
         }

         with pytest.raises(StopAsyncIteration):
             await anext(iterator)

-        assert not finished  # running iterator cannot be canceled
+        assert finished

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def handles_awaitables_from_complete_value_after_initial_count_is_reached():
         document = parse(
             """
@@ -1809,11 +1781,9 @@ async def handles_awaitables_from_complete_value_after_initial_count_is_reached():
         )

         async def get_friend_name(i):
-            await sleep(0)
             return friends[i].name

         async def get_friend(i):
-            await sleep(0)
             if i < 2:
                 return friends[i]
             return {"id": friends[2].id, "name": get_friend_name(i)}
@@ -1830,35 +1800,76 @@ async def get_friends(_info):
         )
         assert result == [
             {
-                "data": {
-                    "friendList": [{"id": "1", "name": "Luke"}],
-                },
+                "data": {"friendList": [{"id": "1", "name": "Luke"}]},
+                "pending": [{"id": "0", "path": ["friendList"]}],
                 "hasNext": True,
             },
             {
-                "incremental": [
-                    {
-                        "items": [{"id": "2", "name": "Han"}],
-                        "path": ["friendList", 1],
-                    }
-                ],
+                "incremental": [{"items": [{"id": "2", "name": "Han"}], "id": "0"}],
                 "hasNext": True,
             },
             {
-                "incremental": [
-                    {
-                        "items": [{"id": "3", "name": "Leia"}],
-                        "path": ["friendList", 2],
-                    }
+                "incremental": [{"items": [{"id": "3", "name": "Leia"}], "id": "0"}],
+                "hasNext": True,
+            },
+            {"completed": [{"id": "0"}], "hasNext": False},
+        ]
+
+    @pytest.mark.asyncio
+    async def handles_overlapping_deferred_and_non_deferred_streams():
+        document = parse(
+            """
+            query {
+              nestedObject {
+                nestedFriendList @stream(initialCount: 0) {
+                  id
+                }
+              }
+              nestedObject {
+                ...
+                @defer {
+                  nestedFriendList @stream(initialCount: 0) {
+                    id
+                    name
+                  }
+                }
+              }
+            }
+            """
+        )
+
+        async def get_nested_friend_list(_info):
+            for i in range(2):
+                yield friends[i]
+
+        result = await complete(
+            document,
+            {
+                "nestedObject": {
+                    "nestedFriendList": get_nested_friend_list,
+                }
+            },
+        )
+
+        assert result == [
+            {
+                "data": {"nestedObject": {"nestedFriendList": []}},
+                "pending": [
+                    {"id": "0", "path": ["nestedObject", "nestedFriendList"]},
                 ],
                 "hasNext": True,
             },
             {
-                "hasNext": False,
+                "incremental": [{"items": [{"id": "1", "name": "Luke"}], "id": "0"}],
+                "hasNext": True,
+            },
+            {
+                "incremental": [{"items": [{"id": "2", "name": "Han"}], "id": "0"}],
+                "hasNext": True,
             },
+            {"completed": [{"id": "0"}], "hasNext": False},
         ]

-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def returns_payloads_properly_when_parent_deferred_slower_than_stream():
         resolve_slow_field = Event()

@@ -1884,7 +1895,6 @@ async def slow_field(_info):

         async def get_friends(_info):
             for i in range(2):
-                await sleep(0)
                 yield friends[i]

         execute_result = experimental_execute_incrementally(
@@ -1902,49 +1912,40 @@ async def get_friends(_info):
         iterator = execute_result.subsequent_results

         result1 = execute_result.initial_result
-        assert result1 == {"data": {"nestedObject": {}}, "hasNext": True}
+        assert result1 == {
+            "data": {"nestedObject": {}},
+            "pending": [{"id": "0", "path": ["nestedObject"]}],
+            "hasNext": True,
+        }

         resolve_slow_field.set()
         result2 = await anext(iterator)
         assert result2.formatted == {
+            "pending": [{"id": "1", "path": ["nestedObject", "nestedFriendList"]}],
             "incremental": [
-                {
-                    "data": {"scalarField": "slow", "nestedFriendList": []},
-                    "path": ["nestedObject"],
-                },
+                {"data": {"scalarField": "slow", "nestedFriendList": []}, "id": "0"},
             ],
+            "completed": [{"id": "0"}],
             "hasNext": True,
         }
         result3 = await anext(iterator)
         assert result3.formatted == {
-            "incremental": [
-                {
-                    "items": [{"name": "Luke"}],
-                    "path": ["nestedObject", "nestedFriendList", 0],
-                },
-            ],
+            "incremental": [{"items": [{"name": "Luke"}], "id": "1"}],
             "hasNext": True,
         }
         result4 = await anext(iterator)
         assert result4.formatted == {
-            "incremental": [
-                {
-                    "items": [{"name": "Han"}],
-                    "path": ["nestedObject", "nestedFriendList", 1],
-                },
-            ],
+            "incremental": [{"items": [{"name": "Han"}], "id": "1"}],
             "hasNext": True,
         }
         result5 = await anext(iterator)
-        assert result5.formatted == {
-            "hasNext": False,
-        }
+        assert result5.formatted == {"completed": [{"id": "1"}], "hasNext": False}

         with pytest.raises(StopAsyncIteration):
             await anext(iterator)

     @pytest.mark.timeout(1)
-    @pytest.mark.asyncio()
+    @pytest.mark.asyncio
     async def can_defer_fields_that_are_resolved_after_async_iterable_is_complete():
         resolve_slow_field = Event()
         resolve_iterable = Event()
@@ -1968,9 +1969,7 @@ async def slow_field(_info):
         )

         async def get_friends(_info):
-            await sleep(0)
             yield friends[0]
-            await sleep(0)
             yield {"id": friends[1].id, "name": slow_field}
             await resolve_iterable.wait()

@@ -1986,43 +1985,44 @@ async def get_friends(_info):
         iterator = execute_result.subsequent_results

         result1 = execute_result.initial_result
-        assert result1 == {"data": {"friendList": [{"id": "1"}]}, "hasNext": True}
+        assert result1 == {
+            "data": {"friendList": [{"id": "1"}]},
+            "pending": [
+                {"id": "0", "path": ["friendList", 0], "label": "DeferName"},
+                {"id": "1", "path": ["friendList"], "label": "stream-label"},
+            ],
+            "hasNext": True,
+        }

         resolve_iterable.set()
         result2 = await anext(iterator)
         assert result2.formatted == {
"path": ["friendList", 1], "label": "DeferName"}], "incremental": [ - { - "data": {"name": "Luke"}, - "path": ["friendList", 0], - "label": "DeferName", - }, - { - "items": [{"id": "2"}], - "path": ["friendList", 1], - "label": "stream-label", - }, + {"data": {"name": "Luke"}, "id": "0"}, + {"items": [{"id": "2"}], "id": "1"}, ], + "completed": [{"id": "0"}], "hasNext": True, } resolve_slow_field.set() result3 = await anext(iterator) assert result3.formatted == { - "incremental": [ - { - "data": {"name": "Han"}, - "path": ["friendList", 1], - "label": "DeferName", - }, - ], + "completed": [{"id": "1"}], + "hasNext": True, + } + result4 = await anext(iterator) + assert result4.formatted == { + "incremental": [{"data": {"name": "Han"}, "id": "2"}], + "completed": [{"id": "2"}], "hasNext": False, } with pytest.raises(StopAsyncIteration): await anext(iterator) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def can_defer_fields_that_are_resolved_before_async_iterable_is_complete(): resolve_slow_field = Event() resolve_iterable = Event() @@ -2046,11 +2046,8 @@ async def slow_field(_info): ) async def get_friends(_info): - await sleep(0) yield friends[0] - await sleep(0) yield {"id": friends[1].id, "name": slow_field} - await sleep(0) await resolve_iterable.wait() execute_result = await experimental_execute_incrementally( # type: ignore @@ -2065,55 +2062,53 @@ async def get_friends(_info): iterator = execute_result.subsequent_results result1 = execute_result.initial_result - assert result1 == {"data": {"friendList": [{"id": "1"}]}, "hasNext": True} + assert result1 == { + "data": {"friendList": [{"id": "1"}]}, + "pending": [ + {"id": "0", "path": ["friendList", 0], "label": "DeferName"}, + {"id": "1", "path": ["friendList"], "label": "stream-label"}, + ], + "hasNext": True, + } resolve_slow_field.set() result2 = await anext(iterator) assert result2.formatted == { + "pending": [{"id": "2", "path": ["friendList", 1], "label": "DeferName"}], "incremental": [ - { - "data": {"name": "Luke"}, - "path": ["friendList", 0], - "label": "DeferName", - }, - { - "items": [{"id": "2"}], - "path": ["friendList", 1], - "label": "stream-label", - }, + {"data": {"name": "Luke"}, "id": "0"}, + {"items": [{"id": "2"}], "id": "1"}, ], + "completed": [{"id": "0"}], "hasNext": True, } result3 = await anext(iterator) assert result3.formatted == { "incremental": [ - { - "data": {"name": "Han"}, - "path": ["friendList", 1], - "label": "DeferName", - }, + {"data": {"name": "Han"}, "id": "2"}, ], + "completed": [{"id": "2"}], "hasNext": True, } resolve_iterable.set() result4 = await anext(iterator) assert result4.formatted == { + "completed": [{"id": "1"}], "hasNext": False, } with pytest.raises(StopAsyncIteration): await anext(iterator) - @pytest.mark.asyncio() - async def finishes_async_iterable_when_returned_generator_is_closed(): + @pytest.mark.asyncio + async def finishes_async_iterable_when_finished_generator_is_closed(): finished = False async def iterable(_info): nonlocal finished for i in range(3): - await sleep(0) yield friends[i] finished = True @@ -2137,16 +2132,22 @@ async def iterable(_info): iterator = execute_result.subsequent_results result1 = execute_result.initial_result - assert result1 == {"data": {"friendList": [{"id": "1"}]}, "hasNext": True} + assert result1 == { + "data": {"friendList": [{"id": "1"}]}, + "pending": [ + {"id": "0", "path": ["friendList", 0]}, + {"id": "1", "path": ["friendList"]}, + ], + "hasNext": True, + } await iterator.aclose() with pytest.raises(StopAsyncIteration): await 
anext(iterator) - await sleep(0) assert finished - @pytest.mark.asyncio() + @pytest.mark.asyncio async def finishes_async_iterable_when_underlying_iterator_has_no_close_method(): class Iterable: def __init__(self): @@ -2156,7 +2157,6 @@ def __aiter__(self): return self async def __anext__(self): - await sleep(0) index = self.index self.index = index + 1 try: @@ -2186,6 +2186,7 @@ async def __anext__(self): result1 = execute_result.initial_result assert result1 == { "data": {"friendList": [{"id": "1", "name": "Luke"}]}, + "pending": [{"id": "0", "path": ["friendList"]}], "hasNext": True, } @@ -2193,18 +2194,15 @@ async def __anext__(self): with pytest.raises(StopAsyncIteration): await anext(iterator) - await sleep(0) - await sleep(0) assert iterable.index == 4 - @pytest.mark.asyncio() - async def finishes_async_iterable_when_error_is_raised_in_returned_generator(): + @pytest.mark.asyncio + async def finishes_async_iterable_when_error_is_raised_in_finished_generator(): finished = False async def iterable(_info): nonlocal finished for i in range(3): - await sleep(0) yield friends[i] finished = True @@ -2228,7 +2226,14 @@ async def iterable(_info): iterator = execute_result.subsequent_results result1 = execute_result.initial_result - assert result1 == {"data": {"friendList": [{"id": "1"}]}, "hasNext": True} + assert result1 == { + "data": {"friendList": [{"id": "1"}]}, + "pending": [ + {"id": "0", "path": ["friendList", 0]}, + {"id": "1", "path": ["friendList"]}, + ], + "hasNext": True, + } with pytest.raises(RuntimeError, match="bad"): await iterator.athrow(RuntimeError("bad")) @@ -2236,5 +2241,4 @@ async def iterable(_info): with pytest.raises(StopAsyncIteration): await anext(iterator) - await sleep(0) assert finished diff --git a/tests/execution/test_subscribe.py b/tests/execution/test_subscribe.py index fcbd13ef..3c3cba60 100644 --- a/tests/execution/test_subscribe.py +++ b/tests/execution/test_subscribe.py @@ -13,6 +13,7 @@ ) import pytest + from graphql.execution import ( ExecutionResult, create_source_event_stream, @@ -44,7 +45,7 @@ anext # noqa: B018 except NameError: # pragma: no cover (Python < 3.10) # noinspection PyShadowingBuiltins - async def anext(iterator): # noqa: A001 + async def anext(iterator): """Return the next item from an async iterator.""" return await iterator.__anext__() @@ -197,7 +198,7 @@ def subscribe_with_bad_args( # Check all error cases when initializing the subscription. 
def describe_subscription_initialization_phase(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def accepts_positional_arguments(): document = parse( """ @@ -217,7 +218,7 @@ async def empty_async_iterable(_info): await anext(ai) await ai.aclose() # type: ignore - @pytest.mark.asyncio() + @pytest.mark.asyncio async def accepts_multiple_subscription_fields_defined_in_schema(): schema = GraphQLSchema( query=DummyQueryType, @@ -242,7 +243,7 @@ async def foo_generator(_info): await subscription.aclose() # type: ignore - @pytest.mark.asyncio() + @pytest.mark.asyncio async def accepts_type_definition_with_sync_subscribe_function(): async def foo_generator(_obj, _info): yield {"foo": "FooValue"} @@ -262,7 +263,7 @@ async def foo_generator(_obj, _info): await subscription.aclose() # type: ignore - @pytest.mark.asyncio() + @pytest.mark.asyncio async def accepts_type_definition_with_async_subscribe_function(): async def foo_generator(_obj, _info): await asyncio.sleep(0) @@ -290,7 +291,7 @@ async def subscribe_fn(obj, info): await subscription.aclose() # type: ignore - @pytest.mark.asyncio() + @pytest.mark.asyncio async def should_only_resolve_the_first_field_of_invalid_multi_field(): did_resolve = {"foo": False, "bar": False} @@ -325,7 +326,7 @@ async def subscribe_bar(_obj, _info): # pragma: no cover await subscription.aclose() # type: ignore - @pytest.mark.asyncio() + @pytest.mark.asyncio async def resolves_to_an_error_if_schema_does_not_support_subscriptions(): schema = GraphQLSchema(query=DummyQueryType) document = parse("subscription { unknownField }") @@ -343,7 +344,7 @@ async def resolves_to_an_error_if_schema_does_not_support_subscriptions(): ], ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def resolves_to_an_error_for_unknown_subscription_field(): schema = GraphQLSchema( query=DummyQueryType, @@ -364,7 +365,7 @@ async def resolves_to_an_error_for_unknown_subscription_field(): ], ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def should_pass_through_unexpected_errors_thrown_in_subscribe(): schema = GraphQLSchema( query=DummyQueryType, @@ -375,8 +376,7 @@ async def should_pass_through_unexpected_errors_thrown_in_subscribe(): with pytest.raises(AttributeError): subscribe_with_bad_args(schema=schema, document={}) # type: ignore - @pytest.mark.asyncio() - @pytest.mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning") + @pytest.mark.asyncio async def throws_an_error_if_subscribe_does_not_return_an_iterator(): expected_result = ( None, @@ -405,7 +405,7 @@ async def async_fn(obj, info): del result cleanup() - @pytest.mark.asyncio() + @pytest.mark.asyncio async def resolves_to_an_error_for_subscription_resolver_errors(): expected_result = ( None, @@ -447,7 +447,7 @@ async def reject_with_error(*args): assert is_awaitable(result) assert await result == expected_result - @pytest.mark.asyncio() + @pytest.mark.asyncio async def resolves_to_an_error_if_variables_were_wrong_type(): schema = GraphQLSchema( query=DummyQueryType, @@ -492,7 +492,7 @@ async def resolves_to_an_error_if_variables_were_wrong_type(): # Once a subscription returns a valid AsyncIterator, it can still yield errors. 
def describe_subscription_publish_phase(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def produces_a_payload_for_multiple_subscribe_in_same_subscription(): pubsub = SimplePubSub() @@ -527,7 +527,7 @@ async def produces_a_payload_for_multiple_subscribe_in_same_subscription(): assert await payload1 == (expected_payload, None) assert await payload2 == (expected_payload, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def produces_a_payload_when_queried_fields_are_async(): pubsub = SimplePubSub() subscription = create_subscription(pubsub, {"asyncResolver": True}) @@ -564,7 +564,7 @@ async def produces_a_payload_when_queried_fields_are_async(): with pytest.raises(StopAsyncIteration): await anext(subscription) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def produces_a_payload_per_subscription_event(): pubsub = SimplePubSub() subscription = create_subscription(pubsub) @@ -643,7 +643,7 @@ async def produces_a_payload_per_subscription_event(): with pytest.raises(StopAsyncIteration): assert await anext(subscription) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def subscribe_function_returns_errors_with_defer(): pubsub = SimplePubSub() subscription = create_subscription(pubsub, {"shouldDefer": True}) @@ -707,7 +707,7 @@ async def subscribe_function_returns_errors_with_defer(): with pytest.raises(StopAsyncIteration): assert await anext(subscription) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def subscribe_function_returns_errors_with_stream(): pubsub = SimplePubSub() subscription = create_subscription(pubsub, {"shouldStream": True}) @@ -788,7 +788,7 @@ async def subscribe_function_returns_errors_with_stream(): with pytest.raises(StopAsyncIteration): assert await anext(subscription) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def produces_a_payload_when_there_are_multiple_events(): pubsub = SimplePubSub() subscription = create_subscription(pubsub) @@ -844,7 +844,7 @@ async def produces_a_payload_when_there_are_multiple_events(): None, ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def should_not_trigger_when_subscription_is_already_done(): pubsub = SimplePubSub() subscription = create_subscription(pubsub) @@ -895,7 +895,7 @@ async def should_not_trigger_when_subscription_is_already_done(): with pytest.raises(StopAsyncIteration): await payload - @pytest.mark.asyncio() + @pytest.mark.asyncio async def should_not_trigger_when_subscription_is_thrown(): pubsub = SimplePubSub() subscription = create_subscription(pubsub) @@ -936,7 +936,7 @@ async def should_not_trigger_when_subscription_is_thrown(): with pytest.raises(StopAsyncIteration): await payload - @pytest.mark.asyncio() + @pytest.mark.asyncio async def event_order_is_correct_for_multiple_publishes(): pubsub = SimplePubSub() subscription = create_subscription(pubsub) @@ -992,7 +992,7 @@ async def event_order_is_correct_for_multiple_publishes(): None, ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def should_handle_error_during_execution_of_source_event(): async def generate_messages(_obj, _info): yield "Hello" @@ -1040,7 +1040,7 @@ def resolve_message(message, _info): # Subsequent events are still executed. 
assert await anext(subscription) == ({"newMessage": "Bonjour"}, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def should_pass_through_error_thrown_in_source_event_stream(): async def generate_messages(_obj, _info): yield "Hello" @@ -1077,7 +1077,7 @@ def resolve_message(message, _info): with pytest.raises(StopAsyncIteration): await anext(subscription) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def should_work_with_sync_resolve_function(): async def generate_messages(_obj, _info): yield "Hello" @@ -1105,7 +1105,7 @@ def resolve_message(message, _info): assert await anext(subscription) == ({"newMessage": "Hello"}, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def should_work_with_async_resolve_function(): async def generate_messages(_obj, _info): await asyncio.sleep(0) @@ -1135,7 +1135,7 @@ async def resolve_message(message, _info): assert await anext(subscription) == ({"newMessage": "Hello"}, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def should_work_with_custom_async_iterator(): class MessageGenerator: resolved: List[str] = [] @@ -1185,7 +1185,7 @@ async def resolve(cls, message, _info) -> str: await subscription.aclose() # type: ignore - @pytest.mark.asyncio() + @pytest.mark.asyncio async def should_close_custom_async_iterator(): class MessageGenerator: closed: bool = False diff --git a/tests/execution/test_sync.py b/tests/execution/test_sync.py index 36f8c9a5..af2faf28 100644 --- a/tests/execution/test_sync.py +++ b/tests/execution/test_sync.py @@ -1,4 +1,5 @@ import pytest + from graphql import graphql_sync from graphql.execution import execute, execute_sync from graphql.language import parse @@ -51,7 +52,7 @@ def does_not_return_an_awaitable_if_mutation_fields_are_all_synchronous(): None, ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def returns_an_awaitable_if_any_field_is_asynchronous(): doc = "query Example { syncField, asyncField }" result = execute(schema, parse(doc), "rootValue") @@ -80,7 +81,7 @@ def does_not_throw_if_not_encountering_async_execution_with_check_sync(): None, ) - @pytest.mark.asyncio() + @pytest.mark.asyncio @pytest.mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning") async def throws_if_encountering_async_execution_with_check_sync(): doc = "query Example { syncField, asyncField }" @@ -93,7 +94,7 @@ async def throws_if_encountering_async_execution_with_check_sync(): del exc_info cleanup() - @pytest.mark.asyncio() + @pytest.mark.asyncio @pytest.mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning") async def throws_if_encountering_async_operation_without_check_sync(): doc = "query Example { syncField, asyncField }" @@ -112,8 +113,7 @@ async def throws_if_encountering_async_operation_without_check_sync(): del result cleanup() - @pytest.mark.asyncio() - @pytest.mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning") + @pytest.mark.asyncio async def throws_if_encountering_async_iterable_execution_with_check_sync(): doc = """ query Example { @@ -132,8 +132,7 @@ async def throws_if_encountering_async_iterable_execution_with_check_sync(): del exc_info cleanup() - @pytest.mark.asyncio() - @pytest.mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning") + @pytest.mark.asyncio async def throws_if_encountering_async_iterable_execution_without_check_sync(): doc = """ query Example { @@ -188,7 +187,7 @@ def does_not_throw_if_not_encountering_async_operation_with_check_sync(): None, ) - @pytest.mark.asyncio() + @pytest.mark.asyncio @pytest.mark.filterwarnings("ignore:.* 
was never awaited:RuntimeWarning") async def throws_if_encountering_async_operation_with_check_sync(): doc = "query Example { syncField, asyncField }" @@ -199,7 +198,7 @@ async def throws_if_encountering_async_operation_with_check_sync(): del exc_info cleanup() - @pytest.mark.asyncio() + @pytest.mark.asyncio @pytest.mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning") async def throws_if_encountering_async_operation_without_check_sync(): doc = "query Example { syncField, asyncField }" diff --git a/tests/fixtures/__init__.py b/tests/fixtures/__init__.py index 3df1c2f0..5e4058f9 100644 --- a/tests/fixtures/__init__.py +++ b/tests/fixtures/__init__.py @@ -7,11 +7,11 @@ import pytest __all__ = [ + "big_schema_introspection_result", + "big_schema_sdl", "cleanup", "kitchen_sink_query", "kitchen_sink_sdl", - "big_schema_sdl", - "big_schema_introspection_result", ] diff --git a/tests/fixtures/schema_kitchen_sink.graphql b/tests/fixtures/schema_kitchen_sink.graphql index 8ec1f2d8..c1d9d06e 100644 --- a/tests/fixtures/schema_kitchen_sink.graphql +++ b/tests/fixtures/schema_kitchen_sink.graphql @@ -26,6 +26,7 @@ type Foo implements Bar & Baz & Two { five(argument: [String] = ["string", "string"]): String six(argument: InputType = {key: "value"}): Type seven(argument: Int = null): Type + eight(argument: OneOfInputType): Type } type AnnotatedObject @onObject(arg: "value") { @@ -115,6 +116,11 @@ input InputType { answer: Int = 42 } +input OneOfInputType @oneOf { + string: String + int: Int +} + input AnnotatedInput @onInputObject { annotatedField: Type @onInputFieldDefinition } diff --git a/tests/language/test_block_string.py b/tests/language/test_block_string.py index 74f99734..d135dde9 100644 --- a/tests/language/test_block_string.py +++ b/tests/language/test_block_string.py @@ -148,8 +148,8 @@ def __init__(self, string: str) -> None: def __str__(self) -> str: return self.string - _assert_printable(cast(str, LazyString(""))) - _assert_non_printable(cast(str, LazyString(" "))) + _assert_printable(cast("str", LazyString(""))) + _assert_non_printable(cast("str", LazyString(" "))) def describe_print_block_string(): @@ -212,4 +212,4 @@ class LazyString: def __str__(self) -> str: return "lazy" - _assert_block_string(cast(str, LazyString()), '"""lazy"""') + _assert_block_string(cast("str", LazyString()), '"""lazy"""') diff --git a/tests/language/test_block_string_fuzz.py b/tests/language/test_block_string_fuzz.py index feb7ca2b..0e17b4d4 100644 --- a/tests/language/test_block_string_fuzz.py +++ b/tests/language/test_block_string_fuzz.py @@ -1,4 +1,5 @@ import pytest + from graphql.language import Lexer, Source, TokenKind from graphql.language.block_string import ( is_printable_as_block_string, @@ -40,7 +41,7 @@ def assert_non_printable_block_string(test_value: str) -> None: def describe_print_block_string(): - @pytest.mark.slow() + @pytest.mark.slow @pytest.mark.timeout(80) def correctly_print_random_strings(): # Testing with length >7 is taking exponentially more time. 
However, it is diff --git a/tests/language/test_lexer.py b/tests/language/test_lexer.py index 0bc9a398..a44e859d 100644 --- a/tests/language/test_lexer.py +++ b/tests/language/test_lexer.py @@ -3,6 +3,7 @@ from typing import Optional, Tuple import pytest + from graphql.error import GraphQLSyntaxError from graphql.language import Lexer, Source, SourceLocation, Token, TokenKind from graphql.language.lexer import is_punctuator_token_kind @@ -393,8 +394,7 @@ def lexes_block_strings(): TokenKind.BLOCK_STRING, 0, 19, 1, 1, "slashes \\\\ \\/" ) assert lex_one( - '"""\n\n spans\n multiple\n' - ' lines\n\n """' + '"""\n\n spans\n multiple\n lines\n\n """' ) == Token(TokenKind.BLOCK_STRING, 0, 68, 1, 1, "spans\n multiple\n lines") def advance_line_after_lexing_multiline_block_string(): diff --git a/tests/language/test_location.py b/tests/language/test_location.py index c9ae2c14..1210795a 100644 --- a/tests/language/test_location.py +++ b/tests/language/test_location.py @@ -41,3 +41,10 @@ def can_compare_with_formatted_location(): different_location = SourceLocation(2, 2).formatted assert not location == different_location # noqa: SIM201 assert location != different_location + + def can_be_hashed(): + location = SourceLocation(1, 2) + same_location = SourceLocation(1, 2) + assert hash(location) == hash(same_location) + different_location = SourceLocation(2, 2) + assert hash(location) != hash(different_location) diff --git a/tests/language/test_parser.py b/tests/language/test_parser.py index b671e444..0121db23 100644 --- a/tests/language/test_parser.py +++ b/tests/language/test_parser.py @@ -3,6 +3,7 @@ from typing import Optional, Tuple, cast import pytest + from graphql.error import GraphQLSyntaxError from graphql.language import ( ArgumentNode, @@ -180,11 +181,11 @@ def parses_multi_byte_characters(): definitions = doc.definitions assert isinstance(definitions, tuple) assert len(definitions) == 1 - selection_set = cast(OperationDefinitionNode, definitions[0]).selection_set + selection_set = cast("OperationDefinitionNode", definitions[0]).selection_set selections = selection_set.selections assert isinstance(selections, tuple) assert len(selections) == 1 - arguments = cast(FieldNode, selections[0]).arguments + arguments = cast("FieldNode", selections[0]).arguments assert isinstance(arguments, tuple) assert len(arguments) == 1 value = arguments[0].value @@ -262,7 +263,7 @@ def parses_required_field(): definitions = doc.definitions assert isinstance(definitions, tuple) assert len(definitions) == 1 - definition = cast(OperationDefinitionNode, definitions[0]) + definition = cast("OperationDefinitionNode", definitions[0]) selection_set: SelectionSetNode | None = definition.selection_set assert isinstance(selection_set, SelectionSetNode) selections = selection_set.selections @@ -327,7 +328,7 @@ def parses_field_with_required_list_elements(): definitions = doc.definitions assert isinstance(definitions, tuple) assert len(definitions) == 1 - definition = cast(OperationDefinitionNode, definitions[0]) + definition = cast("OperationDefinitionNode", definitions[0]) selection_set: SelectionSetNode | None = definition.selection_set assert isinstance(selection_set, SelectionSetNode) selections = selection_set.selections @@ -351,7 +352,7 @@ def parses_field_with_optional_list_elements(): definitions = doc.definitions assert isinstance(definitions, tuple) assert len(definitions) == 1 - definition = cast(OperationDefinitionNode, definitions[0]) + definition = cast("OperationDefinitionNode", definitions[0]) 
selection_set: SelectionSetNode | None = definition.selection_set assert isinstance(selection_set, SelectionSetNode) selections = selection_set.selections @@ -375,7 +376,7 @@ def parses_field_with_required_list(): definitions = doc.definitions assert isinstance(definitions, tuple) assert len(definitions) == 1 - definition = cast(OperationDefinitionNode, definitions[0]) + definition = cast("OperationDefinitionNode", definitions[0]) selection_set: SelectionSetNode | None = definition.selection_set assert isinstance(selection_set, SelectionSetNode) selections = selection_set.selections @@ -399,7 +400,7 @@ def parses_field_with_optional_list(): definitions = doc.definitions assert isinstance(definitions, tuple) assert len(definitions) == 1 - definition = cast(OperationDefinitionNode, definitions[0]) + definition = cast("OperationDefinitionNode", definitions[0]) selection_set: SelectionSetNode | None = definition.selection_set assert isinstance(selection_set, SelectionSetNode) selections = selection_set.selections @@ -423,7 +424,7 @@ def parses_field_with_mixed_list_elements(): definitions = doc.definitions assert isinstance(definitions, tuple) assert len(definitions) == 1 - definition = cast(OperationDefinitionNode, definitions[0]) + definition = cast("OperationDefinitionNode", definitions[0]) selection_set: SelectionSetNode | None = definition.selection_set assert isinstance(selection_set, SelectionSetNode) selections = selection_set.selections @@ -482,7 +483,7 @@ def creates_ast(): definitions = doc.definitions assert isinstance(definitions, tuple) assert len(definitions) == 1 - definition = cast(OperationDefinitionNode, definitions[0]) + definition = cast("OperationDefinitionNode", definitions[0]) assert isinstance(definition, DefinitionNode) assert definition.loc == (0, 40) assert definition.operation == OperationType.QUERY diff --git a/tests/language/test_printer.py b/tests/language/test_printer.py index 6117c69d..42531096 100644 --- a/tests/language/test_printer.py +++ b/tests/language/test_printer.py @@ -1,6 +1,7 @@ from copy import deepcopy import pytest + from graphql.language import FieldNode, NameNode, parse, print_ast from ..fixtures import kitchen_sink_query # noqa: F401 @@ -59,8 +60,7 @@ def correctly_prints_mutation_operation_with_artifacts(): def prints_query_with_variable_directives(): query_ast_with_variable_directive = parse( - "query ($foo: TestType = { a: 123 }" - " @testDirective(if: true) @test) { id }" + "query ($foo: TestType = { a: 123 } @testDirective(if: true) @test) { id }" ) assert print_ast(query_ast_with_variable_directive) == dedent( """ diff --git a/tests/language/test_schema_parser.py b/tests/language/test_schema_parser.py index a5005a06..df64381a 100644 --- a/tests/language/test_schema_parser.py +++ b/tests/language/test_schema_parser.py @@ -6,6 +6,7 @@ from typing import Optional, Tuple import pytest + from graphql.error import GraphQLSyntaxError from graphql.language import ( ArgumentNode, diff --git a/tests/language/test_schema_printer.py b/tests/language/test_schema_printer.py index 35da0b06..083dcd0f 100644 --- a/tests/language/test_schema_printer.py +++ b/tests/language/test_schema_printer.py @@ -1,6 +1,7 @@ from copy import deepcopy import pytest + from graphql.language import NameNode, ScalarTypeDefinitionNode, parse, print_ast from ..fixtures import kitchen_sink_sdl # noqa: F401 @@ -57,6 +58,7 @@ def prints_kitchen_sink_without_altering_ast(kitchen_sink_sdl): # noqa: F811 five(argument: [String] = ["string", "string"]): String six(argument: 
InputType = { key: "value" }): Type seven(argument: Int = null): Type + eight(argument: OneOfInputType): Type } type AnnotatedObject @onObject(arg: "value") { @@ -139,6 +141,11 @@ def prints_kitchen_sink_without_altering_ast(kitchen_sink_sdl): # noqa: F811 answer: Int = 42 } + input OneOfInputType @oneOf { + string: String + int: Int + } + input AnnotatedInput @onInputObject { annotatedField: Type @onInputFieldDefinition } diff --git a/tests/language/test_source.py b/tests/language/test_source.py index 02014445..94553109 100644 --- a/tests/language/test_source.py +++ b/tests/language/test_source.py @@ -4,6 +4,7 @@ from typing import cast import pytest + from graphql.language import Source, SourceLocation from ..utils import dedent @@ -68,6 +69,13 @@ def can_be_compared(): assert not source == "bar" # noqa: SIM201 assert source != "bar" + def can_be_hashed(): + source = Source("foo") + same_source = Source("foo") + assert hash(source) == hash(same_source) + different_source = Source("bar") + assert hash(source) != hash(different_source) + def can_create_weak_reference(): source = Source("foo") ref = weakref.ref(source) @@ -80,7 +88,7 @@ def can_create_custom_attribute(): def rejects_invalid_location_offset(): def create_source(location_offset: tuple[int, int]) -> Source: - return Source("", "", cast(SourceLocation, location_offset)) + return Source("", "", cast("SourceLocation", location_offset)) with pytest.raises(TypeError): create_source(None) # type: ignore diff --git a/tests/language/test_visitor.py b/tests/language/test_visitor.py index 1e74c6ff..f3fdb370 100644 --- a/tests/language/test_visitor.py +++ b/tests/language/test_visitor.py @@ -5,6 +5,7 @@ from typing import Any, cast import pytest + from graphql.language import ( BREAK, REMOVE, @@ -580,7 +581,9 @@ class CustomFieldNode(SelectionNode): name: NameNode selection_set: SelectionSetNode | None - custom_selection_set = cast(FieldNode, custom_ast.definitions[0]).selection_set + custom_selection_set = cast( + "FieldNode", custom_ast.definitions[0] + ).selection_set assert custom_selection_set is not None custom_selection_set.selections = ( *custom_selection_set.selections, diff --git a/tests/pyutils/test_async_reduce.py b/tests/pyutils/test_async_reduce.py index cbcef554..0ac606c8 100644 --- a/tests/pyutils/test_async_reduce.py +++ b/tests/pyutils/test_async_reduce.py @@ -1,6 +1,7 @@ from functools import reduce import pytest + from graphql.pyutils import async_reduce, is_awaitable @@ -16,7 +17,7 @@ def callback(accumulator, current_value): assert result == 42 assert result == reduce(callback, values, initial_value) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def works_with_sync_values_and_sync_initial_value(): def callback(accumulator, current_value): return accumulator + "-" + current_value @@ -26,7 +27,7 @@ def callback(accumulator, current_value): assert not is_awaitable(result) assert result == "foo-bar-baz" - @pytest.mark.asyncio() + @pytest.mark.asyncio async def works_with_async_initial_value(): async def async_initial_value(): return "foo" @@ -39,7 +40,7 @@ def callback(accumulator, current_value): assert is_awaitable(result) assert await result == "foo-bar-baz" - @pytest.mark.asyncio() + @pytest.mark.asyncio async def works_with_async_callback(): async def async_callback(accumulator, current_value): return accumulator + "-" + current_value @@ -49,7 +50,7 @@ async def async_callback(accumulator, current_value): assert is_awaitable(result) assert await result == "foo-bar-baz" - @pytest.mark.asyncio() + 
@pytest.mark.asyncio async def works_with_async_callback_and_async_initial_value(): async def async_initial_value(): return 1 / 8 diff --git a/tests/pyutils/test_description.py b/tests/pyutils/test_description.py index 57edff39..8b82734f 100644 --- a/tests/pyutils/test_description.py +++ b/tests/pyutils/test_description.py @@ -2,6 +2,7 @@ from typing import cast import pytest + from graphql import graphql_sync from graphql.pyutils import ( Description, @@ -33,7 +34,7 @@ def __str__(self) -> str: return str(self.text) -lazy_string = cast(str, LazyString("Why am I so lazy?")) +lazy_string = cast("str", LazyString("Why am I so lazy?")) @contextmanager @@ -42,7 +43,7 @@ def registered(base: type): try: yield None finally: - unregister_description(LazyString) + unregister_description(base) def describe_description(): @@ -169,7 +170,7 @@ def graphql_directive(): assert directive.description is lazy_string assert str(directive.description).endswith("lazy?") - def handels_introspection(): + def handles_introspection(): class Lazy: def __init__(self, text: str): self.text = text @@ -185,8 +186,8 @@ def __str__(self) -> str: with registered(Lazy): field = GraphQLField( GraphQLString, - description=cast(str, description), - deprecation_reason=cast(str, deprecation_reason), + description=cast("str", description), + deprecation_reason=cast("str", deprecation_reason), ) schema = GraphQLSchema(GraphQLObjectType("Query", {"lazyField": field})) @@ -221,8 +222,8 @@ def __str__(self) -> str: with registered(Lazy): field = GraphQLField( GraphQLString, - description=cast(str, description), - deprecation_reason=cast(str, deprecation_reason), + description=cast("str", description), + deprecation_reason=cast("str", deprecation_reason), ) schema = GraphQLSchema(GraphQLObjectType("Query", {"lazyField": field})) diff --git a/tests/pyutils/test_format_list.py b/tests/pyutils/test_format_list.py index ee425eca..09567645 100644 --- a/tests/pyutils/test_format_list.py +++ b/tests/pyutils/test_format_list.py @@ -1,4 +1,5 @@ import pytest + from graphql.pyutils import and_list, or_list diff --git a/tests/pyutils/test_gather_with_cancel.py b/tests/pyutils/test_gather_with_cancel.py new file mode 100644 index 00000000..d4832d80 --- /dev/null +++ b/tests/pyutils/test_gather_with_cancel.py @@ -0,0 +1,133 @@ +from __future__ import annotations + +from asyncio import Event, create_task, gather, sleep, wait_for +from typing import Callable + +import pytest + +from graphql.pyutils import gather_with_cancel, is_awaitable + + +class Controller: + def reset(self, wait=False): + self.event = Event() + if not wait: + self.event.set() + self.returned = [] + + +controller = Controller() + + +async def coroutine(value: int) -> int: + """Simple coroutine that returns a value.""" + if value > 2: + raise RuntimeError("Oops") + await controller.event.wait() + controller.returned.append(value) + return value + + +class CustomAwaitable: + """Custom awaitable that returns a value.""" + + def __init__(self, value: int): + self.value = value + self.coroutine = coroutine(value) + + def __await__(self): + return self.coroutine.__await__() + + +awaitable_factories: dict[str, Callable] = { + "coroutine": coroutine, + "task": lambda value: create_task(coroutine(value)), + "custom": lambda value: CustomAwaitable(value), +} + +with_all_types_of_awaitables = pytest.mark.parametrize( + "type_of_awaitable", awaitable_factories +) + + +def describe_gather_with_cancel(): + @with_all_types_of_awaitables + @pytest.mark.asyncio + async def
gathers_all_values(type_of_awaitable: str): + factory = awaitable_factories[type_of_awaitable] + values = list(range(3)) + + controller.reset() + aws = [factory(i) for i in values] + + assert await gather(*aws) == values + assert controller.returned == values + + controller.reset() + aws = [factory(i) for i in values] + + result = gather_with_cancel(*aws) + assert is_awaitable(result) + + awaited = await wait_for(result, 1) + assert awaited == values + + @with_all_types_of_awaitables + @pytest.mark.asyncio + async def raises_on_exception(type_of_awaitable: str): + factory = awaitable_factories[type_of_awaitable] + values = list(range(4)) + + controller.reset() + aws = [factory(i) for i in values] + + with pytest.raises(RuntimeError, match="Oops"): + await gather(*aws) + assert controller.returned == values[:-1] + + controller.reset() + aws = [factory(i) for i in values] + + result = gather_with_cancel(*aws) + assert is_awaitable(result) + + with pytest.raises(RuntimeError, match="Oops"): + await wait_for(result, 1) + assert controller.returned == values[:-1] + + @with_all_types_of_awaitables + @pytest.mark.asyncio + async def cancels_on_exception(type_of_awaitable: str): + factory = awaitable_factories[type_of_awaitable] + values = list(range(4)) + + controller.reset(wait=True) + aws = [factory(i) for i in values] + + with pytest.raises(RuntimeError, match="Oops"): + await gather(*aws) + assert not controller.returned + + # check that the standard gather continues to produce results + controller.event.set() + await sleep(0) + assert controller.returned == values[:-1] + + controller.reset(wait=True) + aws = [factory(i) for i in values] + + result = gather_with_cancel(*aws) + assert is_awaitable(result) + + with pytest.raises(RuntimeError, match="Oops"): + await wait_for(result, 1) + assert not controller.returned + + # check that gather_with_cancel stops producing results + controller.event.set() + await sleep(0) + if type_of_awaitable == "custom": + # Cancellation of custom awaitables is not supported + assert controller.returned == values[:-1] + else: + assert not controller.returned diff --git a/tests/pyutils/test_inspect.py b/tests/pyutils/test_inspect.py index 3721d018..94c62b48 100644 --- a/tests/pyutils/test_inspect.py +++ b/tests/pyutils/test_inspect.py @@ -6,6 +6,7 @@ from typing import Any import pytest + from graphql.pyutils import Undefined, inspect from graphql.type import ( GraphQLDirective, @@ -138,7 +139,7 @@ def test_generator(): assert inspect(test_generator) == "" assert inspect(test_generator()) == "" - @pytest.mark.asyncio() + @pytest.mark.asyncio async def inspect_coroutine(): async def test_coroutine(): pass diff --git a/tests/pyutils/test_is_awaitable.py b/tests/pyutils/test_is_awaitable.py index dcee07d9..b05f01af 100644 --- a/tests/pyutils/test_is_awaitable.py +++ b/tests/pyutils/test_is_awaitable.py @@ -3,6 +3,7 @@ from sys import version_info as python_version import pytest + from graphql.pyutils import is_awaitable @@ -66,7 +67,7 @@ async def some_async_function(): assert not isawaitable(some_async_function) assert not is_awaitable(some_async_function) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def recognizes_a_coroutine_object(): async def some_async_function(): return True @@ -92,7 +93,7 @@ def some_function(): assert is_awaitable(some_old_style_coroutine) assert is_awaitable(some_old_style_coroutine) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def recognizes_a_future_object(): async def some_async_function(): return True @@ -105,7 +106,7 
@@ async def some_async_function(): assert await some_future is True - @pytest.mark.asyncio() + @pytest.mark.asyncio async def declines_an_async_generator(): async def some_async_generator_function(): yield True diff --git a/tests/pyutils/test_ref_map.py b/tests/pyutils/test_ref_map.py new file mode 100644 index 00000000..95c0056f --- /dev/null +++ b/tests/pyutils/test_ref_map.py @@ -0,0 +1,124 @@ +import pytest + +from graphql.pyutils import RefMap + +obj1 = {"a": 1, "b": 2, "c": 3} +obj2 = obj1.copy() +obj3 = obj1.copy() +obj4 = obj1.copy() + + +def describe_object_map(): + def can_create_an_empty_map(): + m = RefMap[str, int]() + assert not m + assert len(m) == 0 + assert list(m) == [] + assert list(m.keys()) == [] + assert list(m.values()) == [] + assert list(m.items()) == [] + + def can_create_a_map_with_scalar_keys_and_values(): + m = RefMap[str, int](list(obj1.items())) + assert m + assert len(m) == 3 + assert list(m) == ["a", "b", "c"] + assert list(m.keys()) == ["a", "b", "c"] + assert list(m.values()) == [1, 2, 3] + assert list(m.items()) == [("a", 1), ("b", 2), ("c", 3)] + for k, v in m.items(): + assert k in m + assert m[k] == v # noqa: PLR1733 + assert m.get(k) == v + assert v not in m + with pytest.raises(KeyError): + m[v] # type: ignore + assert m.get(v) is None + + def can_create_a_map_with_one_object_as_key(): + m = RefMap[dict, int]([(obj1, 1)]) + assert m + assert len(m) == 1 + assert list(m) == [obj1] + assert list(m.keys()) == [obj1] + assert list(m.values()) == [1] + assert list(m.items()) == [(obj1, 1)] + assert obj1 in m + assert 1 not in m + assert obj2 not in m + assert m[obj1] == 1 + assert m.get(obj1) == 1 + with pytest.raises(KeyError): + m[1] # type: ignore + assert m.get(1) is None + with pytest.raises(KeyError): + m[obj2] + assert m.get(obj2) is None + + def can_create_a_map_with_three_objects_as_keys(): + m = RefMap[dict, int]([(obj1, 1), (obj2, 2), (obj3, 3)]) + assert m + assert len(m) == 3 + assert list(m) == [obj1, obj2, obj3] + assert list(m.keys()) == [obj1, obj2, obj3] + assert list(m.values()) == [1, 2, 3] + assert list(m.items()) == [(obj1, 1), (obj2, 2), (obj3, 3)] + for k, v in m.items(): + assert k in m + assert m[k] == v # noqa: PLR1733 + assert m.get(k) == v + assert v not in m + with pytest.raises(KeyError): + m[v] # type: ignore + assert m.get(v) is None + assert obj4 not in m + with pytest.raises(KeyError): + m[obj4] + assert m.get(obj4) is None + + def can_set_a_key_that_is_an_object(): + m = RefMap[dict, int]() + m[obj1] = 1 + assert m[obj1] == 1 + assert list(m) == [obj1] + with pytest.raises(KeyError): + m[obj2] + m[obj2] = 2 + assert m[obj1] == 1 + assert m[obj2] == 2 + assert list(m) == [obj1, obj2] + m[obj2] = 3 + assert m[obj1] == 1 + assert m[obj2] == 3 + assert list(m) == [obj1, obj2] + assert len(m) == 2 + + def can_delete_a_key_that_is_an_object(): + m = RefMap[dict, int]([(obj1, 1), (obj2, 2), (obj3, 3)]) + del m[obj2] + assert obj2 not in m + assert list(m) == [obj1, obj3] + with pytest.raises(KeyError): + del m[obj2] + assert list(m) == [obj1, obj3] + assert len(m) == 2 + + def can_update_a_map(): + m = RefMap[dict, int]([(obj1, 1), (obj2, 2)]) + m.update([]) + assert list(m.keys()) == [obj1, obj2] + assert len(m) == 2 + m.update([(obj2, 3), (obj3, 4)]) + assert list(m.keys()) == [obj1, obj2, obj3] + assert list(m.values()) == [1, 3, 4] + assert list(m.items()) == [(obj1, 1), (obj2, 3), (obj3, 4)] + assert obj3 in m + assert m[obj2] == 3 + assert m[obj3] == 4 + assert len(m) == 3 + + def 
can_get_the_representation_of_a_ref_map(): + m = RefMap[dict, int]([(obj1, 1), (obj2, 2)]) + assert repr(m) == ( + "RefMap([({'a': 1, 'b': 2, 'c': 3}, 1), ({'a': 1, 'b': 2, 'c': 3}, 2)])" + ) diff --git a/tests/pyutils/test_ref_set.py b/tests/pyutils/test_ref_set.py new file mode 100644 index 00000000..fead877b --- /dev/null +++ b/tests/pyutils/test_ref_set.py @@ -0,0 +1,89 @@ +import pytest + +from graphql.pyutils import RefSet + +obj1 = ["a", "b", "c"] +obj2 = obj1.copy() +obj3 = obj1.copy() +obj4 = obj1.copy() + + +def describe_object_set(): + def can_create_an_empty_set(): + s = RefSet[int]() + assert not s + assert len(s) == 0 + assert list(s) == [] + + def can_create_a_set_with_scalar_values(): + s = RefSet[str](obj1) + assert s + assert len(s) == 3 + assert list(s) == ["a", "b", "c"] + for v in s: + assert v in s + + def can_create_a_set_with_one_object_as_value(): + s = RefSet[list]([obj1]) + assert s + assert len(s) == 1 + assert obj1 in s + assert obj2 not in s + + def can_create_a_set_with_three_objects_as_keys(): + s = RefSet[list]([obj1, obj2, obj3]) + assert s + assert len(s) == 3 + assert list(s) == [obj1, obj2, obj3] + for v in s: + assert v in s + assert obj4 not in s + + def can_add_a_value_that_is_an_object(): + s = RefSet[list]() + s.add(obj1) + assert obj1 in s + assert list(s) == [obj1] + assert obj2 not in s + s.add(obj2) + assert obj1 in s + assert obj2 in s + assert list(s) == [obj1, obj2] + s.add(obj2) + assert obj1 in s + assert obj2 in s + assert list(s) == [obj1, obj2] + assert len(s) == 2 + + def can_remove_a_value_that_is_an_object(): + s = RefSet[list]([obj1, obj2, obj3]) + s.remove(obj2) + assert obj2 not in s + assert list(s) == [obj1, obj3] + with pytest.raises(KeyError): + s.remove(obj2) + assert list(s) == [obj1, obj3] + assert len(s) == 2 + + def can_discard_a_value_that_is_an_object(): + s = RefSet[list]([obj1, obj2, obj3]) + s.discard(obj2) + assert obj2 not in s + assert list(s) == [obj1, obj3] + s.discard(obj2) + assert list(s) == [obj1, obj3] + assert len(s) == 2 + + def can_update_a_set(): + s = RefSet[list]([obj1, obj2]) + s.update([]) + assert list(s) == [obj1, obj2] + assert len(s) == 2 + s.update([obj2, obj3]) + assert list(s) == [obj1, obj2, obj3] + assert obj3 in s + assert len(s) == 3 + + def can_get_the_representation_of_a_ref_set(): + s = RefSet[list]([obj1, obj2]) + assert repr(s) == ("RefSet([['a', 'b', 'c'], ['a', 'b', 'c']])") diff --git a/tests/pyutils/test_simple_pub_sub.py b/tests/pyutils/test_simple_pub_sub.py index 2f30a8e2..f0a88dcb 100644 --- a/tests/pyutils/test_simple_pub_sub.py +++ b/tests/pyutils/test_simple_pub_sub.py @@ -1,11 +1,12 @@ from asyncio import sleep import pytest + from graphql.pyutils import SimplePubSub, is_awaitable def describe_simple_pub_sub(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def subscribe_async_iterator_mock(): pubsub = SimplePubSub() iterator = pubsub.get_subscriber() @@ -49,7 +50,7 @@ async def subscribe_async_iterator_mock(): with pytest.raises(StopAsyncIteration): await iterator.__anext__() - @pytest.mark.asyncio() + @pytest.mark.asyncio async def iterator_aclose_empties_push_queue(): pubsub = SimplePubSub() assert not pubsub.subscribers @@ -67,7 +68,7 @@ async def iterator_aclose_empties_push_queue(): assert iterator.pull_queue.qsize() == 0 assert not iterator.listening - @pytest.mark.asyncio() + @pytest.mark.asyncio async def iterator_aclose_empties_pull_queue(): pubsub = SimplePubSub() assert not pubsub.subscribers @@ -84,7 +85,7 @@ async def 
iterator_aclose_empties_pull_queue(): assert iterator.pull_queue.qsize() == 0 assert not iterator.listening - @pytest.mark.asyncio() + @pytest.mark.asyncio async def iterator_aclose_is_idempotent(): pubsub = SimplePubSub() iterator = pubsub.get_subscriber() diff --git a/tests/pyutils/test_undefined.py b/tests/pyutils/test_undefined.py index b6f62eea..b34611e3 100644 --- a/tests/pyutils/test_undefined.py +++ b/tests/pyutils/test_undefined.py @@ -1,6 +1,7 @@ import pickle import pytest + from graphql.pyutils import Undefined, UndefinedType diff --git a/tests/star_wars_schema.py b/tests/star_wars_schema.py index 3f8713ab..5f4c0809 100644 --- a/tests/star_wars_schema.py +++ b/tests/star_wars_schema.py @@ -54,7 +54,6 @@ GraphQLSchema, GraphQLString, ) - from tests.star_wars_data import ( get_droid, get_friends, @@ -141,8 +140,7 @@ "name": GraphQLField(GraphQLString, description="The name of the human."), "friends": GraphQLField( GraphQLList(character_interface), - description="The friends of the human," - " or an empty list if they have none.", + description="The friends of the human, or an empty list if they have none.", resolve=lambda human, _info: get_friends(human), ), "appearsIn": GraphQLField( @@ -183,8 +181,7 @@ "name": GraphQLField(GraphQLString, description="The name of the droid."), "friends": GraphQLField( GraphQLList(character_interface), - description="The friends of the droid," - " or an empty list if they have none.", + description="The friends of the droid, or an empty list if they have none.", resolve=lambda droid, _info: get_friends(droid), ), "appearsIn": GraphQLField( @@ -239,7 +236,7 @@ GraphQLNonNull(GraphQLString), description="id of the human" ) }, - resolve=lambda _source, _info, id: get_human(id), + resolve=lambda _source, _info, id: get_human(id), # noqa: A006 ), "droid": GraphQLField( droid_type, @@ -248,7 +245,7 @@ GraphQLNonNull(GraphQLString), description="id of the droid" ) }, - resolve=lambda _source, _info, id: get_droid(id), + resolve=lambda _source, _info, id: get_droid(id), # noqa: A006 ), }, ) diff --git a/tests/test_star_wars_query.py b/tests/test_star_wars_query.py index 6e5bbf59..bb1008b8 100644 --- a/tests/test_star_wars_query.py +++ b/tests/test_star_wars_query.py @@ -1,4 +1,5 @@ import pytest + from graphql import graphql, graphql_sync from .star_wars_schema import star_wars_schema as schema @@ -6,7 +7,7 @@ def describe_star_wars_query_tests(): def describe_basic_queries(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def correctly_identifies_r2_d2_as_hero_of_the_star_wars_saga(): source = """ query HeroNameQuery { @@ -18,7 +19,7 @@ async def correctly_identifies_r2_d2_as_hero_of_the_star_wars_saga(): result = await graphql(schema=schema, source=source) assert result == ({"hero": {"name": "R2-D2"}}, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def accepts_positional_arguments_to_graphql(): source = """ query HeroNameQuery { @@ -33,7 +34,7 @@ async def accepts_positional_arguments_to_graphql(): sync_result = graphql_sync(schema, source) assert sync_result == result - @pytest.mark.asyncio() + @pytest.mark.asyncio async def allows_us_to_query_for_the_id_and_friends_of_r2_d2(): source = """ query HeroNameAndFriendsQuery { @@ -63,7 +64,7 @@ async def allows_us_to_query_for_the_id_and_friends_of_r2_d2(): ) def describe_nested_queries(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def allows_us_to_query_for_the_friends_of_friends_of_r2_d2(): source = """ query NestedQuery { @@ -121,7 +122,7 @@ async def 
allows_us_to_query_for_the_friends_of_friends_of_r2_d2(): ) def describe_using_ids_and_query_parameters_to_refetch_objects(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def allows_us_to_query_for_r2_d2_directly_using_his_id(): source = """ query { @@ -133,7 +134,7 @@ async def allows_us_to_query_for_r2_d2_directly_using_his_id(): result = await graphql(schema=schema, source=source) assert result == ({"droid": {"name": "R2-D2"}}, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def allows_us_to_query_characters_directly_using_their_id(): source = """ query FetchLukeAndC3POQuery { @@ -151,7 +152,7 @@ async def allows_us_to_query_characters_directly_using_their_id(): None, ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def allows_creating_a_generic_query_to_fetch_luke_using_his_id(): source = """ query FetchSomeIDQuery($someId: String!) { @@ -166,7 +167,7 @@ async def allows_creating_a_generic_query_to_fetch_luke_using_his_id(): ) assert result == ({"human": {"name": "Luke Skywalker"}}, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def allows_creating_a_generic_query_to_fetch_han_using_his_id(): source = """ query FetchSomeIDQuery($someId: String!) { @@ -181,7 +182,7 @@ async def allows_creating_a_generic_query_to_fetch_han_using_his_id(): ) assert result == ({"human": {"name": "Han Solo"}}, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def generic_query_that_gets_null_back_when_passed_invalid_id(): source = """ query humanQuery($id: String!) { @@ -197,7 +198,7 @@ async def generic_query_that_gets_null_back_when_passed_invalid_id(): assert result == ({"human": None}, None) def describe_using_aliases_to_change_the_key_in_the_response(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def allows_us_to_query_for_luke_changing_his_key_with_an_alias(): source = """ query FetchLukeAliased { @@ -209,7 +210,7 @@ async def allows_us_to_query_for_luke_changing_his_key_with_an_alias(): result = await graphql(schema=schema, source=source) assert result == ({"luke": {"name": "Luke Skywalker"}}, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def query_for_luke_and_leia_using_two_root_fields_and_an_alias(): source = """ query FetchLukeAndLeiaAliased { @@ -228,7 +229,7 @@ async def query_for_luke_and_leia_using_two_root_fields_and_an_alias(): ) def describe_uses_fragments_to_express_more_complex_queries(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def allows_us_to_query_using_duplicated_content(): source = """ query DuplicateFields { @@ -251,7 +252,7 @@ async def allows_us_to_query_using_duplicated_content(): None, ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def allows_us_to_use_a_fragment_to_avoid_duplicating_content(): source = """ query UseFragment { @@ -277,7 +278,7 @@ async def allows_us_to_use_a_fragment_to_avoid_duplicating_content(): ) def describe_using_typename_to_find_the_type_of_an_object(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def allows_us_to_verify_that_r2_d2_is_a_droid(): source = """ query CheckTypeOfR2 { @@ -290,7 +291,7 @@ async def allows_us_to_verify_that_r2_d2_is_a_droid(): result = await graphql(schema=schema, source=source) assert result == ({"hero": {"__typename": "Droid", "name": "R2-D2"}}, None) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def allows_us_to_verify_that_luke_is_a_human(): source = """ query CheckTypeOfLuke { @@ -307,7 +308,7 @@ async def allows_us_to_verify_that_luke_is_a_human(): ) def describe_reporting_errors_raised_in_resolvers(): - 
@pytest.mark.asyncio() + @pytest.mark.asyncio async def correctly_reports_error_on_accessing_secret_backstory(): source = """ query HeroNameQuery { @@ -329,7 +330,7 @@ async def correctly_reports_error_on_accessing_secret_backstory(): ], ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def correctly_reports_error_on_accessing_backstory_in_a_list(): source = """ query HeroNameQuery { @@ -373,7 +374,7 @@ async def correctly_reports_error_on_accessing_backstory_in_a_list(): ], ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def correctly_reports_error_on_accessing_through_an_alias(): source = """ query HeroNameQuery { diff --git a/tests/test_user_registry.py b/tests/test_user_registry.py index 7d134a52..0cb2b5b9 100644 --- a/tests/test_user_registry.py +++ b/tests/test_user_registry.py @@ -12,6 +12,7 @@ from typing import Any, AsyncIterable, NamedTuple import pytest + from graphql import ( GraphQLArgument, GraphQLBoolean, @@ -212,13 +213,13 @@ async def resolve_subscription_user(event, info, id): # noqa: ARG001, A002 ) -@pytest.fixture() +@pytest.fixture def context(): return {"registry": UserRegistry()} def describe_query(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def query_user(context): user = await context["registry"].create( firstName="John", lastName="Doe", tweets=42, verified=True @@ -250,7 +251,7 @@ async def query_user(context): def describe_mutation(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def create_user(context): received = {} @@ -261,7 +262,7 @@ def receive(msg): return receive # noinspection PyProtectedMember - pubsub = context["registry"]._pubsub # noqa: SLF001s + pubsub = context["registry"]._pubsub # noqa: SLF001 pubsub[None].subscribers.add(subscriber("User")) pubsub["0"].subscribers.add(subscriber("User 0")) @@ -302,7 +303,7 @@ def receive(msg): "User 0": {"user": user, "mutation": MutationEnum.CREATED.value}, } - @pytest.mark.asyncio() + @pytest.mark.asyncio async def update_user(context): received = {} @@ -358,7 +359,7 @@ def receive(msg): "User 0": {"user": user, "mutation": MutationEnum.UPDATED.value}, } - @pytest.mark.asyncio() + @pytest.mark.asyncio async def delete_user(context): received = {} @@ -400,7 +401,7 @@ def receive(msg): def describe_subscription(): - @pytest.mark.asyncio() + @pytest.mark.asyncio async def subscribe_to_user_mutations(context): query = """ subscription ($userId: ID!) { diff --git a/tests/type/test_assert_name.py b/tests/type/test_assert_name.py index 55ef75c7..24ffc55d 100644 --- a/tests/type/test_assert_name.py +++ b/tests/type/test_assert_name.py @@ -1,4 +1,5 @@ import pytest + from graphql.error import GraphQLError from graphql.type import assert_enum_value_name, assert_name diff --git a/tests/type/test_definition.py b/tests/type/test_definition.py index 88ce94f7..ac7830ef 100644 --- a/tests/type/test_definition.py +++ b/tests/type/test_definition.py @@ -12,6 +12,7 @@ from typing_extensions import TypedDict import pytest + from graphql.error import GraphQLError from graphql.language import ( EnumTypeDefinitionNode, @@ -197,8 +198,7 @@ def parse_literal(_node: ValueNode, _vars=None): with pytest.raises(TypeError) as exc_info: GraphQLScalarType("SomeScalar", parse_literal=parse_literal) assert str(exc_info.value) == ( - "SomeScalar must provide both" - " 'parse_value' and 'parse_literal' functions." + "SomeScalar must provide both 'parse_value' and 'parse_literal' functions." 
) def pickles_a_custom_scalar_type(): diff --git a/tests/type/test_directives.py b/tests/type/test_directives.py index 3f29a947..4257d81f 100644 --- a/tests/type/test_directives.py +++ b/tests/type/test_directives.py @@ -1,4 +1,5 @@ import pytest + from graphql.error import GraphQLError from graphql.language import DirectiveDefinitionNode, DirectiveLocation from graphql.type import GraphQLArgument, GraphQLDirective, GraphQLInt, GraphQLString diff --git a/tests/type/test_extensions.py b/tests/type/test_extensions.py index 5aa087e2..d28b9482 100644 --- a/tests/type/test_extensions.py +++ b/tests/type/test_extensions.py @@ -1,4 +1,5 @@ import pytest + from graphql.type import ( GraphQLArgument, GraphQLDirective, diff --git a/tests/type/test_introspection.py b/tests/type/test_introspection.py index 09a21c31..1a52f7a2 100644 --- a/tests/type/test_introspection.py +++ b/tests/type/test_introspection.py @@ -364,6 +364,17 @@ def executes_an_introspection_query(): "isDeprecated": False, "deprecationReason": None, }, + { + "name": "isOneOf", + "args": [], + "type": { + "kind": "SCALAR", + "name": "Boolean", + "ofType": None, + }, + "isDeprecated": False, + "deprecationReason": None, + }, ], "inputFields": None, "interfaces": [], @@ -981,6 +992,12 @@ def executes_an_introspection_query(): } ], }, + { + "name": "oneOf", + "isRepeatable": False, + "locations": ["INPUT_OBJECT"], + "args": [], + }, ], } } @@ -1433,6 +1450,109 @@ def respects_the_include_deprecated_parameter_for_enum_values(): None, ) + def identifies_one_of_for_input_objects(): + schema = build_schema( + """ + input SomeInputObject @oneOf { + a: String + } + + input AnotherInputObject { + a: String + b: String + } + + type Query { + someField(someArg: SomeInputObject): String + anotherField(anotherArg: AnotherInputObject): String + } + """ + ) + + source = """ + { + oneOfInputObject: __type(name: "SomeInputObject") { + isOneOf + } + inputObject: __type(name: "AnotherInputObject") { + isOneOf + } + } + """ + + assert graphql_sync(schema=schema, source=source) == ( + { + "oneOfInputObject": { + "isOneOf": True, + }, + "inputObject": { + "isOneOf": False, + }, + }, + None, + ) + + def returns_null_for_one_of_for_other_types(): + schema = build_schema( + """ + type SomeObject implements SomeInterface { + fieldA: String + } + enum SomeEnum { + SomeObject + } + interface SomeInterface { + fieldA: String + } + union SomeUnion = SomeObject + type Query { + someField(enum: SomeEnum): SomeUnion + anotherField(enum: SomeEnum): SomeInterface + } + """ + ) + + source = """ + { + object: __type(name: "SomeObject") { + isOneOf + } + enum: __type(name: "SomeEnum") { + isOneOf + } + interface: __type(name: "SomeInterface") { + isOneOf + } + scalar: __type(name: "String") { + isOneOf + } + union: __type(name: "SomeUnion") { + isOneOf + } + } + """ + + assert graphql_sync(schema=schema, source=source) == ( + { + "object": { + "isOneOf": None, + }, + "enum": { + "isOneOf": None, + }, + "interface": { + "isOneOf": None, + }, + "scalar": { + "isOneOf": None, + }, + "union": { + "isOneOf": None, + }, + }, + None, + ) + def fails_as_expected_on_the_type_root_field_without_an_arg(): schema = build_schema( """ diff --git a/tests/type/test_predicate.py b/tests/type/test_predicate.py index bd006e74..c741eca3 100644 --- a/tests/type/test_predicate.py +++ b/tests/type/test_predicate.py @@ -1,6 +1,7 @@ from typing import Any import pytest + from graphql.language import DirectiveLocation from graphql.type import ( GraphQLArgument, diff --git 
a/tests/type/test_scalars.py b/tests/type/test_scalars.py index 27255388..0ef5e548 100644 --- a/tests/type/test_scalars.py +++ b/tests/type/test_scalars.py @@ -3,6 +3,7 @@ from typing import Any import pytest + from graphql.error import GraphQLError from graphql.language import parse_value as parse_value_to_ast from graphql.pyutils import Undefined diff --git a/tests/type/test_schema.py b/tests/type/test_schema.py index f589302b..e678de35 100644 --- a/tests/type/test_schema.py +++ b/tests/type/test_schema.py @@ -1,6 +1,7 @@ from copy import deepcopy import pytest + from graphql.language import ( DirectiveLocation, SchemaDefinitionNode, diff --git a/tests/type/test_validation.py b/tests/type/test_validation.py index eb4e2ab7..a4efe041 100644 --- a/tests/type/test_validation.py +++ b/tests/type/test_validation.py @@ -3,6 +3,7 @@ from operator import attrgetter import pytest + from graphql.language import DirectiveLocation, parse from graphql.pyutils import inspect from graphql.type import ( @@ -241,8 +242,7 @@ def rejects_a_schema_whose_query_root_type_is_not_an_object_type(): ) assert validate_schema(schema) == [ { - "message": "Query root type must be Object type," - " it cannot be Query.", + "message": "Query root type must be Object type, it cannot be Query.", "locations": [(2, 13)], } ] @@ -1593,6 +1593,49 @@ def rejects_with_relevant_locations_for_a_non_input_type(): ] +def describe_type_system_one_of_input_object_fields_must_be_nullable(): + def rejects_non_nullable_fields(): + schema = build_schema( + """ + type Query { + test(arg: SomeInputObject): String + } + + input SomeInputObject @oneOf { + a: String + b: String! + } + """ + ) + assert validate_schema(schema) == [ + { + "message": "OneOf input field SomeInputObject.b must be nullable.", + "locations": [(8, 18)], + } + ] + + def rejects_fields_with_default_values(): + schema = build_schema( + """ + type Query { + test(arg: SomeInputObject): String + } + + input SomeInputObject @oneOf { + a: String + b: String = "foo" + } + """ + ) + assert validate_schema(schema) == [ + { + "message": "OneOf input field SomeInputObject.b" + " cannot have a default value.", + "locations": [(8, 15)], + } + ] + + def describe_objects_must_adhere_to_interfaces_they_implement(): def accepts_an_object_which_implements_an_interface(): schema = build_schema( diff --git a/tests/utilities/test_ast_from_value.py b/tests/utilities/test_ast_from_value.py index 1432d7a4..947f2b18 100644 --- a/tests/utilities/test_ast_from_value.py +++ b/tests/utilities/test_ast_from_value.py @@ -1,6 +1,7 @@ from math import inf, nan import pytest + from graphql.error import GraphQLError from graphql.language import ( BooleanValueNode, diff --git a/tests/utilities/test_build_ast_schema.py b/tests/utilities/test_build_ast_schema.py index a0aefb1a..d0196bd7 100644 --- a/tests/utilities/test_build_ast_schema.py +++ b/tests/utilities/test_build_ast_schema.py @@ -7,6 +7,7 @@ from typing import Union import pytest + from graphql import graphql_sync from graphql.language import DocumentNode, InterfaceTypeDefinitionNode, parse, print_ast from graphql.type import ( @@ -22,6 +23,7 @@ GraphQLInputField, GraphQLInt, GraphQLNamedType, + GraphQLOneOfDirective, GraphQLSchema, GraphQLSkipDirective, GraphQLSpecifiedByDirective, @@ -237,14 +239,15 @@ def supports_descriptions(): ) assert cycle_sdl(sdl) == sdl - def maintains_include_skip_and_specified_by_url_directives(): + def maintains_include_skip_and_three_other_directives(): schema = build_schema("type Query") - assert 
len(schema.directives) == 4 + assert len(schema.directives) == 5 assert schema.get_directive("skip") is GraphQLSkipDirective assert schema.get_directive("include") is GraphQLIncludeDirective assert schema.get_directive("deprecated") is GraphQLDeprecatedDirective assert schema.get_directive("specifiedBy") is GraphQLSpecifiedByDirective + assert schema.get_directive("oneOf") is GraphQLOneOfDirective def overriding_directives_excludes_specified(): schema = build_schema( @@ -253,10 +256,11 @@ def overriding_directives_excludes_specified(): directive @include on FIELD directive @deprecated on FIELD_DEFINITION directive @specifiedBy on FIELD_DEFINITION + directive @oneOf on OBJECT """ ) - assert len(schema.directives) == 4 + assert len(schema.directives) == 5 get_directive = schema.get_directive assert get_directive("skip") is not GraphQLSkipDirective assert get_directive("skip") is not None @@ -266,19 +270,22 @@ def overriding_directives_excludes_specified(): assert get_directive("deprecated") is not None assert get_directive("specifiedBy") is not GraphQLSpecifiedByDirective assert get_directive("specifiedBy") is not None + assert get_directive("oneOf") is not GraphQLOneOfDirective + assert get_directive("oneOf") is not None - def adding_directives_maintains_include_skip_and_specified_by_directives(): + def adding_directives_maintains_include_skip_and_three_other_directives(): schema = build_schema( """ directive @foo(arg: Int) on FIELD """ ) - assert len(schema.directives) == 5 + assert len(schema.directives) == 6 assert schema.get_directive("skip") is GraphQLSkipDirective assert schema.get_directive("include") is GraphQLIncludeDirective assert schema.get_directive("deprecated") is GraphQLDeprecatedDirective assert schema.get_directive("specifiedBy") is GraphQLSpecifiedByDirective + assert schema.get_directive("oneOf") is GraphQLOneOfDirective assert schema.get_directive("foo") is not None def type_modifiers(): @@ -1133,7 +1140,7 @@ def can_build_invalid_schema(): assert errors def do_not_override_standard_types(): - # Note: not sure it's desired behaviour to just silently ignore override + # Note: not sure it's desired behavior to just silently ignore override # attempts so just documenting it here. 
schema = build_schema( @@ -1215,6 +1222,25 @@ def can_deep_copy_schema(): # check that printing the copied schema gives the same SDL assert print_schema(copied) == sdl + def can_deep_copy_schema_with_directive_using_args_of_custom_type(): + sdl = dedent(""" + directive @someDirective(someArg: SomeEnum) on FIELD_DEFINITION + + enum SomeEnum { + ONE + TWO + } + + type Query { + someField: String @someDirective(someArg: ONE) + } + """) + schema = build_schema(sdl) + copied = deepcopy(schema) + # custom directives on field definitions cannot be reproduced + expected_sdl = sdl.replace(" @someDirective(someArg: ONE)", "") + assert print_schema(copied) == expected_sdl + def can_pickle_and_unpickle_star_wars_schema(): # create a schema from the star wars SDL schema = build_schema(sdl, assume_valid_sdl=True) @@ -1246,7 +1272,7 @@ def can_deep_copy_pickled_schema(): # check that printing the copied schema gives the same SDL assert print_schema(copied) == sdl - @pytest.mark.slow() + @pytest.mark.slow def describe_deepcopy_and_pickle_big(): # pragma: no cover @pytest.mark.timeout(20) def can_deep_copy_big_schema(big_schema_sdl): # noqa: F811 diff --git a/tests/utilities/test_build_client_schema.py b/tests/utilities/test_build_client_schema.py index 518fb5bf..1455f473 100644 --- a/tests/utilities/test_build_client_schema.py +++ b/tests/utilities/test_build_client_schema.py @@ -1,6 +1,7 @@ -from typing import cast +from typing import TYPE_CHECKING, cast import pytest + from graphql import graphql_sync from graphql.type import ( GraphQLArgument, @@ -22,14 +23,16 @@ introspection_from_schema, print_schema, ) -from graphql.utilities.get_introspection_query import ( - IntrospectionEnumType, - IntrospectionInputObjectType, - IntrospectionInterfaceType, - IntrospectionObjectType, - IntrospectionType, - IntrospectionUnionType, -) + +if TYPE_CHECKING: + from graphql.utilities.get_introspection_query import ( + IntrospectionEnumType, + IntrospectionInputObjectType, + IntrospectionInterfaceType, + IntrospectionObjectType, + IntrospectionType, + IntrospectionUnionType, + ) from ..utils import dedent @@ -714,7 +717,9 @@ def throws_when_missing_definition_for_one_of_the_standard_scalars(): def throws_when_type_reference_is_missing_name(): introspection = introspection_from_schema(dummy_schema) - query_type = cast(IntrospectionType, introspection["__schema"]["queryType"]) + query_type = cast( + "IntrospectionType", introspection["__schema"]["queryType"] + ) assert query_type["name"] == "Query" del query_type["name"] # type: ignore @@ -744,7 +749,7 @@ def throws_when_missing_kind(): def throws_when_missing_interfaces(): introspection = introspection_from_schema(dummy_schema) query_type_introspection = cast( - IntrospectionObjectType, + "IntrospectionObjectType", next( type_ for type_ in introspection["__schema"]["types"] @@ -766,7 +771,7 @@ def throws_when_missing_interfaces(): def legacy_support_for_interfaces_with_null_as_interfaces_field(): introspection = introspection_from_schema(dummy_schema) some_interface_introspection = cast( - IntrospectionInterfaceType, + "IntrospectionInterfaceType", next( type_ for type_ in introspection["__schema"]["types"] @@ -783,7 +788,7 @@ def legacy_support_for_interfaces_with_null_as_interfaces_field(): def throws_when_missing_fields(): introspection = introspection_from_schema(dummy_schema) query_type_introspection = cast( - IntrospectionObjectType, + "IntrospectionObjectType", next( type_ for type_ in introspection["__schema"]["types"] @@ -805,7 +810,7 @@ def 
throws_when_missing_fields(): def throws_when_missing_field_args(): introspection = introspection_from_schema(dummy_schema) query_type_introspection = cast( - IntrospectionObjectType, + "IntrospectionObjectType", next( type_ for type_ in introspection["__schema"]["types"] @@ -827,7 +832,7 @@ def throws_when_missing_field_args(): def throws_when_output_type_is_used_as_an_arg_type(): introspection = introspection_from_schema(dummy_schema) query_type_introspection = cast( - IntrospectionObjectType, + "IntrospectionObjectType", next( type_ for type_ in introspection["__schema"]["types"] @@ -851,7 +856,7 @@ def throws_when_output_type_is_used_as_an_arg_type(): def throws_when_output_type_is_used_as_an_input_value_type(): introspection = introspection_from_schema(dummy_schema) input_object_type_introspection = cast( - IntrospectionInputObjectType, + "IntrospectionInputObjectType", next( type_ for type_ in introspection["__schema"]["types"] @@ -875,7 +880,7 @@ def throws_when_output_type_is_used_as_an_input_value_type(): def throws_when_input_type_is_used_as_a_field_type(): introspection = introspection_from_schema(dummy_schema) query_type_introspection = cast( - IntrospectionObjectType, + "IntrospectionObjectType", next( type_ for type_ in introspection["__schema"]["types"] @@ -899,7 +904,7 @@ def throws_when_input_type_is_used_as_a_field_type(): def throws_when_missing_possible_types(): introspection = introspection_from_schema(dummy_schema) some_union_introspection = cast( - IntrospectionUnionType, + "IntrospectionUnionType", next( type_ for type_ in introspection["__schema"]["types"] @@ -920,7 +925,7 @@ def throws_when_missing_possible_types(): def throws_when_missing_enum_values(): introspection = introspection_from_schema(dummy_schema) some_enum_introspection = cast( - IntrospectionEnumType, + "IntrospectionEnumType", next( type_ for type_ in introspection["__schema"]["types"] @@ -941,7 +946,7 @@ def throws_when_missing_enum_values(): def throws_when_missing_input_fields(): introspection = introspection_from_schema(dummy_schema) some_input_object_introspection = cast( - IntrospectionInputObjectType, + "IntrospectionInputObjectType", next( type_ for type_ in introspection["__schema"]["types"] @@ -990,11 +995,11 @@ def throws_when_missing_directive_args(): build_client_schema(introspection) def describe_very_deep_decorators_are_not_supported(): - def fails_on_very_deep_lists_more_than_7_levels(): + def fails_on_very_deep_lists_more_than_8_levels(): schema = build_schema( """ type Query { - foo: [[[[[[[[String]]]]]]]] + foo: [[[[[[[[[[String]]]]]]]]]] } """ ) @@ -1009,11 +1014,11 @@ def fails_on_very_deep_lists_more_than_7_levels(): " Decorated type deeper than introspection query." ) - def fails_on_a_very_deep_non_null_more_than_7_levels(): + def fails_on_a_very_deep_more_than_8_levels_non_null(): schema = build_schema( """ type Query { - foo: [[[[String!]!]!]!] + foo: [[[[[String!]!]!]!]!] } """ ) @@ -1028,12 +1033,12 @@ def fails_on_a_very_deep_non_null_more_than_7_levels(): " Decorated type deeper than introspection query." ) - def succeeds_on_deep_types_less_or_equal_7_levels(): - # e.g., fully non-null 3D matrix + def succeeds_on_deep_less_or_equal_8_levels_types(): + # e.g., fully non-null 4D matrix sdl = dedent( """ type Query { - foo: [[[String!]!]!]! + foo: [[[[String!]!]!]!]! 
} """ ) @@ -1054,7 +1059,7 @@ def recursive_interfaces(): schema = build_schema(sdl, assume_valid=True) introspection = introspection_from_schema(schema) foo_introspection = cast( - IntrospectionObjectType, + "IntrospectionObjectType", next( type_ for type_ in introspection["__schema"]["types"] diff --git a/tests/utilities/test_coerce_input_value.py b/tests/utilities/test_coerce_input_value.py index 61b1feab..5882e5c5 100644 --- a/tests/utilities/test_coerce_input_value.py +++ b/tests/utilities/test_coerce_input_value.py @@ -1,9 +1,10 @@ from __future__ import annotations -from math import nan +from math import isnan, nan from typing import Any, NamedTuple import pytest + from graphql.error import GraphQLError from graphql.pyutils import Undefined from graphql.type import ( @@ -90,7 +91,7 @@ def returns_no_error_for_null_result(): def returns_no_error_for_nan_result(): result = _coerce_value({"value": nan}, TestScalar) - assert expect_value(result) is nan + assert isnan(expect_value(result)) def returns_an_error_for_undefined_result(): result = _coerce_value({"value": Undefined}, TestScalar) @@ -250,6 +251,99 @@ def transforms_values_with_out_type(): result = _coerce_value({"real": 1, "imag": 2}, ComplexInputObject) assert expect_value(result) == 1 + 2j + def describe_for_graphql_input_object_that_is_one_of(): + TestInputObject = GraphQLInputObjectType( + "TestInputObject", + { + "foo": GraphQLInputField(GraphQLInt), + "bar": GraphQLInputField(GraphQLInt), + }, + is_one_of=True, + ) + + def returns_no_error_for_a_valid_input(): + result = _coerce_value({"foo": 123}, TestInputObject) + assert expect_value(result) == {"foo": 123} + + def returns_an_error_if_more_than_one_field_is_specified(): + result = _coerce_value({"foo": 123, "bar": None}, TestInputObject) + assert expect_errors(result) == [ + ( + "Exactly one key must be specified" + " for OneOf type 'TestInputObject'.", + [], + {"foo": 123, "bar": None}, + ) + ] + + def returns_an_error_if_the_one_field_is_null(): + result = _coerce_value({"bar": None}, TestInputObject) + assert expect_errors(result) == [ + ( + "Field 'bar' must be non-null.", + ["bar"], + None, + ) + ] + + def returns_an_error_for_an_invalid_field(): + result = _coerce_value({"foo": nan}, TestInputObject) + assert expect_errors(result) == [ + ( + "Int cannot represent non-integer value: nan", + ["foo"], + nan, + ) + ] + + def returns_multiple_errors_for_multiple_invalid_fields(): + result = _coerce_value({"foo": "abc", "bar": "def"}, TestInputObject) + assert expect_errors(result) == [ + ( + "Int cannot represent non-integer value: 'abc'", + ["foo"], + "abc", + ), + ( + "Int cannot represent non-integer value: 'def'", + ["bar"], + "def", + ), + ( + "Exactly one key must be specified" + " for OneOf type 'TestInputObject'.", + [], + {"foo": "abc", "bar": "def"}, + ), + ] + + def returns_an_error_for_an_unknown_field(): + result = _coerce_value({"foo": 123, "unknownField": 123}, TestInputObject) + assert expect_errors(result) == [ + ( + "Field 'unknownField' is not defined by type 'TestInputObject'.", + [], + {"foo": 123, "unknownField": 123}, + ) + ] + + def returns_an_error_for_a_misspelled_field(): + result = _coerce_value({"bart": 123}, TestInputObject) + assert expect_errors(result) == [ + ( + "Field 'bart' is not defined by type 'TestInputObject'." 
+ " Did you mean 'bar'?", + [], + {"bart": 123}, + ), + ( + "Exactly one key must be specified" + " for OneOf type 'TestInputObject'.", + [], + {"bart": 123}, + ), + ] + def describe_for_graphql_input_object_with_default_value(): def _get_test_input_object(default_value): return GraphQLInputObjectType( @@ -277,7 +371,7 @@ def returns_nan_as_value(): result = _coerce_value({}, _get_test_input_object(nan)) result_value = expect_value(result) assert "foo" in result_value - assert result_value["foo"] is nan + assert isnan(result_value["foo"]) def describe_for_graphql_list(): TestList = GraphQLList(GraphQLInt) diff --git a/tests/utilities/test_extend_schema.py b/tests/utilities/test_extend_schema.py index 75c70efd..1eb98d38 100644 --- a/tests/utilities/test_extend_schema.py +++ b/tests/utilities/test_extend_schema.py @@ -3,6 +3,7 @@ from typing import Union import pytest + from graphql import graphql_sync from graphql.language import parse, print_ast from graphql.type import ( @@ -1362,8 +1363,7 @@ def does_not_allow_replacing_a_default_directive(): with pytest.raises(TypeError) as exc_info: extend_schema(schema, extend_ast) assert str(exc_info.value).startswith( - "Directive '@include' already exists in the schema." - " It cannot be redefined." + "Directive '@include' already exists in the schema. It cannot be redefined." ) def does_not_allow_replacing_an_existing_enum_value(): diff --git a/tests/utilities/test_find_breaking_changes.py b/tests/utilities/test_find_breaking_changes.py index c9003a6c..bfcc7e72 100644 --- a/tests/utilities/test_find_breaking_changes.py +++ b/tests/utilities/test_find_breaking_changes.py @@ -1,6 +1,7 @@ from graphql.type import ( GraphQLDeprecatedDirective, GraphQLIncludeDirective, + GraphQLOneOfDirective, GraphQLSchema, GraphQLSkipDirective, GraphQLSpecifiedByDirective, @@ -754,8 +755,7 @@ def should_detect_all_breaking_changes(): ), ( BreakingChangeType.TYPE_CHANGED_KIND, - "TypeThatChangesType changed from an Object type to an" - " Interface type.", + "TypeThatChangesType changed from an Object type to an Interface type.", ), ( BreakingChangeType.FIELD_REMOVED, @@ -817,6 +817,7 @@ def should_detect_if_a_directive_was_implicitly_removed(): GraphQLSkipDirective, GraphQLIncludeDirective, GraphQLSpecifiedByDirective, + GraphQLOneOfDirective, ] ) diff --git a/tests/utilities/test_introspection_from_schema.py b/tests/utilities/test_introspection_from_schema.py index 895ade9a..1c9dbd52 100644 --- a/tests/utilities/test_introspection_from_schema.py +++ b/tests/utilities/test_introspection_from_schema.py @@ -3,6 +3,7 @@ from copy import deepcopy import pytest + from graphql.type import GraphQLField, GraphQLObjectType, GraphQLSchema, GraphQLString from graphql.utilities import ( IntrospectionQuery, @@ -105,7 +106,7 @@ def can_deep_copy_pickled_schema(): # check that introspecting the copied schema gives the same result assert introspection_from_schema(copied) == introspected_schema - @pytest.mark.slow() + @pytest.mark.slow def describe_deepcopy_and_pickle_big(): # pragma: no cover @pytest.mark.timeout(20) def can_deep_copy_big_schema(big_schema_sdl): # noqa: F811 diff --git a/tests/utilities/test_print_schema.py b/tests/utilities/test_print_schema.py index 1939ed59..0f5f35b2 100644 --- a/tests/utilities/test_print_schema.py +++ b/tests/utilities/test_print_schema.py @@ -514,6 +514,22 @@ def prints_input_type(): """ ) + def prints_input_type_with_one_of_directive(): + input_type = GraphQLInputObjectType( + name="InputType", + fields={"int": GraphQLInputField(GraphQLInt)}, 
+ is_one_of=True, + ) + + schema = GraphQLSchema(types=[input_type]) + assert expect_printed_schema(schema) == dedent( + """ + input InputType @oneOf { + int: Int + } + """ + ) + def prints_custom_scalar(): odd_type = GraphQLScalarType(name="Odd") @@ -555,7 +571,7 @@ def prints_enum(): def prints_empty_types(): schema = GraphQLSchema( types=[ - GraphQLEnumType("SomeEnum", cast(Dict[str, Any], {})), + GraphQLEnumType("SomeEnum", cast("Dict[str, Any]", {})), GraphQLInputObjectType("SomeInputObject", {}), GraphQLInterfaceType("SomeInterface", {}), GraphQLObjectType("SomeObject", {}), @@ -765,12 +781,17 @@ def prints_introspection_schema(): reason: String = "No longer supported" ) on FIELD_DEFINITION | ARGUMENT_DEFINITION | INPUT_FIELD_DEFINITION | ENUM_VALUE - """Exposes a URL that specifies the behaviour of this scalar.""" + """Exposes a URL that specifies the behavior of this scalar.""" directive @specifiedBy( - """The URL that specifies the behaviour of this scalar.""" + """The URL that specifies the behavior of this scalar.""" url: String! ) on SCALAR + """ + Indicates exactly one field must be supplied and this field must not be `null`. + """ + directive @oneOf on INPUT_OBJECT + """ A GraphQL Schema defines the capabilities of a GraphQL server. It exposes all available types and directives on the server, as well as the entry points for query, mutation, and subscription operations. """ @@ -813,6 +834,7 @@ def prints_introspection_schema(): enumValues(includeDeprecated: Boolean = false): [__EnumValue!] inputFields(includeDeprecated: Boolean = false): [__InputValue!] ofType: __Type + isOneOf: Boolean } """An enum describing what kind of type a given `__Type` is.""" diff --git a/tests/utilities/test_strip_ignored_characters.py b/tests/utilities/test_strip_ignored_characters.py index d708bfdb..cdc6062d 100644 --- a/tests/utilities/test_strip_ignored_characters.py +++ b/tests/utilities/test_strip_ignored_characters.py @@ -1,6 +1,7 @@ from __future__ import annotations import pytest + from graphql.error import GraphQLSyntaxError from graphql.language import Lexer, Source, TokenKind, parse from graphql.utilities import strip_ignored_characters diff --git a/tests/utilities/test_strip_ignored_characters_fuzz.py b/tests/utilities/test_strip_ignored_characters_fuzz.py index 85c43aec..4c276e07 100644 --- a/tests/utilities/test_strip_ignored_characters_fuzz.py +++ b/tests/utilities/test_strip_ignored_characters_fuzz.py @@ -3,6 +3,7 @@ from json import dumps import pytest + from graphql.error import GraphQLSyntaxError from graphql.language import Lexer, Source, TokenKind from graphql.utilities import strip_ignored_characters @@ -74,7 +75,7 @@ def lex_value(s: str) -> str | None: def describe_strip_ignored_characters(): - @pytest.mark.slow() + @pytest.mark.slow @pytest.mark.timeout(10) def strips_documents_with_random_combination_of_ignored_characters(): for ignored in ignored_tokens: @@ -85,7 +86,7 @@ def strips_documents_with_random_combination_of_ignored_characters(): ExpectStripped("".join(ignored_tokens)).to_equal("") - @pytest.mark.slow() + @pytest.mark.slow @pytest.mark.timeout(10) def strips_random_leading_and_trailing_ignored_tokens(): for token in punctuator_tokens + non_punctuator_tokens: @@ -100,7 +101,7 @@ def strips_random_leading_and_trailing_ignored_tokens(): ExpectStripped("".join(ignored_tokens) + token).to_equal(token) ExpectStripped(token + "".join(ignored_tokens)).to_equal(token) - @pytest.mark.slow() + @pytest.mark.slow @pytest.mark.timeout(10) def 
strips_random_ignored_tokens_between_punctuator_tokens(): for left in punctuator_tokens: @@ -117,7 +118,7 @@ def strips_random_ignored_tokens_between_punctuator_tokens(): left + right ) - @pytest.mark.slow() + @pytest.mark.slow @pytest.mark.timeout(10) def strips_random_ignored_tokens_between_punctuator_and_non_punctuator_tokens(): for non_punctuator in non_punctuator_tokens: @@ -136,7 +137,7 @@ def strips_random_ignored_tokens_between_punctuator_and_non_punctuator_tokens(): punctuator + "".join(ignored_tokens) + non_punctuator ).to_equal(punctuator + non_punctuator) - @pytest.mark.slow() + @pytest.mark.slow @pytest.mark.timeout(10) def strips_random_ignored_tokens_between_non_punctuator_and_punctuator_tokens(): for non_punctuator in non_punctuator_tokens: @@ -159,7 +160,7 @@ def strips_random_ignored_tokens_between_non_punctuator_and_punctuator_tokens(): non_punctuator + "".join(ignored_tokens) + punctuator ).to_equal(non_punctuator + punctuator) - @pytest.mark.slow() + @pytest.mark.slow @pytest.mark.timeout(10) def replace_random_ignored_tokens_between_non_punctuator_and_spread_with_space(): for non_punctuator in non_punctuator_tokens: @@ -177,7 +178,7 @@ def replace_random_ignored_tokens_between_non_punctuator_and_spread_with_space() non_punctuator + " ..." ) - @pytest.mark.slow() + @pytest.mark.slow @pytest.mark.timeout(10) def replace_random_ignored_tokens_between_non_punctuator_tokens_with_space(): for left in non_punctuator_tokens: @@ -194,7 +195,7 @@ def replace_random_ignored_tokens_between_non_punctuator_tokens_with_space(): left + " " + right ) - @pytest.mark.slow() + @pytest.mark.slow @pytest.mark.timeout(10) def does_not_strip_random_ignored_tokens_embedded_in_the_string(): for ignored in ignored_tokens: @@ -205,7 +206,7 @@ def does_not_strip_random_ignored_tokens_embedded_in_the_string(): ExpectStripped(dumps("".join(ignored_tokens))).to_stay_the_same() - @pytest.mark.slow() + @pytest.mark.slow @pytest.mark.timeout(10) def does_not_strip_random_ignored_tokens_embedded_in_the_block_string(): ignored_tokens_without_formatting = [ @@ -226,7 +227,7 @@ def does_not_strip_random_ignored_tokens_embedded_in_the_block_string(): '"""|' + "".join(ignored_tokens_without_formatting) + '|"""' ).to_stay_the_same() - @pytest.mark.slow() + @pytest.mark.slow @pytest.mark.timeout(80) def strips_ignored_characters_inside_random_block_strings(): # Testing with length >7 is taking exponentially more time. 
However it is diff --git a/tests/utilities/test_type_from_ast.py b/tests/utilities/test_type_from_ast.py index 282c8f50..fa75a9f9 100644 --- a/tests/utilities/test_type_from_ast.py +++ b/tests/utilities/test_type_from_ast.py @@ -1,4 +1,5 @@ import pytest + from graphql.language import TypeNode, parse_type from graphql.type import GraphQLList, GraphQLNonNull, GraphQLObjectType from graphql.utilities import type_from_ast diff --git a/tests/utilities/test_type_info.py b/tests/utilities/test_type_info.py index d23b878b..01f7e464 100644 --- a/tests/utilities/test_type_info.py +++ b/tests/utilities/test_type_info.py @@ -375,8 +375,7 @@ def leave(*args): assert print_ast(edited_ast) == print_ast( parse( - "{ human(id: 4) { name, pets { __typename } }," " alien { __typename } }" + "{ human(id: 4) { name, pets { __typename } }, alien { __typename } }" ) ) diff --git a/tests/utilities/test_value_from_ast.py b/tests/utilities/test_value_from_ast.py index f21abcc2..6622b4dc 100644 --- a/tests/utilities/test_value_from_ast.py +++ b/tests/utilities/test_value_from_ast.py @@ -174,6 +174,15 @@ def coerces_non_null_lists_of_non_null_values(): }, ) + test_one_of_input_obj = GraphQLInputObjectType( + "TestOneOfInput", + { + "a": GraphQLInputField(GraphQLString), + "b": GraphQLInputField(GraphQLString), + }, + is_one_of=True, + ) + def coerces_input_objects_according_to_input_coercion_rules(): assert _value_from("null", test_input_obj) is None assert _value_from("[]", test_input_obj) is Undefined @@ -193,6 +202,14 @@ def coerces_input_objects_according_to_input_coercion_rules(): ) assert _value_from("{ requiredBool: null }", test_input_obj) is Undefined assert _value_from("{ bool: true }", test_input_obj) is Undefined + assert _value_from('{ a: "abc" }', test_one_of_input_obj) == {"a": "abc"} + assert _value_from('{ b: "def" }', test_one_of_input_obj) == {"b": "def"} + assert _value_from('{ a: "abc", b: null }', test_one_of_input_obj) is Undefined + assert _value_from("{ a: null }", test_one_of_input_obj) is Undefined + assert _value_from("{ a: 1 }", test_one_of_input_obj) is Undefined + assert _value_from('{ a: "abc", b: "def" }', test_one_of_input_obj) is Undefined + assert _value_from("{}", test_one_of_input_obj) is Undefined + assert _value_from('{ c: "abc" }', test_one_of_input_obj) is Undefined def accepts_variable_values_assuming_already_coerced(): assert _value_from("$var", GraphQLBoolean, {}) is Undefined diff --git a/tests/utilities/test_value_from_ast_untyped.py b/tests/utilities/test_value_from_ast_untyped.py index 0461cc20..354cfe74 100644 --- a/tests/utilities/test_value_from_ast_untyped.py +++ b/tests/utilities/test_value_from_ast_untyped.py @@ -1,6 +1,6 @@ from __future__ import annotations -from math import nan +from math import isnan, nan from typing import Any from graphql.language import FloatValueNode, IntValueNode, parse_value @@ -14,8 +14,8 @@ def _compare_value(value: Any, expected: Any): assert value is None elif expected is Undefined: assert value is Undefined - elif expected is nan: - assert value is nan + elif isinstance(expected, float) and isnan(expected): + assert isnan(value) else: assert value == expected @@ -65,7 +65,7 @@ def parses_variables(): _expect_value_from_vars("$testVariable", None, Undefined) def parse_invalid_int_as_nan(): - assert value_from_ast_untyped(IntValueNode(value="invalid")) is nan + assert isnan(value_from_ast_untyped(IntValueNode(value="invalid"))) def parse_invalid_float_as_nan(): - assert value_from_ast_untyped(FloatValueNode(value="invalid")) is
nan + assert isnan(value_from_ast_untyped(FloatValueNode(value="invalid"))) diff --git a/tests/utils/__init__.py b/tests/utils/__init__.py index 6ae4a6e5..ea374993 100644 --- a/tests/utils/__init__.py +++ b/tests/utils/__init__.py @@ -8,8 +8,8 @@ from .viral_sdl import viral_sdl __all__ = [ - "assert_matching_values", "assert_equal_awaitables_or_values", + "assert_matching_values", "dedent", "gen_fuzz_strings", "viral_schema", diff --git a/tests/utils/assert_equal_awaitables_or_values.py b/tests/utils/assert_equal_awaitables_or_values.py index 8ed8d175..964db1a8 100644 --- a/tests/utils/assert_equal_awaitables_or_values.py +++ b/tests/utils/assert_equal_awaitables_or_values.py @@ -15,7 +15,7 @@ def assert_equal_awaitables_or_values(*items: T) -> T: """Check whether the items are the same and either all awaitables or all values.""" if all(is_awaitable(item) for item in items): - awaitable_items = cast(Tuple[Awaitable], items) + awaitable_items = cast("Tuple[Awaitable]", items) async def assert_matching_awaitables(): return assert_matching_values(*(await asyncio.gather(*awaitable_items))) diff --git a/tests/utils/test_assert_equal_awaitables_or_values.py b/tests/utils/test_assert_equal_awaitables_or_values.py index 214acfea..3e60fbcb 100644 --- a/tests/utils/test_assert_equal_awaitables_or_values.py +++ b/tests/utils/test_assert_equal_awaitables_or_values.py @@ -15,7 +15,7 @@ def does_not_throw_when_given_equal_values(): == test_value ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def does_not_throw_when_given_equal_awaitables(): async def test_value(): return {"test": "test"} @@ -27,7 +27,7 @@ async def test_value(): == await test_value() ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def throws_when_given_unequal_awaitables(): async def test_value(value): return value @@ -37,7 +37,7 @@ async def test_value(value): test_value({}), test_value({}), test_value({"test": "test"}) ) - @pytest.mark.asyncio() + @pytest.mark.asyncio async def throws_when_given_mixture_of_equal_values_and_awaitables(): async def test_value(): return {"test": "test"} diff --git a/tests/validation/harness.py b/tests/validation/harness.py index 1189e922..737fb2df 100644 --- a/tests/validation/harness.py +++ b/tests/validation/harness.py @@ -12,9 +12,9 @@ from graphql.validation import ASTValidationRule __all__ = [ - "test_schema", - "assert_validation_errors", "assert_sdl_validation_errors", + "assert_validation_errors", + "test_schema", ] test_schema = build_schema( @@ -86,6 +86,11 @@ stringListField: [String] } + input OneOfInput @oneOf { + stringField: String + intField: Int + } + type ComplicatedArgs { # TODO List # TODO Coercion @@ -100,6 +105,7 @@ stringListArgField(stringListArg: [String]): String stringListNonNullArgField(stringListNonNullArg: [String!]): String complexArgField(complexArg: ComplexInput): String + oneOfArgField(oneOfArg: OneOfInput): String multipleReqs(req1: Int!, req2: Int!): String nonNullFieldWithDefault(arg: Int! = 0): String multipleOpts(opt1: Int = 0, opt2: Int = 0): String diff --git a/tests/validation/test_validation.py b/tests/validation/test_validation.py index 37d57e9b..78efbce9 100644 --- a/tests/validation/test_validation.py +++ b/tests/validation/test_validation.py @@ -1,4 +1,5 @@ import pytest + from graphql.error import GraphQLError from graphql.language import parse from graphql.utilities import TypeInfo, build_schema @@ -70,8 +71,7 @@ def deprecated_validates_using_a_custom_type_info(): "Cannot query field 'human' on type 'QueryRoot'. 
Did you mean 'human'?", "Cannot query field 'meowsVolume' on type 'Cat'." " Did you mean 'meowsVolume'?", - "Cannot query field 'barkVolume' on type 'Dog'." - " Did you mean 'barkVolume'?", + "Cannot query field 'barkVolume' on type 'Dog'. Did you mean 'barkVolume'?", ] def validates_using_a_custom_rule(): diff --git a/tests/validation/test_values_of_correct_type.py b/tests/validation/test_values_of_correct_type.py index e19228aa..7cf20648 100644 --- a/tests/validation/test_values_of_correct_type.py +++ b/tests/validation/test_values_of_correct_type.py @@ -931,6 +931,29 @@ def full_object_with_fields_in_different_order(): """ ) + def describe_valid_one_of_input_object_value(): + def exactly_one_field(): + assert_valid( + """ + { + complicatedArgs { + oneOfArgField(oneOfArg: { stringField: "abc" }) + } + } + """ + ) + + def exactly_one_non_nullable_variable(): + assert_valid( + """ + query ($string: String!) { + complicatedArgs { + oneOfArgField(oneOfArg: { stringField: $string }) + } + } + """ + ) + def describe_invalid_input_object_value(): def partial_object_missing_required(): assert_errors( @@ -1097,6 +1120,77 @@ def allows_custom_scalar_to_accept_complex_literals(): schema=schema, ) + def describe_invalid_one_of_input_object_value(): + def invalid_field_type(): + assert_errors( + """ + { + complicatedArgs { + oneOfArgField(oneOfArg: { stringField: 2 }) + } + } + """, + [ + { + "message": "String cannot represent a non string value: 2", + "locations": [(4, 60)], + }, + ], + ) + + def exactly_one_null_field(): + assert_errors( + """ + { + complicatedArgs { + oneOfArgField(oneOfArg: { stringField: null }) + } + } + """, + [ + { + "message": "Field 'OneOfInput.stringField' must be non-null.", + "locations": [(4, 45)], + }, + ], + ) + + def exactly_one_nullable_variable(): + assert_errors( + """ + query ($string: String) { + complicatedArgs { + oneOfArgField(oneOfArg: { stringField: $string }) + } + } + """, + [ + { + "message": "Variable 'string' must be non-nullable to be used" + " for OneOf Input Object 'OneOfInput'.", + "locations": [(4, 45)], + }, + ], + ) + + def more_than_one_field(): + assert_errors( + """ + { + complicatedArgs { + oneOfArgField(oneOfArg: { stringField: "abc", intField: 123 }) + } + } + """, + [ + { + "message": "OneOf Input Object 'OneOfInput'" + " must specify exactly one key.", + "locations": [(4, 45)], + }, + ], + ) + def describe_directive_arguments(): def with_directives_of_valid_types(): assert_valid( diff --git a/tox.ini b/tox.ini index f32bcfff..685389e6 100644 --- a/tox.ini +++ b/tox.ini @@ -1,23 +1,24 @@ [tox] -envlist = py3{7,8,9,10,11,12}, pypy3{9,10}, ruff, mypy, docs +envlist = py3{7,8,9,10,11,12,13}, pypy3{9,10}, ruff, mypy, docs isolated_build = true [gh-actions] python = - 3: py311 + 3: py313 3.7: py37 3.8: py38 3.9: py39 3.10: py310 3.11: py311 3.12: py312 - pypy3: pypy9 + 3.13: py313 + pypy3: pypy39 pypy3.9: pypy39 pypy3.10: pypy310 [testenv:ruff] basepython = python3.12 -deps = ruff>=0.5.7,<0.6 +deps = ruff>=0.12,<0.13 commands = ruff check src tests ruff format --check src tests @@ -25,7 +26,7 @@ commands = [testenv:mypy] basepython = python3.12 deps = - mypy>=1.11,<2 + mypy>=1.16,<2 pytest>=8.3,<9 commands = mypy src tests @@ -33,8 +34,8 @@ commands = [testenv:docs] basepython = python3.12 deps = - sphinx>=7,<8 - sphinx_rtd_theme>=2.0,<3 + sphinx>=8,<9 + sphinx_rtd_theme>=3,<4 commands = sphinx-build -b html -nEW docs docs/_build/html @@ -42,13 +43,13 @@ commands = deps = pytest>=7.4,<9 pytest-asyncio>=0.21.1,<1 - pytest-benchmark>=4,<5 - 
pytest-cov>=4.1,<6 + pytest-benchmark>=4,<6 + pytest-cov>=4.1,<7 pytest-describe>=2.2,<3 - pytest-timeout>=2.3,<3 - py37,py38,py39,pypy39: typing-extensions>=4.7.1,<5 + pytest-timeout>=2.4,<3 + py3{7,8,9},pypy39: typing-extensions>=4.7.1,<5 commands = - # to also run the time-consuming tests: tox -e py311 -- --run-slow - # to run the benchmarks: tox -e py311 -- -k benchmarks --benchmark-enable - py37,py38,py39,py310,py311,pypy39,pypy310: pytest tests {posargs} + # to also run the time-consuming tests: tox -e py312 -- --run-slow + # to run the benchmarks: tox -e py312 -- -k benchmarks --benchmark-enable + py3{7,8,9,10,11,13},pypy3{9,10}: pytest tests {posargs} py312: pytest tests {posargs: --cov-report=term-missing --cov=graphql --cov=tests --cov-fail-under=100}
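
Most of the new test surface in this changeset covers the @oneOf directive that GraphQL.js 17 introduces for input objects, ported here along with the matching isOneOf introspection field. A minimal sketch of the feature as these tests exercise it, assuming a GraphQL-core 3.3 alpha where GraphQLOneOfDirective ships as a default directive; the Point and locate names are invented for illustration:

    from graphql import build_schema, graphql_sync

    # Hypothetical schema: exactly one of `x` or `y` may be given for `Point`.
    schema = build_schema("""
        input Point @oneOf {
            x: Int
            y: Int
        }

        type Query {
            locate(point: Point): String
        }
    """)

    # The new `isOneOf` introspection field reports True for @oneOf input
    # objects, False for plain input objects, and null for all other kinds.
    result = graphql_sync(schema, '{ __type(name: "Point") { isOneOf } }')
    assert result == ({"__type": {"isOneOf": True}}, None)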
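The new describe_type_system_one_of_input_object_fields_must_be_nullable block in test_validation.py pins down the two schema-level rules: every field of a @oneOf input object must be nullable and must not declare a default value. A small sketch under the same assumptions as above; the Bad type and its fields are made up:

    from graphql import build_schema, validate_schema

    schema = build_schema("""
        type Query {
            f(arg: Bad): String
        }

        input Bad @oneOf {
            a: String!
            b: String = "x"
        }
    """)

    # build_schema does not validate by itself; validate_schema returns
    # one error per violated rule, in field definition order.
    assert [error.message for error in validate_schema(schema)] == [
        "OneOf input field Bad.a must be nullable.",
        "OneOf input field Bad.b cannot have a default value.",
    ]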
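At runtime, the new cases in test_coerce_input_value.py enforce the value-level contract: exactly one key must be supplied, and that key must not be null. A sketch using the public coerce_input_value utility; the EitherInt type and the error-collecting callback are illustrative, not part of the changeset:

    from graphql import GraphQLInputField, GraphQLInputObjectType, GraphQLInt
    from graphql.utilities import coerce_input_value

    either_int = GraphQLInputObjectType(
        "EitherInt",
        {
            "first": GraphQLInputField(GraphQLInt),
            "second": GraphQLInputField(GraphQLInt),
        },
        is_one_of=True,
    )

    # Exactly one key: coerces like any other input object.
    assert coerce_input_value({"first": 1}, either_int) == {"first": 1}

    # Two keys: the violation is reported through the on_error callback,
    # which receives the path, the invalid value, and a GraphQLError.
    messages = []
    coerce_input_value(
        {"first": 1, "second": 2},
        either_int,
        on_error=lambda path, value, error: messages.append(error.message),
    )
    assert messages == [
        "Exactly one key must be specified for OneOf type 'EitherInt'."
    ]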
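Several assertions above switch from `value is nan` to `math.isnan(value)`. The identity check only passed by accident: it is true only when both operands are literally the same `math.nan` object, and NaN never compares equal to itself, so neither `is` nor `==` is reliable. A short demonstration:

    from math import isnan, nan

    parsed = float("nan")

    assert nan is nan          # same object, so identity "works" here
    assert parsed is not nan   # ...but not for a NaN produced elsewhere
    assert parsed != parsed    # and NaN is never equal to itself

    # isnan checks the value rather than object identity, so it works
    # for any NaN regardless of where it came from:
    assert isnan(parsed) and isnan(nan)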
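The test_build_client_schema.py changes show a common typing pattern: move imports that are only needed for annotations under TYPE_CHECKING and quote the cast target, so nothing extra is imported at runtime. A condensed sketch; first_type is a hypothetical helper, not a function from the changeset:

    from __future__ import annotations

    from typing import TYPE_CHECKING, Any, cast

    if TYPE_CHECKING:
        # Resolved by the type checker only; never imported at runtime.
        from graphql.utilities.get_introspection_query import IntrospectionType

    def first_type(introspection: dict[str, Any]) -> IntrospectionType:
        # A string target keeps cast() valid at runtime even though the
        # name is not bound there; type checkers resolve it statically.
        return cast("IntrospectionType", introspection["__schema"]["types"][0])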
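Finally, the many `@pytest.mark.asyncio()` to `@pytest.mark.asyncio` and `@pytest.fixture()` to `@pytest.fixture` rewrites are purely stylistic: both spellings register the same mark or fixture, and the bare form appears to match the parenthesis-free default that newer releases of ruff's flake8-pytest-style rules (PT001/PT023) enforce. For instance, assuming pytest-asyncio is installed:

    import pytest

    @pytest.fixture          # equivalent to @pytest.fixture()
    def registry():
        return {}

    @pytest.mark.asyncio     # equivalent to @pytest.mark.asyncio()
    async def test_registry_starts_empty(registry):
        assert registry == {}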