diff --git a/.all-contributorsrc b/.all-contributorsrc
index 610c8223..a625ed92 100644
--- a/.all-contributorsrc
+++ b/.all-contributorsrc
@@ -240,6 +240,96 @@
"contributions": [
"example"
]
+ },
+ {
+ "login": "FelixXu35",
+ "name": "Felix Xu",
+ "avatar_url": "https://avatars.githubusercontent.com/u/61252303?v=4",
+ "profile": "https://www.linkedin.com/in/felix-xu-16a153196/",
+ "contributions": [
+ "tutorial",
+ "code",
+ "test"
+ ]
+ },
+ {
+ "login": "hongyehu",
+ "name": "Hong-Ye Hu",
+ "avatar_url": "https://avatars.githubusercontent.com/u/50563225?v=4",
+ "profile": "https://scholar.harvard.edu/hongyehu/home",
+ "contributions": [
+ "doc"
+ ]
+ },
+ {
+ "login": "PeilinZHENG",
+ "name": "peilin",
+ "avatar_url": "https://avatars.githubusercontent.com/u/45784888?v=4",
+ "profile": "https://github.com/PeilinZHENG",
+ "contributions": [
+ "tutorial",
+ "code",
+ "test",
+ "doc"
+ ]
+ },
+ {
+ "login": "EmilianoG-byte",
+ "name": "Cristian Emiliano Godinez Ramirez",
+ "avatar_url": "https://avatars.githubusercontent.com/u/57567043?v=4",
+ "profile": "https://emilianog-byte.github.io",
+ "contributions": [
+ "code",
+ "test"
+ ]
+ },
+ {
+ "login": "ztzhu1",
+ "name": "ztzhu",
+ "avatar_url": "https://avatars.githubusercontent.com/u/111620128?v=4",
+ "profile": "https://github.com/ztzhu1",
+ "contributions": [
+ "code"
+ ]
+ },
+ {
+ "login": "royess",
+ "name": "Rabqubit",
+ "avatar_url": "https://avatars.githubusercontent.com/u/31059422?v=4",
+ "profile": "https://github.com/royess",
+ "contributions": [
+ "example"
+ ]
+ },
+ {
+ "login": "king-p3nguin",
+ "name": "Kazuki Tsuoka",
+ "avatar_url": "https://avatars.githubusercontent.com/u/103920010?v=4",
+ "profile": "https://github.com/king-p3nguin",
+ "contributions": [
+ "code",
+ "test",
+ "doc",
+ "example"
+ ]
+ },
+ {
+ "login": "Gopal-Dahale",
+ "name": "Gopal Ramesh Dahale",
+ "avatar_url": "https://avatars.githubusercontent.com/u/49199003?v=4",
+ "profile": "https://gopal-dahale.github.io/",
+ "contributions": [
+ "example"
+ ]
+ },
+ {
+ "login": "AbdullahKazi500",
+ "name": "Chanandellar Bong",
+ "avatar_url": "https://avatars.githubusercontent.com/u/75779966?v=4",
+ "profile": "https://github.com/AbdullahKazi500",
+ "contributions": [
+ "example"
+ ]
}
],
"contributorsPerLine": 6,
@@ -247,5 +337,6 @@
"repoType": "github",
"repoHost": "https://github.com",
"projectName": "tensorcircuit",
- "projectOwner": "tencent-quantum-lab"
+ "projectOwner": "tencent-quantum-lab",
+ "commitType": "docs"
}
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 00000000..176a458f
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1 @@
+* text=auto
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 7a0a7dff..964f9d67 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -7,7 +7,7 @@ jobs:
strategy:
matrix:
os: [ubuntu-20.04, macos-latest] # macos-latest disabled to save quota
- python-version: [3.8]
+ python-version: ["3.10"]
fail-fast: false
steps:
- uses: actions/checkout@v2
diff --git a/.github/workflows/nightly_release.yml b/.github/workflows/nightly_release.yml
index ce62cc9c..8a1dbda8 100644
--- a/.github/workflows/nightly_release.yml
+++ b/.github/workflows/nightly_release.yml
@@ -18,7 +18,7 @@ jobs:
- name: Set up Python
uses: actions/setup-python@v4
with:
- python-version: 3.8
+ python-version: "3.10"
cache: "pip"
- name: install dependencies
run: |
diff --git a/.gitignore b/.gitignore
index c1649c55..c21b5efb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,3 +28,4 @@ examples/Unified AD model.ipynb
docs/source/locale/zh/LC_MESSAGES/textbook.po
docs/source/locale/zh/LC_MESSAGES/whitepapertoc_cn.po
docs/source/locale/zh/LC_MESSAGES/textbooktoc.po
+test.qasm
diff --git a/.readthedocs.yaml b/.readthedocs.yaml
new file mode 100644
index 00000000..2acb1049
--- /dev/null
+++ b/.readthedocs.yaml
@@ -0,0 +1,24 @@
+# .readthedocs.yaml
+# Read the Docs configuration file
+# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
+
+# Required
+version: 2
+
+formats:
+ - pdf
+
+# Set the version of Python and other tools you might need
+build:
+ os: ubuntu-20.04
+ tools:
+ python: "3.8"
+
+# Build documentation in the docs/ directory with Sphinx
+sphinx:
+ configuration: docs/source/conf.py
+# We recommend specifying your dependencies to enable reproducible builds:
+# https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
+python:
+ install:
+ - requirements: requirements/requirements-rtd.txt
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 52e40c05..3f072c9a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,134 @@
## Unreleased
+### Added
+
+- Add support for parameter expression in qiskit translation
+
+## 0.12.0
+
+### Added
+
+- Add translation of r gate from qiskit
+
+- Add `det` method at backends
+
+- Add fermion Gaussian state simulator in `fgs.py`
+
+- Add `partial_transpose` and `entanglement_negativity` method in `quantum.py`
+
+- Add `reduced_wavefunction` method in `quantum.py` to get reduced pure state
+
+### Changed
+
+- move ensemble module to applications/ai (breaking changes)
+
+- tc2qiskit now records qiskit measure with incremental clbit from 0
+
+### Fixed
+
+- Support degenerate eigenvalue for jax backend `eigh` method when using AD
+
+- Fixed `cu` gate translation from qiskit to avoid qiskit bug
+
+- Fixed jax refactoring (0.4.24) where SVD and QR return a namedtuple instead of a tuple
+
+- Fix qiskit<1.0 and tf<2.16
+
+## 0.11.0
+
+### Added
+
+- Add multiple GPU VQE examples using jax pmap
+
+- Add `with_prob` option to `general_kraus` so that the probability of each option can be returned together
+
+- Add benchmark example showcasing new way of implementing matrix product using vmap
+
+- Add keras3 example showcasing integration with tc
+
+- Add circuit copy method that avoid shallow copy issue `Circuit.copy()`
+
+- Add end to end infrastructures and methods for classical shadow in `shadows.py`
+
+- Add classical shadow tutorial
+
+- Add NN-VQE tutorial
+
+### Fixed
+
+- improve the `adaptive_vmap` to support internal jit and pytree output
+
+- fix `pauli_gates` dtype unchange issue when set new dtype (not recommend to use this attr anymore)
+
+- fix rem `apply_correction` bug when non-numpy backend is set
+
+- fix tf warning for `cast` with higher version of tf
+
+### Changed
+
+- The static method `BaseCircuit.copy` is renamed as `BaseCircuit.copy_nodes` (breaking changes)
+
+## 0.10.0
+
+### Added
+
+- `c.measure_instruction(*qubits)` now supports multiple ints specified at the same time
+
+- `c.expectation_ps()` now also supports `ps` argument directly (pauli structures)
+
+- Add tc version print in `tc.about()` method
+
+- tc now supports fancy batch indexing for gates, e.g. `c.rxx([0, 1, 2], [1, 2, 3], theta=K.ones([3]))`
+
+- Task management via group tag (when `submit_task` and `list_tasks`)
+
+- `batch_expectation_ps` now supports local device without topology and thus unify the interface for numerical exact simulation, numerical simulation with measurement shots and QPU experiments
+
+- introduce two stage compiling for `batch_expectation_ps` to save some compiling overhead
+
+- Add experimental support for ODE backend pulse level control simulation/analog quantum computing
+
+- make the pulse level control support differentiating the end time
+
+- Add new qem module with qem methods: zne, dd and rc
+
+### Fixed
+
+- `tc.results.counts.plot_histogram` now can dispatch kws to corresponding qiskit method
+
+- New implementation for `c.inverse()` to partially avoid unrecognized gate name issue
+
+- Fixed bug for `batch_expectation_ps` for jax backend
+
+- Partially fix the SVD numerical stability bug on tf backend when using `MPSCircuit`
+
+- List syntax for gate now supports range
+
+## 0.9.1
+
+### Added
+
+- Add `tc.TorchHardwarLayer` for shortcut layer construction of quantum hardware experiments
+
+- Add cotengra contractor setup shortcut
+
+- Add simplecompiler module to assist qiskit compile for better performance when targeting rz native basis
+
+### Changed
+
+- Add compiler and cloud namespace to the global tensorcircuit namespace
+
+- Refactor composed compiler pipeline interface to include simple_compiler, using `DefaultCompiler` for now (breaking)
+
+- Refactor `batch_submit_template` wrapper to make it a standard abstraction layer between tc cloud infras and `batch_expectation_ps` abstraction, providing another way to adopt other cloud providers with only `batch_submit_template` implemented
+
+### Fixed
+
+- `submit_task` return (list of dict vs dict) follows the data type of provided circuit instead of the number of circuits
+
+- Fix qubit mapping related bug when using `batch_expectation_ps` or `simple_compile`
+
## 0.9.0
### Added
diff --git a/LICENSE b/LICENSE
index d6456956..253d6ac1 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,202 +1,65 @@
+Tencent is pleased to support the open source community by making tensorcircuit available.
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+Copyright (C) 2025 THL A29 Limited, a Tencent company. All rights reserved.
+
+tensorcircuit is licensed under the Apache License Version 2.0.
+
+
+Terms of the Apache License Version 2.0:
+--------------------------------------------------------------------
+Apache License
+
+Version 2.0, January 2004
+
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
+
+"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of this License; and
+
+You must cause any modified files to carry prominent notices stating that You changed the files; and
+
+You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
+
+If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
+
+You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
diff --git a/README.md b/README.md
index 848de62c..c46b54c7 100644
--- a/README.md
+++ b/README.md
@@ -29,15 +29,15 @@
English | 简体中文
-TensorCircuit is the next generation of quantum circuit simulators with support for automatic differentiation, just-in-time compiling, hardware acceleration, and vectorized parallelism.
+TensorCircuit is the next generation of quantum software framework with support for automatic differentiation, just-in-time compiling, hardware acceleration, and vectorized parallelism.
-TensorCircuit is built on top of modern machine learning frameworks and is machine learning backend agnostic. It is specifically suitable for highly efficient simulations of quantum-classical hybrid paradigm and variational quantum algorithms.
+TensorCircuit is built on top of modern machine learning frameworks: Jax, TensorFlow, and PyTorch. It is specifically suitable for highly efficient simulations of quantum-classical hybrid paradigm and variational quantum algorithms in ideal, noisy and approximate cases. It also supports real quantum hardware access and provides CPU/GPU/QPU hybrid deployment solutions since v0.9.
## Getting Started
-Please begin with [Quick Start](/docs/source/quickstart.rst).
+Please begin with [Quick Start](/docs/source/quickstart.rst) in the [full documentation](https://tensorcircuit.readthedocs.io/).
-For more information and introductions, please refer to helpful [example scripts](/examples) and [full documentation](https://tensorcircuit.readthedocs.io/). API docstrings and test cases in [tests](/tests) are also informative.
+For more information on software usage, sota algorithm implementation and engineer paradigm demonstration, please refer to 70+ [example scripts](/examples) and 30+ [tutorial notebooks](https://tensorcircuit.readthedocs.io/en/latest/#tutorials). API docstrings and test cases in [tests](/tests) are also informative.
The following are some minimal demos.
@@ -76,6 +76,57 @@ theta = tc.array_to_tensor(1.0)
print(g(theta))
```
+
+ More highlight features for TensorCircuit (click for details)
+
+- Sparse Hamiltonian generation and expectation evaluation:
+
+```python
+n = 6
+pauli_structures = []
+weights = []
+for i in range(n):
+ pauli_structures.append(tc.quantum.xyz2ps({"z": [i, (i + 1) % n]}, n=n))
+ weights.append(1.0)
+for i in range(n):
+ pauli_structures.append(tc.quantum.xyz2ps({"x": [i]}, n=n))
+ weights.append(-1.0)
+h = tc.quantum.PauliStringSum2COO(pauli_structures, weights)
+print(h)
+# BCOO(complex64[64, 64], nse=448)
+c = tc.Circuit(n)
+c.h(range(n))
+energy = tc.templates.measurements.operator_expectation(c, h)
+# -6
+```
+
+- Large-scale simulation with tensor network engine
+
+```python
+# tc.set_contractor("cotengra-30-10")
+n=500
+c = tc.Circuit(n)
+c.h(0)
+c.cx(range(n-1), range(1, n))
+c.expectation_ps(z=[0, n-1], reuse=False)
+```
+
+- Density matrix simulator and quantum info quantities
+
+```python
+c = tc.DMCircuit(2)
+c.h(0)
+c.cx(0, 1)
+c.depolarizing(1, px=0.1, py=0.1, pz=0.1)
+dm = c.state()
+print(tc.quantum.entropy(dm))
+print(tc.quantum.entanglement_entropy(dm, [0]))
+print(tc.quantum.entanglement_negativity(dm, [0]))
+print(tc.quantum.log_negativity(dm, [0]))
+```
+
+
+
## Install
The package is written in pure Python and can be obtained via pip as:
@@ -90,14 +141,7 @@ We recommend you install this package with tensorflow also installed as:
pip install tensorcircuit[tensorflow]
```
-Other optional dependencies include `[torch]`, `[jax]` and `[qiskit]`.
-
-For the nightly build of tensorcircuit with new features, try:
-
-```python
-pip uninstall tensorcircuit
-pip install tensorcircuit-nightly
-```
+Other optional dependencies include `[torch]`, `[jax]`, `[qiskit]` and `[cloud]`.
We also have [Docker support](/docker).
@@ -105,7 +149,9 @@ We also have [Docker support](/docker).
- Tensor network simulation engine based
-- JIT, AD, vectorized parallelism compatible, GPU support
+- JIT, AD, vectorized parallelism compatible
+
+- GPU support, quantum device access support, hybrid deployment support
- Efficiency
@@ -115,30 +161,93 @@ We also have [Docker support](/docker).
- Elegance
- - Flexibility: customized contraction, multiple ML backend/interface choices, multiple dtype precisions
+ - Flexibility: customized contraction, multiple ML backend/interface choices, multiple dtype precisions, multiple QPU providers
- API design: quantum for humans, less code, more power
+- Batteries included
+
+
+ Tons of amazing features and built in tools for research (click for details)
+
+ - Support **super large circuit simulation** using tensor network engine.
+
+ - Support **noisy simulation** with both Monte Carlo and density matrix (tensor network powered) modes.
+
+ - Support **approximate simulation** with MPS-TEBD modes.
+
+ - Support **analog/digital hybrid simulation** (time dependent Hamiltonian evolution, **pulse** level simulation) with neural ODE modes.
+
+ - Support **Fermion Gaussian state** simulation with expectation, entanglement, measurement, ground state, real and imaginary time evolution.
+
+ - Support **qudits simulation**.
+
+ - Support **parallel** quantum circuit evaluation across **multiple GPUs**.
+
+ - Highly customizable **noise model** with gate error and scalable readout error.
+
+ - Support for **non-unitary** gate and post-selection simulation.
+
+ - Support **real quantum devices access** from different providers.
+
+ - **Scalable readout error mitigation** native to both bitstring and expectation level with automatic qubit mapping consideration.
+
+ - **Advanced quantum error mitigation methods** and pipelines such as ZNE, DD, RC, etc.
+
+ - Support **MPS/MPO** as representations for input states, quantum gates and observables to be measured.
+
+ - Support **vectorized parallelism** on circuit inputs, circuit parameters, circuit structures, circuit measurements and these vectorization can be nested.
+
+ - Gradients can be obtained with both **automatic differentiation** and parameter shift (vmap accelerated) modes.
+
+ - **Machine learning interface/layer/model** abstraction in both TensorFlow and PyTorch for both numerical simulation and real QPU experiments.
+
+ - Circuit sampling supports both final state sampling and perfect sampling from tensor networks.
+
+ - Light cone reduction support for local expectation calculation.
+
+ - Highly customizable tensor network contraction path finder with opt-einsum interface.
+
+ - Observables are supported in measurement, sparse matrix, dense matrix and MPO format.
+
+ - Super fast weighted sum Pauli string Hamiltonian matrix generation.
+
+ - Reusable common circuit/measurement/problem templates and patterns.
+
+ - Jittable classical shadow infrastructures.
+
+ - SOTA quantum algorithm and model implementations.
+
+ - Support hybrid workflows and pipelines with CPU/GPU/QPU hardware from local/cloud/hpc resources using tf/torch/jax/cupy/numpy frameworks all at the same time.
+
+
+
## Contributing
### Status
-This project is released by [Tencent Quantum Lab](https://quantum.tencent.com/) and is created and maintained by [Shi-Xin Zhang](https://github.com/refraction-ray) with current core authors [Shi-Xin Zhang](https://github.com/refraction-ray) and [Yu-Qin Chen](https://github.com/yutuer21). We also thank [contributions](https://github.com/tencent-quantum-lab/tensorcircuit/graphs/contributors) from the lab and the open source community.
+This project is created and maintained by [Shi-Xin Zhang](https://github.com/refraction-ray) with current core authors [Shi-Xin Zhang](https://github.com/refraction-ray) and [Yu-Qin Chen](https://github.com/yutuer21). We also thank [contributions](https://github.com/tencent-quantum-lab/tensorcircuit/graphs/contributors) from the open source community.
### Citation
-If this project helps in your research, please cite our software whitepaper published in Quantum:
+If this project helps in your research, please cite our software whitepaper to acknowledge the work put into the development of TensorCircuit.
-[TensorCircuit: a Quantum Software Framework for the NISQ Era](https://quantum-journal.org/papers/q-2023-02-02-912/)
+[TensorCircuit: a Quantum Software Framework for the NISQ Era](https://quantum-journal.org/papers/q-2023-02-02-912/) (published in Quantum)
which is also a good introduction to the software.
+Research works citing TensorCircuit can be highlighted in [Research and Applications section](https://github.com/tencent-quantum-lab/tensorcircuit#research-and-applications).
+
### Guidelines
For contribution guidelines and notes, see [CONTRIBUTING](/CONTRIBUTING.md).
We welcome [issues](https://github.com/tencent-quantum-lab/tensorcircuit/issues), [PRs](https://github.com/tencent-quantum-lab/tensorcircuit/pulls), and [discussions](https://github.com/tencent-quantum-lab/tensorcircuit/discussions) from everyone, and these are all hosted on GitHub.
+### License
+
+TensorCircuit is open source, released under the Apache License, Version 2.0.
+
### Contributors
@@ -173,6 +282,17 @@ We welcome [issues](https://github.com/tencent-quantum-lab/tensorcircuit/issues)
 隐公观鱼 💻 ⚠️ |
 WiuYuan 💡 |
+  Felix Xu ✅ 💻 ⚠️ |
+  Hong-Ye Hu 📖 |
+  peilin ✅ 💻 ⚠️ 📖 |
+  Cristian Emiliano Godinez Ramirez 💻 ⚠️ |
+
+
+  ztzhu 💻 |
+  Rabqubit 💡 |
+  Kazuki Tsuoka 💻 ⚠️ 📖 💡 |
+  Gopal Ramesh Dahale 💡 |
+  Chanandellar Bong 💡 |
@@ -194,19 +314,92 @@ We welcome [issues](https://github.com/tencent-quantum-lab/tensorcircuit/issues)
### DQAS
For the application of Differentiable Quantum Architecture Search, see [applications](/tensorcircuit/applications).
-Reference paper: https://arxiv.org/pdf/2010.08561.pdf (published in QST).
+
+Reference paper: https://arxiv.org/abs/2010.08561 (published in QST).
### VQNHE
For the application of Variational Quantum-Neural Hybrid Eigensolver, see [applications](/tensorcircuit/applications).
-Reference paper: https://arxiv.org/pdf/2106.05105.pdf (published in PRL) and https://arxiv.org/pdf/2112.10380.pdf.
-### VQEX - MBL
+Reference paper: https://arxiv.org/abs/2106.05105 (published in PRL) and https://arxiv.org/abs/2112.10380 (published in AQT).
+
+### VQEX-MBL
For the application of VQEX on MBL phase identification, see the [tutorial](/docs/source/tutorials/vqex_mbl.ipynb).
-Reference paper: https://arxiv.org/pdf/2111.13719.pdf (published in PRB).
-### Stark - DTC
+Reference paper: https://arxiv.org/abs/2111.13719 (published in PRB).
+
+### Stark-DTC
For the numerical demosntration of discrete time crystal enabled by Stark many-body localization, see the Floquet simulation [demo](/examples/timeevolution_trotter.py).
-Reference paper: https://arxiv.org/pdf/2208.02866.pdf (published in PRL).
+
+Reference paper: https://arxiv.org/abs/2208.02866 (published in PRL).
+
+### RA-Training
+
+For the numerical simulation of variational quantum algorithm training using random gate activation strategy by us, see the [project repo](https://github.com/ls-iastu/RAtraining).
+
+Reference paper: https://arxiv.org/abs/2303.08154 (published in PRR as a Letter).
+
+### TenCirChem
+
+[TenCirChem](https://github.com/tencent-quantum-lab/TenCirChem) is an efficient and versatile quantum computation package for molecular properties. TenCirChem is based on TensorCircuit and is optimized for chemistry applications.
+
+Reference paper: https://arxiv.org/abs/2303.10825 (published in JCTC).
+
+### EMQAOA-DARBO
+
+For the numerical simulation and hardware experiments with error mitigation on QAOA, see the [project repo](https://github.com/sherrylixuecheng/EMQAOA-DARBO).
+
+Reference paper: https://arxiv.org/abs/2303.14877 (published in Communications Physics).
+
+### NN-VQA
+
+For the setup and simulation code of neural network encoded variational quantum eigensolver, see the [demo](/docs/source/tutorials/nnvqe.ipynb).
+
+Reference paper: https://arxiv.org/abs/2308.01068 (published in PRApplied).
+
+### More works
+
+
+ More research works and code projects using TensorCircuit (click for details)
+
+- Neural Predictor based Quantum Architecture Search: https://arxiv.org/abs/2103.06524 (published in Machine Learning: Science and Technology).
+
+- Quantum imaginary-time control for accelerating the ground-state preparation: https://arxiv.org/abs/2112.11782 (published in PRR).
+
+- Efficient Quantum Simulation of Electron-Phonon Systems by Variational Basis State Encoder: https://arxiv.org/abs/2301.01442 (published in PRR).
+
+- Variational Quantum Simulations of Finite-Temperature Dynamical Properties via Thermofield Dynamics: https://arxiv.org/abs/2206.05571.
+
+- Understanding quantum machine learning also requires rethinking generalization: https://arxiv.org/abs/2306.13461 (published in Nature Communications).
+
+- Decentralized Quantum Federated Learning for Metaverse: Analysis, Design and Implementation: https://arxiv.org/abs/2306.11297. Code: https://github.com/s222416822/BQFL.
+
+- Non-IID quantum federated learning with one-shot communication complexity: https://arxiv.org/abs/2209.00768 (published in Quantum Machine Intelligence). Code: https://github.com/JasonZHM/quantum-fed-infer.
+
+- Quantum generative adversarial imitation learning: https://doi.org/10.1088/1367-2630/acc605 (published in New Journal of Physics).
+
+- GSQAS: Graph Self-supervised Quantum Architecture Search: https://arxiv.org/abs/2303.12381 (published in Physica A: Statistical Mechanics and its Applications).
+
+- Practical advantage of quantum machine learning in ghost imaging: https://www.nature.com/articles/s42005-023-01290-1 (published in Communications Physics).
+
+- Zero and Finite Temperature Quantum Simulations Powered by Quantum Magic: https://arxiv.org/abs/2308.11616.
+
+- Comparison of Quantum Simulators for Variational Quantum Search: A Benchmark Study: https://arxiv.org/abs/2309.05924.
+
+- Statistical analysis of quantum state learning process in quantum neural networks: https://arxiv.org/abs/2309.14980 (published in NeurIPS).
+
+- Generative quantum machine learning via denoising diffusion probabilistic models: https://arxiv.org/abs/2310.05866 (published in PRL).
+
+- Quantum imaginary time evolution and quantum annealing meet topological sector optimization: https://arxiv.org/abs/2310.04291.
+
+- Google Summer of Code 2023 Projects (QML4HEP): https://github.com/ML4SCI/QMLHEP, https://github.com/Gopal-Dahale/qgnn-hep, https://github.com/salcc/QuantumTransformers.
+
+- Absence of barren plateaus in finite local-depth circuits with long-range entanglement: https://arxiv.org/abs/2311.01393 (published in PRL).
+
+- Non-Markovianity benefits quantum dynamics simulation: https://arxiv.org/abs/2311.17622.
+
+
+
+If you want to highlight your research work or projects here, feel free to add by opening PR.
diff --git a/README_cn.md b/README_cn.md
index d22b37fd..8137eb85 100644
--- a/README_cn.md
+++ b/README_cn.md
@@ -25,15 +25,17 @@
English | 简体中文
-TensorCircuit 是下一代量子电路模拟器,支持自动微分、即时编译、硬件加速和向量并行化。
+TensorCircuit 是下一代量子软件框架,完美支持自动微分、即时编译、硬件加速和向量并行化。
-TensorCircuit 建立在现代机器学习框架之上,并且与机器学习后端无关。 它特别适用于量子经典混合范式和变分量子算法的高效模拟。
+TensorCircuit 建立在现代机器学习框架 Jax, TensorFlow, PyTorch 之上,支持机器学习后端无关的统一界面。 其特别适用于理想情况、含噪声情况及可控近似情况下,大规模量子经典混合范式和变分量子算法的高效模拟。
+
+TensorCircuit 现在支持真实量子硬件连接和实验,并提供优雅的 CPU/GPU/QPU 混合部署训练方案(v0.9+)。
## 入门
-请从 [快速上手](/docs/source/quickstart.rst) 和 [Jupyter 教程](/docs/source/tutorials) 开始。
+请从 [完整文档](https://tensorcircuit.readthedocs.io/zh/latest/) 中的 [快速上手](/docs/source/quickstart.rst) 开始。
-有关更多信息和介绍,请参阅有用的 [示例脚本](/examples) 和 [完整文档](https://tensorcircuit.readthedocs.io/zh/latest/)。 [测试](/tests) 中的 API docstring 和测试用例也提供了丰富的信息。
+有关软件用法,算法实现和工程范式演示的更多信息和介绍,请参阅 70+ [示例脚本](/examples) 和 30+ [案例教程](https://tensorcircuit.readthedocs.io/zh/latest/#tutorials)。 [测试](/tests) 用例和 API docstring 也提供了丰富的使用信息。
以下是一些最简易的演示。
@@ -50,7 +52,7 @@ print(c.expectation_ps(z=[0, 1]))
print(c.sample(allow_state=True, batch=1024, format="count_dict_bin"))
```
-- 运行时特性定制:
+- 运行时特性设置:
```python
tc.set_backend("tensorflow")
@@ -86,7 +88,7 @@ pip install tensorcircuit
pip install tensorcircuit[tensorflow]
```
-其他安装选项包括: `[torch]`, `[jax]` and `[qiskit]`。
+其他安装选项包括: `[torch]`, `[jax]`, `[qiskit]` 和 `[cloud]`。
此外我们有每日发布的最新版本 pip package,可以尝鲜开发的最新功能,请通过以下方式安装:
@@ -101,7 +103,9 @@ pip install tensorcircuit-nightly
- 基于张量网络模拟引擎
-- 即时编译、自动微分、向量并行化兼容,GPU 支持
+- 即时编译、自动微分、向量并行化兼容
+
+- GPU 支持、量子硬件支持、混合部署方案支持
- 效率
@@ -111,7 +115,7 @@ pip install tensorcircuit-nightly
- 优雅
- - 灵活性:自定义张量收缩、多种 ML 后端/接口选择、多种数值精度
+ - 灵活性:自定义张量收缩、多种 ML 后端/接口选择、多种数值精度、多种量子硬件
- API 设计:人类可理解的量子,更少的代码,更多的可能
@@ -119,11 +123,11 @@ pip install tensorcircuit-nightly
### 现况
-该项目由[腾讯量子实验室](https://quantum.tencent.com/)发布,由 [Shi-Xin Zhang](https://github.com/refraction-ray) 创造并维护。当前核心作者包括 [Shi-Xin Zhang](https://github.com/refraction-ray) 和 [Yu-Qin Chen](https://github.com/yutuer21)。我们也感谢来自实验室和开源社区的[贡献](https://github.com/tencent-quantum-lab/tensorcircuit/graphs/contributors)。
+该项目由 [Shi-Xin Zhang](https://github.com/refraction-ray) 创造并维护。当前核心作者包括 [Shi-Xin Zhang](https://github.com/refraction-ray) 和 [Yu-Qin Chen](https://github.com/yutuer21)。我们也感谢来自开源社区的[贡献](https://github.com/tencent-quantum-lab/tensorcircuit/graphs/contributors)。
### 引用
-如果该软件对您的研究有帮助, 请引用我们发表在 Quantum 期刊的白皮书文章:
+如果该软件对您的研究有帮助, 请引用我们发表在 Quantum 期刊的白皮书文章来支持我们的研发付出。
[TensorCircuit: a Quantum Software Framework for the NISQ Era](https://quantum-journal.org/papers/q-2023-02-02-912/).
@@ -133,24 +137,56 @@ pip install tensorcircuit-nightly
我们欢迎大家提出 [issues](https://github.com/tencent-quantum-lab/tensorcircuit/issues), [PR](https://github.com/tencent-quantum-lab/tensorcircuit/pulls), 和 [讨论](https://github.com/tencent-quantum-lab/tensorcircuit/discussions),这些都托管在 GitHub 上。
+### 协议
+
+TensorCircuit 是基于 Apache License 2.0 的开源软件。
+
## 研究和应用
### DQAS
可微量子架构搜索的应用见 [应用](/tensorcircuit/applications)。
-参考论文:https://arxiv.org/pdf/2010.08561.pdf (QST)。
+
+参考论文:https://arxiv.org/abs/2010.08561 (QST)。
### VQNHE
关于变分量子神经混合本征求解器的应用,请参见 [应用](tensorcircuit/applications)。
-参考论文:https://arxiv.org/pdf/2106.05105.pdf (PRL) 和 https://arxiv.org/pdf/2112.10380.pdf 。
-### VQEX - MBL
+参考论文:https://arxiv.org/abs/2106.05105 (PRL) 和 https://arxiv.org/abs/2112.10380 。
+
+### VQEX-MBL
VQEX 在 MBL 相位识别上的应用见 [教程](/docs/source/tutorials/vqex_mbl.ipynb)。
-参考论文: https://arxiv.org/pdf/2111.13719.pdf (PRB)。
-### Stark - DTC
+参考论文: https://arxiv.org/abs/2111.13719 (PRB)。
+
+### Stark-DTC
+
+数值验证 Stark 多体局域化稳定的离散时间晶体,类似的 Floquet 系统模拟请参考 [例子](/examples/timeevolution_trotter.py)。
+
+参考论文: https://arxiv.org/abs/2208.02866 (PRL)。
+
+### RA-Training
+
+利用我们提出的随机量子门激活策略训练优化变分量子算法的实现请参考 [项目](https://github.com/ls-iastu/RAtraining).
+
+参考论文: https://arxiv.org/abs/2303.08154。
+
+### TenCirChem
+
+[TenCirChem](https://github.com/tencent-quantum-lab/TenCirChem) 是高效的,专注于处理和计算分子性质的量子计算软件。其基于 TensorCircuit 并为量子化学任务进行了专门的优化。
+
+参考论文: https://arxiv.org/abs/2303.10825 (JCTC)。
+
+### EMQAOA-DARBO
+
+数值模拟和带错误消除的真实量子硬件实验验证 QAOA 优化的代码请参考 [项目](https://github.com/sherrylixuecheng/EMQAOA-DARBO)。
+
+参考论文: https://arxiv.org/abs/2303.14877。
+
+### NN-VQA
+
+关于神经网络编码的变分量子算法的实现和工作流, 见 [教程](/docs/source/tutorials/nnvqe.ipynb)。
-数值验证 Stark 多体局域化稳定的离散时间晶体,类似的 Floquet 系统模拟请参考 [例子](/examples/timeevolution_trotter.py).
-参考论文: https://arxiv.org/pdf/2208.02866.pdf (PRL).
+参考论文: https://arxiv.org/abs/2308.01068。
diff --git a/check_all.sh b/check_all.sh
old mode 100755
new mode 100644
diff --git a/docker/Dockerfile_v2 b/docker/Dockerfile_v2
new file mode 100644
index 00000000..9f2f4523
--- /dev/null
+++ b/docker/Dockerfile_v2
@@ -0,0 +1,37 @@
+FROM nvidia/cuda:11.7.1-cudnn8-devel-ubuntu20.04
+# nvidia/cuda:11.6.0-cudnn8-devel-ubuntu20.04
+
+RUN apt update && DEBIAN_FRONTEND=noninteractive apt install -y \
+ wget \
+ git \
+ vim \
+ pandoc
+
+RUN wget -q -P /tmp \
+ https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh \
+ && bash /tmp/Miniconda3-latest-Linux-x86_64.sh -b -p /opt/conda \
+ && rm /tmp/Miniconda3-latest-Linux-x86_64.sh
+
+ENV PATH="/opt/conda/bin:$PATH"
+
+RUN conda install -y \
+ pip \
+ python=3.10
+
+COPY requirements/requirements-docker-v2.txt /requirements-docker-v2.txt
+
+# RUN pip install -r /requirements-docker-v2.txt -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
+RUN pip install -i https://pypi.tuna.tsinghua.edu.cn/simple -r /requirements-docker-v2.txt -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
+
+# RUN pip install nvidia-cudnn-cu11==8.6.0.163 ray
+RUN pip install -i https://pypi.tuna.tsinghua.edu.cn/simple nvidia-cudnn-cu11==8.6.0.163 ray
+
+RUN pip install tensorcircuit
+
+# requirements conflict for ray
+# jax must have cudnn>8.6 otherwise fail when init array on gpu,
+# while torch insists cudnn 8.5 in setup but 8.6 can also work for torch
+
+RUN echo export TF_CPP_MIN_LOG_LEVEL=3 >> ~/.bashrc
+
+CMD ["/bin/bash"]
\ No newline at end of file
diff --git a/docker/README.md b/docker/README.md
index 4b011c1b..1c5aea0d 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -4,22 +4,26 @@ Run the following command to build the docker for tensorcircuit at parent path:
sudo docker build . -f docker/Dockerfile -t tensorcircuit
```
-One can also pull the [official image](https://hub.docker.com/repository/docker/tensorcircuit/tensorcircuit) from DockerHub as
+Since v0.10 we introduce new docker env based on ubuntu20.04+cuda11.7+py3.10 (+ pip installed tensorcircuit package), build the new docker use
```bash
-sudo docker pull tensorcircuit/tensorcircuit
+sudo docker build . -f docker/Dockerfile_v2 -t tensorcircuit
```
+One can also pull the [official image](https://hub.docker.com/repository/docker/tensorcircuit/tensorcircuit) from DockerHub as
+
+```bash
+sudo docker pull tensorcircuit/tensorcircuit
+```
Run the docker container by the following command:
```bash
sudo docker run -it --network host --gpus all tensorcircuit
-# if one also wants mount local source code, also add args `-v "$(pwd)":/app`
+# if one also wants to mount local source code, also add args `-v "$(pwd)":/root`
-# for old dockerfile with no runtime env setting
-# sudo docker run -it --network host -e LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-11.0/targets/x86_64-linux/lib -e PYTHONPATH=/app -v "$(pwd)":/app --gpus all tensorcircuit
+# using tensorcircuit/tensorcircuit:latest to run the prebuild docker image from dockerhub
```
-`export TF_CPP_MIN_LOG_LEVEL=3` maybe necessary since jax suprisingly frequently complain about ptxas version problem. And `export CUDA_VISIBLE_DEVICES=-1` if you want to test only on CPU.
+`export CUDA_VISIBLE_DEVICES=-1` if you want to test only on CPU.
diff --git a/docs/source/api/about.rst b/docs/source/api/about.rst
index 8f7bbf76..e065f1eb 100644
--- a/docs/source/api/about.rst
+++ b/docs/source/api/about.rst
@@ -1,5 +1,5 @@
tensorcircuit.about
-==================================================
+================================================================================
.. automodule:: tensorcircuit.about
:members:
:undoc-members:
diff --git a/docs/source/api/abstractcircuit.rst b/docs/source/api/abstractcircuit.rst
index 2caf0af1..3d67a499 100644
--- a/docs/source/api/abstractcircuit.rst
+++ b/docs/source/api/abstractcircuit.rst
@@ -1,5 +1,5 @@
tensorcircuit.abstractcircuit
-==================================================
+================================================================================
.. automodule:: tensorcircuit.abstractcircuit
:members:
:undoc-members:
diff --git a/docs/source/api/applications.rst b/docs/source/api/applications.rst
index ad329ccf..85c31126 100644
--- a/docs/source/api/applications.rst
+++ b/docs/source/api/applications.rst
@@ -1,9 +1,13 @@
tensorcircuit.applications
-==================================================
+================================================================================
.. toctree::
+ applications/ai.rst
applications/dqas.rst
+ applications/finance.rst
applications/graphdata.rst
applications/layers.rst
+ applications/optimization.rst
+ applications/physics.rst
applications/utils.rst
applications/vags.rst
applications/van.rst
diff --git a/docs/source/api/applications/ai.rst b/docs/source/api/applications/ai.rst
new file mode 100644
index 00000000..96a22cdb
--- /dev/null
+++ b/docs/source/api/applications/ai.rst
@@ -0,0 +1,4 @@
+tensorcircuit.applications.ai
+================================================================================
+.. toctree::
+ ai/ensemble.rst
\ No newline at end of file
diff --git a/docs/source/api/applications/ai/ensemble.rst b/docs/source/api/applications/ai/ensemble.rst
new file mode 100644
index 00000000..0173ac00
--- /dev/null
+++ b/docs/source/api/applications/ai/ensemble.rst
@@ -0,0 +1,7 @@
+tensorcircuit.applications.ai.ensemble
+================================================================================
+.. automodule:: tensorcircuit.applications.ai.ensemble
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/applications/dqas.rst b/docs/source/api/applications/dqas.rst
index 32457e1f..73cacd43 100644
--- a/docs/source/api/applications/dqas.rst
+++ b/docs/source/api/applications/dqas.rst
@@ -1,5 +1,5 @@
tensorcircuit.applications.dqas
-==================================================
+================================================================================
.. automodule:: tensorcircuit.applications.dqas
:members:
:undoc-members:
diff --git a/docs/source/api/applications/finance.rst b/docs/source/api/applications/finance.rst
new file mode 100644
index 00000000..d3302b31
--- /dev/null
+++ b/docs/source/api/applications/finance.rst
@@ -0,0 +1,4 @@
+tensorcircuit.applications.finance
+================================================================================
+.. toctree::
+ finance/portfolio.rst
\ No newline at end of file
diff --git a/docs/source/api/applications/finance/portfolio.rst b/docs/source/api/applications/finance/portfolio.rst
new file mode 100644
index 00000000..993b5754
--- /dev/null
+++ b/docs/source/api/applications/finance/portfolio.rst
@@ -0,0 +1,7 @@
+tensorcircuit.applications.finance.portfolio
+================================================================================
+.. automodule:: tensorcircuit.applications.finance.portfolio
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/applications/graphdata.rst b/docs/source/api/applications/graphdata.rst
index 22e1af13..44851513 100644
--- a/docs/source/api/applications/graphdata.rst
+++ b/docs/source/api/applications/graphdata.rst
@@ -1,5 +1,5 @@
tensorcircuit.applications.graphdata
-==================================================
+================================================================================
.. automodule:: tensorcircuit.applications.graphdata
:members:
:undoc-members:
diff --git a/docs/source/api/applications/layers.rst b/docs/source/api/applications/layers.rst
index 69303e98..d4f49e81 100644
--- a/docs/source/api/applications/layers.rst
+++ b/docs/source/api/applications/layers.rst
@@ -1,5 +1,5 @@
tensorcircuit.applications.layers
-==================================================
+================================================================================
.. automodule:: tensorcircuit.applications.layers
:members:
:undoc-members:
diff --git a/docs/source/api/applications/optimization.rst b/docs/source/api/applications/optimization.rst
new file mode 100644
index 00000000..87a0ffbb
--- /dev/null
+++ b/docs/source/api/applications/optimization.rst
@@ -0,0 +1,7 @@
+tensorcircuit.applications.optimization
+================================================================================
+.. automodule:: tensorcircuit.applications.optimization
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/applications/physics.rst b/docs/source/api/applications/physics.rst
new file mode 100644
index 00000000..98d1a2ed
--- /dev/null
+++ b/docs/source/api/applications/physics.rst
@@ -0,0 +1,5 @@
+tensorcircuit.applications.physics
+================================================================================
+.. toctree::
+ physics/baseline.rst
+ physics/fss.rst
\ No newline at end of file
diff --git a/docs/source/api/applications/physics/baseline.rst b/docs/source/api/applications/physics/baseline.rst
new file mode 100644
index 00000000..2ac581ba
--- /dev/null
+++ b/docs/source/api/applications/physics/baseline.rst
@@ -0,0 +1,7 @@
+tensorcircuit.applications.physics.baseline
+================================================================================
+.. automodule:: tensorcircuit.applications.physics.baseline
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/applications/physics/fss.rst b/docs/source/api/applications/physics/fss.rst
new file mode 100644
index 00000000..d65cd6c1
--- /dev/null
+++ b/docs/source/api/applications/physics/fss.rst
@@ -0,0 +1,7 @@
+tensorcircuit.applications.physics.fss
+================================================================================
+.. automodule:: tensorcircuit.applications.physics.fss
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/applications/utils.rst b/docs/source/api/applications/utils.rst
index d4549700..4114e7d8 100644
--- a/docs/source/api/applications/utils.rst
+++ b/docs/source/api/applications/utils.rst
@@ -1,5 +1,5 @@
tensorcircuit.applications.utils
-==================================================
+================================================================================
.. automodule:: tensorcircuit.applications.utils
:members:
:undoc-members:
diff --git a/docs/source/api/applications/vags.rst b/docs/source/api/applications/vags.rst
index 5b951bd3..af0f451f 100644
--- a/docs/source/api/applications/vags.rst
+++ b/docs/source/api/applications/vags.rst
@@ -1,5 +1,5 @@
tensorcircuit.applications.vags
-==================================================
+================================================================================
.. automodule:: tensorcircuit.applications.vags
:members:
:undoc-members:
diff --git a/docs/source/api/applications/van.rst b/docs/source/api/applications/van.rst
index 463e44d2..5c90f2e5 100644
--- a/docs/source/api/applications/van.rst
+++ b/docs/source/api/applications/van.rst
@@ -1,5 +1,5 @@
tensorcircuit.applications.van
-==================================================
+================================================================================
.. automodule:: tensorcircuit.applications.van
:members:
:undoc-members:
diff --git a/docs/source/api/applications/vqes.rst b/docs/source/api/applications/vqes.rst
index e3c775e5..d868c634 100644
--- a/docs/source/api/applications/vqes.rst
+++ b/docs/source/api/applications/vqes.rst
@@ -1,5 +1,5 @@
tensorcircuit.applications.vqes
-==================================================
+================================================================================
.. automodule:: tensorcircuit.applications.vqes
:members:
:undoc-members:
diff --git a/docs/source/api/backends.rst b/docs/source/api/backends.rst
index 4504e569..cfc63bec 100644
--- a/docs/source/api/backends.rst
+++ b/docs/source/api/backends.rst
@@ -1,5 +1,5 @@
tensorcircuit.backends
-==================================================
+================================================================================
.. toctree::
backends/backend_factory.rst
backends/cupy_backend.rst
diff --git a/docs/source/api/backends/backend_factory.rst b/docs/source/api/backends/backend_factory.rst
index 8864abfe..6df6374e 100644
--- a/docs/source/api/backends/backend_factory.rst
+++ b/docs/source/api/backends/backend_factory.rst
@@ -1,5 +1,5 @@
tensorcircuit.backends.backend_factory
-==================================================
+================================================================================
.. automodule:: tensorcircuit.backends.backend_factory
:members:
:undoc-members:
diff --git a/docs/source/api/backends/cupy_backend.rst b/docs/source/api/backends/cupy_backend.rst
index 743fe8f3..1e2421eb 100644
--- a/docs/source/api/backends/cupy_backend.rst
+++ b/docs/source/api/backends/cupy_backend.rst
@@ -1,5 +1,5 @@
tensorcircuit.backends.cupy_backend
-==================================================
+================================================================================
.. automodule:: tensorcircuit.backends.cupy_backend
:members:
:undoc-members:
diff --git a/docs/source/api/backends/jax_backend.rst b/docs/source/api/backends/jax_backend.rst
index e0dfe7c3..209409bc 100644
--- a/docs/source/api/backends/jax_backend.rst
+++ b/docs/source/api/backends/jax_backend.rst
@@ -1,5 +1,5 @@
tensorcircuit.backends.jax_backend
-==================================================
+================================================================================
.. automodule:: tensorcircuit.backends.jax_backend
:members:
:undoc-members:
diff --git a/docs/source/api/backends/numpy_backend.rst b/docs/source/api/backends/numpy_backend.rst
index af19d26b..735f969f 100644
--- a/docs/source/api/backends/numpy_backend.rst
+++ b/docs/source/api/backends/numpy_backend.rst
@@ -1,5 +1,5 @@
tensorcircuit.backends.numpy_backend
-==================================================
+================================================================================
.. automodule:: tensorcircuit.backends.numpy_backend
:members:
:undoc-members:
diff --git a/docs/source/api/backends/pytorch_backend.rst b/docs/source/api/backends/pytorch_backend.rst
index df2712c6..0d10f664 100644
--- a/docs/source/api/backends/pytorch_backend.rst
+++ b/docs/source/api/backends/pytorch_backend.rst
@@ -1,5 +1,5 @@
tensorcircuit.backends.pytorch_backend
-==================================================
+================================================================================
.. automodule:: tensorcircuit.backends.pytorch_backend
:members:
:undoc-members:
diff --git a/docs/source/api/backends/tensorflow_backend.rst b/docs/source/api/backends/tensorflow_backend.rst
index 52663b1a..a595418e 100644
--- a/docs/source/api/backends/tensorflow_backend.rst
+++ b/docs/source/api/backends/tensorflow_backend.rst
@@ -1,5 +1,5 @@
tensorcircuit.backends.tensorflow_backend
-==================================================
+================================================================================
.. automodule:: tensorcircuit.backends.tensorflow_backend
:members:
:undoc-members:
diff --git a/docs/source/api/basecircuit.rst b/docs/source/api/basecircuit.rst
index 6b014bb1..79c2636e 100644
--- a/docs/source/api/basecircuit.rst
+++ b/docs/source/api/basecircuit.rst
@@ -1,5 +1,5 @@
tensorcircuit.basecircuit
-==================================================
+================================================================================
.. automodule:: tensorcircuit.basecircuit
:members:
:undoc-members:
diff --git a/docs/source/api/channels.rst b/docs/source/api/channels.rst
index 3a7cf3af..d9d6fd00 100644
--- a/docs/source/api/channels.rst
+++ b/docs/source/api/channels.rst
@@ -1,5 +1,5 @@
tensorcircuit.channels
-==================================================
+================================================================================
.. automodule:: tensorcircuit.channels
:members:
:undoc-members:
diff --git a/docs/source/api/circuit.rst b/docs/source/api/circuit.rst
index 910c5ef6..59c76ddd 100644
--- a/docs/source/api/circuit.rst
+++ b/docs/source/api/circuit.rst
@@ -1,5 +1,5 @@
tensorcircuit.circuit
-==================================================
+================================================================================
.. automodule:: tensorcircuit.circuit
:members:
:undoc-members:
diff --git a/docs/source/api/cloud.rst b/docs/source/api/cloud.rst
new file mode 100644
index 00000000..be2faf7d
--- /dev/null
+++ b/docs/source/api/cloud.rst
@@ -0,0 +1,11 @@
+tensorcircuit.cloud
+================================================================================
+.. toctree::
+ cloud/abstraction.rst
+ cloud/apis.rst
+ cloud/config.rst
+ cloud/local.rst
+ cloud/quafu_provider.rst
+ cloud/tencent.rst
+ cloud/utils.rst
+ cloud/wrapper.rst
\ No newline at end of file
diff --git a/docs/source/api/cloud/abstraction.rst b/docs/source/api/cloud/abstraction.rst
new file mode 100644
index 00000000..3f00247c
--- /dev/null
+++ b/docs/source/api/cloud/abstraction.rst
@@ -0,0 +1,7 @@
+tensorcircuit.cloud.abstraction
+================================================================================
+.. automodule:: tensorcircuit.cloud.abstraction
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/cloud/apis.rst b/docs/source/api/cloud/apis.rst
new file mode 100644
index 00000000..fe623eec
--- /dev/null
+++ b/docs/source/api/cloud/apis.rst
@@ -0,0 +1,7 @@
+tensorcircuit.cloud.apis
+================================================================================
+.. automodule:: tensorcircuit.cloud.apis
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/cloud/config.rst b/docs/source/api/cloud/config.rst
new file mode 100644
index 00000000..8f6282a0
--- /dev/null
+++ b/docs/source/api/cloud/config.rst
@@ -0,0 +1,7 @@
+tensorcircuit.cloud.config
+================================================================================
+.. automodule:: tensorcircuit.cloud.config
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/cloud/local.rst b/docs/source/api/cloud/local.rst
new file mode 100644
index 00000000..649f66d6
--- /dev/null
+++ b/docs/source/api/cloud/local.rst
@@ -0,0 +1,7 @@
+tensorcircuit.cloud.local
+================================================================================
+.. automodule:: tensorcircuit.cloud.local
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/cloud/quafu_provider.rst b/docs/source/api/cloud/quafu_provider.rst
new file mode 100644
index 00000000..06d15eee
--- /dev/null
+++ b/docs/source/api/cloud/quafu_provider.rst
@@ -0,0 +1,7 @@
+tensorcircuit.cloud.quafu_provider
+================================================================================
+.. automodule:: tensorcircuit.cloud.quafu_provider
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/cloud/tencent.rst b/docs/source/api/cloud/tencent.rst
new file mode 100644
index 00000000..431c3294
--- /dev/null
+++ b/docs/source/api/cloud/tencent.rst
@@ -0,0 +1,7 @@
+tensorcircuit.cloud.tencent
+================================================================================
+.. automodule:: tensorcircuit.cloud.tencent
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/cloud/utils.rst b/docs/source/api/cloud/utils.rst
new file mode 100644
index 00000000..a7e33fe4
--- /dev/null
+++ b/docs/source/api/cloud/utils.rst
@@ -0,0 +1,7 @@
+tensorcircuit.cloud.utils
+================================================================================
+.. automodule:: tensorcircuit.cloud.utils
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/cloud/wrapper.rst b/docs/source/api/cloud/wrapper.rst
new file mode 100644
index 00000000..d65d3c07
--- /dev/null
+++ b/docs/source/api/cloud/wrapper.rst
@@ -0,0 +1,7 @@
+tensorcircuit.cloud.wrapper
+================================================================================
+.. automodule:: tensorcircuit.cloud.wrapper
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/compiler.rst b/docs/source/api/compiler.rst
index 43370b18..cb47419f 100644
--- a/docs/source/api/compiler.rst
+++ b/docs/source/api/compiler.rst
@@ -1,5 +1,6 @@
tensorcircuit.compiler
-==================================================
+================================================================================
.. toctree::
compiler/composed_compiler.rst
- compiler/qiskit_compiler.rst
\ No newline at end of file
+ compiler/qiskit_compiler.rst
+ compiler/simple_compiler.rst
\ No newline at end of file
diff --git a/docs/source/api/compiler/composed_compiler.rst b/docs/source/api/compiler/composed_compiler.rst
index c856636d..07f7f23e 100644
--- a/docs/source/api/compiler/composed_compiler.rst
+++ b/docs/source/api/compiler/composed_compiler.rst
@@ -1,5 +1,5 @@
tensorcircuit.compiler.composed_compiler
-==================================================
+================================================================================
.. automodule:: tensorcircuit.compiler.composed_compiler
:members:
:undoc-members:
diff --git a/docs/source/api/compiler/qiskit_compiler.rst b/docs/source/api/compiler/qiskit_compiler.rst
index 369b4740..b46ae8dc 100644
--- a/docs/source/api/compiler/qiskit_compiler.rst
+++ b/docs/source/api/compiler/qiskit_compiler.rst
@@ -1,5 +1,5 @@
tensorcircuit.compiler.qiskit_compiler
-==================================================
+================================================================================
.. automodule:: tensorcircuit.compiler.qiskit_compiler
:members:
:undoc-members:
diff --git a/docs/source/api/compiler/simple_compiler.rst b/docs/source/api/compiler/simple_compiler.rst
new file mode 100644
index 00000000..941efba5
--- /dev/null
+++ b/docs/source/api/compiler/simple_compiler.rst
@@ -0,0 +1,7 @@
+tensorcircuit.compiler.simple_compiler
+================================================================================
+.. automodule:: tensorcircuit.compiler.simple_compiler
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/cons.rst b/docs/source/api/cons.rst
index 6e077058..d4f48ab6 100644
--- a/docs/source/api/cons.rst
+++ b/docs/source/api/cons.rst
@@ -1,5 +1,5 @@
tensorcircuit.cons
-==================================================
+================================================================================
.. automodule:: tensorcircuit.cons
:members:
:undoc-members:
diff --git a/docs/source/api/densitymatrix.rst b/docs/source/api/densitymatrix.rst
index 571647d2..274dc323 100644
--- a/docs/source/api/densitymatrix.rst
+++ b/docs/source/api/densitymatrix.rst
@@ -1,5 +1,5 @@
tensorcircuit.densitymatrix
-==================================================
+================================================================================
.. automodule:: tensorcircuit.densitymatrix
:members:
:undoc-members:
diff --git a/docs/source/api/experimental.rst b/docs/source/api/experimental.rst
index 16761d4c..dbdfa068 100644
--- a/docs/source/api/experimental.rst
+++ b/docs/source/api/experimental.rst
@@ -1,5 +1,5 @@
tensorcircuit.experimental
-==================================================
+================================================================================
.. automodule:: tensorcircuit.experimental
:members:
:undoc-members:
diff --git a/docs/source/api/fgs.rst b/docs/source/api/fgs.rst
new file mode 100644
index 00000000..f00001b4
--- /dev/null
+++ b/docs/source/api/fgs.rst
@@ -0,0 +1,7 @@
+tensorcircuit.fgs
+================================================================================
+.. automodule:: tensorcircuit.fgs
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/gates.rst b/docs/source/api/gates.rst
index 8f72fbcc..71428553 100644
--- a/docs/source/api/gates.rst
+++ b/docs/source/api/gates.rst
@@ -1,5 +1,5 @@
tensorcircuit.gates
-==================================================
+================================================================================
.. automodule:: tensorcircuit.gates
:members:
:undoc-members:
diff --git a/docs/source/api/interfaces.rst b/docs/source/api/interfaces.rst
index 6371d824..5b234d0f 100644
--- a/docs/source/api/interfaces.rst
+++ b/docs/source/api/interfaces.rst
@@ -1,5 +1,5 @@
tensorcircuit.interfaces
-==================================================
+================================================================================
.. toctree::
interfaces/numpy.rst
interfaces/scipy.rst
diff --git a/docs/source/api/interfaces/numpy.rst b/docs/source/api/interfaces/numpy.rst
index 5df8b0bb..5271b873 100644
--- a/docs/source/api/interfaces/numpy.rst
+++ b/docs/source/api/interfaces/numpy.rst
@@ -1,5 +1,5 @@
tensorcircuit.interfaces.numpy
-==================================================
+================================================================================
.. automodule:: tensorcircuit.interfaces.numpy
:members:
:undoc-members:
diff --git a/docs/source/api/interfaces/scipy.rst b/docs/source/api/interfaces/scipy.rst
index c263bd93..284dcbe9 100644
--- a/docs/source/api/interfaces/scipy.rst
+++ b/docs/source/api/interfaces/scipy.rst
@@ -1,5 +1,5 @@
tensorcircuit.interfaces.scipy
-==================================================
+================================================================================
.. automodule:: tensorcircuit.interfaces.scipy
:members:
:undoc-members:
diff --git a/docs/source/api/interfaces/tensorflow.rst b/docs/source/api/interfaces/tensorflow.rst
index e02981b9..8ac1a344 100644
--- a/docs/source/api/interfaces/tensorflow.rst
+++ b/docs/source/api/interfaces/tensorflow.rst
@@ -1,5 +1,5 @@
tensorcircuit.interfaces.tensorflow
-==================================================
+================================================================================
.. automodule:: tensorcircuit.interfaces.tensorflow
:members:
:undoc-members:
diff --git a/docs/source/api/interfaces/tensortrans.rst b/docs/source/api/interfaces/tensortrans.rst
index b666e177..a92b166d 100644
--- a/docs/source/api/interfaces/tensortrans.rst
+++ b/docs/source/api/interfaces/tensortrans.rst
@@ -1,5 +1,5 @@
tensorcircuit.interfaces.tensortrans
-==================================================
+================================================================================
.. automodule:: tensorcircuit.interfaces.tensortrans
:members:
:undoc-members:
diff --git a/docs/source/api/interfaces/torch.rst b/docs/source/api/interfaces/torch.rst
index 28090f54..5f7e3dea 100644
--- a/docs/source/api/interfaces/torch.rst
+++ b/docs/source/api/interfaces/torch.rst
@@ -1,5 +1,5 @@
tensorcircuit.interfaces.torch
-==================================================
+================================================================================
.. automodule:: tensorcircuit.interfaces.torch
:members:
:undoc-members:
diff --git a/docs/source/api/keras.rst b/docs/source/api/keras.rst
index 5ed313b2..9f2e4860 100644
--- a/docs/source/api/keras.rst
+++ b/docs/source/api/keras.rst
@@ -1,5 +1,5 @@
tensorcircuit.keras
-==================================================
+================================================================================
.. automodule:: tensorcircuit.keras
:members:
:undoc-members:
diff --git a/docs/source/api/mps_base.rst b/docs/source/api/mps_base.rst
index caf11b36..039da259 100644
--- a/docs/source/api/mps_base.rst
+++ b/docs/source/api/mps_base.rst
@@ -1,5 +1,5 @@
tensorcircuit.mps_base
-==================================================
+================================================================================
.. automodule:: tensorcircuit.mps_base
:members:
:undoc-members:
diff --git a/docs/source/api/mpscircuit.rst b/docs/source/api/mpscircuit.rst
index a4de8119..58a68f56 100644
--- a/docs/source/api/mpscircuit.rst
+++ b/docs/source/api/mpscircuit.rst
@@ -1,5 +1,5 @@
tensorcircuit.mpscircuit
-==================================================
+================================================================================
.. automodule:: tensorcircuit.mpscircuit
:members:
:undoc-members:
diff --git a/docs/source/api/noisemodel.rst b/docs/source/api/noisemodel.rst
index ab152857..4930d8f0 100644
--- a/docs/source/api/noisemodel.rst
+++ b/docs/source/api/noisemodel.rst
@@ -1,5 +1,5 @@
tensorcircuit.noisemodel
-==================================================
+================================================================================
.. automodule:: tensorcircuit.noisemodel
:members:
:undoc-members:
diff --git a/docs/source/api/quantum.rst b/docs/source/api/quantum.rst
index c9d13b6b..f25c8a5d 100644
--- a/docs/source/api/quantum.rst
+++ b/docs/source/api/quantum.rst
@@ -1,5 +1,5 @@
tensorcircuit.quantum
-==================================================
+================================================================================
.. automodule:: tensorcircuit.quantum
:members:
:undoc-members:
diff --git a/docs/source/api/results.rst b/docs/source/api/results.rst
index 0bea95e7..2e60327c 100644
--- a/docs/source/api/results.rst
+++ b/docs/source/api/results.rst
@@ -1,5 +1,6 @@
tensorcircuit.results
-==================================================
+================================================================================
.. toctree::
results/counts.rst
+ results/qem.rst
results/readout_mitigation.rst
\ No newline at end of file
diff --git a/docs/source/api/results/counts.rst b/docs/source/api/results/counts.rst
index 7542d722..7f145206 100644
--- a/docs/source/api/results/counts.rst
+++ b/docs/source/api/results/counts.rst
@@ -1,5 +1,5 @@
tensorcircuit.results.counts
-==================================================
+================================================================================
.. automodule:: tensorcircuit.results.counts
:members:
:undoc-members:
diff --git a/docs/source/api/results/qem.rst b/docs/source/api/results/qem.rst
new file mode 100644
index 00000000..160098f7
--- /dev/null
+++ b/docs/source/api/results/qem.rst
@@ -0,0 +1,5 @@
+tensorcircuit.results.qem
+================================================================================
+.. toctree::
+ qem/benchmark_circuits.rst
+ qem/qem_methods.rst
\ No newline at end of file
diff --git a/docs/source/api/results/qem/benchmark_circuits.rst b/docs/source/api/results/qem/benchmark_circuits.rst
new file mode 100644
index 00000000..3c339884
--- /dev/null
+++ b/docs/source/api/results/qem/benchmark_circuits.rst
@@ -0,0 +1,7 @@
+tensorcircuit.results.qem.benchmark_circuits
+================================================================================
+.. automodule:: tensorcircuit.results.qem.benchmark_circuits
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/results/qem/qem_methods.rst b/docs/source/api/results/qem/qem_methods.rst
new file mode 100644
index 00000000..a95bdf95
--- /dev/null
+++ b/docs/source/api/results/qem/qem_methods.rst
@@ -0,0 +1,7 @@
+tensorcircuit.results.qem.qem_methods
+================================================================================
+.. automodule:: tensorcircuit.results.qem.qem_methods
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/results/readout_mitigation.rst b/docs/source/api/results/readout_mitigation.rst
index 0d9baa3d..325fe21a 100644
--- a/docs/source/api/results/readout_mitigation.rst
+++ b/docs/source/api/results/readout_mitigation.rst
@@ -1,5 +1,5 @@
tensorcircuit.results.readout_mitigation
-==================================================
+================================================================================
.. automodule:: tensorcircuit.results.readout_mitigation
:members:
:undoc-members:
diff --git a/docs/source/api/shadows.rst b/docs/source/api/shadows.rst
new file mode 100644
index 00000000..7aea082e
--- /dev/null
+++ b/docs/source/api/shadows.rst
@@ -0,0 +1,7 @@
+tensorcircuit.shadows
+================================================================================
+.. automodule:: tensorcircuit.shadows
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/simplify.rst b/docs/source/api/simplify.rst
index c1816c31..22833f9f 100644
--- a/docs/source/api/simplify.rst
+++ b/docs/source/api/simplify.rst
@@ -1,5 +1,5 @@
tensorcircuit.simplify
-==================================================
+================================================================================
.. automodule:: tensorcircuit.simplify
:members:
:undoc-members:
diff --git a/docs/source/api/templates.rst b/docs/source/api/templates.rst
index 330fa6db..202b049d 100644
--- a/docs/source/api/templates.rst
+++ b/docs/source/api/templates.rst
@@ -1,9 +1,10 @@
tensorcircuit.templates
-==================================================
+================================================================================
.. toctree::
+ templates/ansatz.rst
templates/blocks.rst
templates/chems.rst
+ templates/conversions.rst
templates/dataset.rst
- templates/ensemble.rst
templates/graphs.rst
templates/measurements.rst
\ No newline at end of file
diff --git a/docs/source/api/templates/ansatz.rst b/docs/source/api/templates/ansatz.rst
new file mode 100644
index 00000000..15f19650
--- /dev/null
+++ b/docs/source/api/templates/ansatz.rst
@@ -0,0 +1,7 @@
+tensorcircuit.templates.ansatz
+================================================================================
+.. automodule:: tensorcircuit.templates.ansatz
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/templates/blocks.rst b/docs/source/api/templates/blocks.rst
index 0c88f3d9..b7a0945a 100644
--- a/docs/source/api/templates/blocks.rst
+++ b/docs/source/api/templates/blocks.rst
@@ -1,5 +1,5 @@
tensorcircuit.templates.blocks
-==================================================
+================================================================================
.. automodule:: tensorcircuit.templates.blocks
:members:
:undoc-members:
diff --git a/docs/source/api/templates/chems.rst b/docs/source/api/templates/chems.rst
index 8a31f9d3..d06d9e39 100644
--- a/docs/source/api/templates/chems.rst
+++ b/docs/source/api/templates/chems.rst
@@ -1,5 +1,5 @@
tensorcircuit.templates.chems
-==================================================
+================================================================================
.. automodule:: tensorcircuit.templates.chems
:members:
:undoc-members:
diff --git a/docs/source/api/templates/conversions.rst b/docs/source/api/templates/conversions.rst
new file mode 100644
index 00000000..38cbe47f
--- /dev/null
+++ b/docs/source/api/templates/conversions.rst
@@ -0,0 +1,7 @@
+tensorcircuit.templates.conversions
+================================================================================
+.. automodule:: tensorcircuit.templates.conversions
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/templates/dataset.rst b/docs/source/api/templates/dataset.rst
index 36b9e510..aa6cdfa7 100644
--- a/docs/source/api/templates/dataset.rst
+++ b/docs/source/api/templates/dataset.rst
@@ -1,5 +1,5 @@
tensorcircuit.templates.dataset
-==================================================
+================================================================================
.. automodule:: tensorcircuit.templates.dataset
:members:
:undoc-members:
diff --git a/docs/source/api/templates/ensemble.rst b/docs/source/api/templates/ensemble.rst
deleted file mode 100644
index c7dd6f85..00000000
--- a/docs/source/api/templates/ensemble.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-tensorcircuit.templates.ensemble
-==================================================
-.. automodule:: tensorcircuit.templates.ensemble
- :members:
- :undoc-members:
- :show-inheritance:
- :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/templates/graphs.rst b/docs/source/api/templates/graphs.rst
index 0a2141f0..b86ab51e 100644
--- a/docs/source/api/templates/graphs.rst
+++ b/docs/source/api/templates/graphs.rst
@@ -1,5 +1,5 @@
tensorcircuit.templates.graphs
-==================================================
+================================================================================
.. automodule:: tensorcircuit.templates.graphs
:members:
:undoc-members:
diff --git a/docs/source/api/templates/measurements.rst b/docs/source/api/templates/measurements.rst
index 2113f03b..7e05673c 100644
--- a/docs/source/api/templates/measurements.rst
+++ b/docs/source/api/templates/measurements.rst
@@ -1,5 +1,5 @@
tensorcircuit.templates.measurements
-==================================================
+================================================================================
.. automodule:: tensorcircuit.templates.measurements
:members:
:undoc-members:
diff --git a/docs/source/api/torchnn.rst b/docs/source/api/torchnn.rst
index 5a5b2775..9f9c6598 100644
--- a/docs/source/api/torchnn.rst
+++ b/docs/source/api/torchnn.rst
@@ -1,5 +1,5 @@
tensorcircuit.torchnn
-==================================================
+================================================================================
.. automodule:: tensorcircuit.torchnn
:members:
:undoc-members:
diff --git a/docs/source/api/translation.rst b/docs/source/api/translation.rst
index a33667f7..f320c909 100644
--- a/docs/source/api/translation.rst
+++ b/docs/source/api/translation.rst
@@ -1,5 +1,5 @@
tensorcircuit.translation
-==================================================
+================================================================================
.. automodule:: tensorcircuit.translation
:members:
:undoc-members:
diff --git a/docs/source/api/utils.rst b/docs/source/api/utils.rst
index 3fa45319..93ee9496 100644
--- a/docs/source/api/utils.rst
+++ b/docs/source/api/utils.rst
@@ -1,5 +1,5 @@
tensorcircuit.utils
-==================================================
+================================================================================
.. automodule:: tensorcircuit.utils
:members:
:undoc-members:
diff --git a/docs/source/api/vis.rst b/docs/source/api/vis.rst
index f27680f1..2cdc89e2 100644
--- a/docs/source/api/vis.rst
+++ b/docs/source/api/vis.rst
@@ -1,5 +1,5 @@
tensorcircuit.vis
-==================================================
+================================================================================
.. automodule:: tensorcircuit.vis
:members:
:undoc-members:
diff --git a/docs/source/cnconf.py b/docs/source/cnconf.py
index 8b01d026..ceecf794 100644
--- a/docs/source/cnconf.py
+++ b/docs/source/cnconf.py
@@ -48,7 +48,9 @@
"sphinx_copybutton",
"nbsphinx",
"toctree_filter",
+ "sphinx.ext.napoleon",
"myst_parser",
+ "sphinx_design",
]
autosectionlabel_prefix_document = True
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 4f8e644e..9d8147d9 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -50,8 +50,11 @@
"toctree_filter",
"sphinx.ext.napoleon",
"myst_parser",
+ "sphinx_design",
]
+nbsphinx_allow_errors = True
+
autosectionlabel_prefix_document = True
# Add any paths that contain templates here, relative to this directory.
@@ -144,6 +147,7 @@
# Output file base name for HTML help builder.
htmlhelp_basename = "tensorcircuitdoc"
+html_title = "TensorCircuit Documentation"
# -- Options for LaTeX output ------------------------------------------------
diff --git a/docs/source/contribs/development_Mac.md b/docs/source/contribs/development_Mac.md
new file mode 100644
index 00000000..b2682f32
--- /dev/null
+++ b/docs/source/contribs/development_Mac.md
@@ -0,0 +1,113 @@
+# Tensorcircuit Installation Guide on MacOS
+
+Contributed by [_Mark (Zixuan) Song_](https://marksong.tech)
+
+Apple has updated Tensorflow (for MacOS) so that installation on M-series (until M2) and Intel-series Mac can follow the exact same procedure.
+
+## Starting From Scratch
+
+For completely new Macos or Macos without Xcode installed.
+
+If you have Xcode installed, skip to Install TC backends.
+
+### Install Xcode Command Line Tools
+
+Need graphical access to the machine.
+
+Run `xcode-select --install` to install if on optimal internet.
+
+Or download the Command Line Tools installation image from [Apple](https://developer.apple.com/download/more/) and install it if the internet connection is weak.
+
+## Install TC Backends
+
+There are four backends to choose from, Numpy, Tensorflow, Jax, and Torch.
+
+### Install Jax, Pytorch (Optional)
+
+```bash
+pip install [Package Name]
+```
+### Install Tensorflow (Optional - Recommended)
+
+#### Install Miniconda (Optional - Recommended)
+
+If you wish to install Tensorflow optimized for MacOS (`tensorflow-macos`) or Tensorflow GPU optimized (`tensorflow-metal`) please install miniconda.
+
+If you wish to install Vanilla Tensorflow developed by Google (`tensorflow`) please skip this step.
+
+```bash
+curl -o ~/miniconda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-arm64.sh
+bash ~/miniconda.sh -b -p $HOME/miniconda
+source ~/miniconda/bin/activate
+conda install -c apple tensorflow-deps
+```
+
+#### Installation
+
+```bash
+pip install tensorflow
+```
+
+If you wish to use tensorflow-metal PluggableDevice, then continue install (not recommended):
+
+```bash
+pip install tensorflow-metal
+```
+
+#### Verify Tensorflow Installation
+
+```python
+import tensorflow as tf
+
+cifar = tf.keras.datasets.cifar100
+(x_train, y_train), (x_test, y_test) = cifar.load_data()
+model = tf.keras.applications.ResNet50(
+ include_top=True,
+ weights=None,
+ input_shape=(32, 32, 3),
+ classes=100,)
+
+loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
+model.compile(optimizer="adam", loss=loss_fn, metrics=["accuracy"])
+model.fit(x_train, y_train, epochs=5, batch_size=64)
+```
+
+## Install Tensorcircuit
+
+```bash
+pip install tensorcircuit
+```
+
+## Benchmarking
+
+This data was collected by running `benchmarks/scripts/vqe_tc.py` 10 times and averaging the results.
+
+
+
+ |
+ Vanilla Tensorflow |
+ Apple Tensorflow |
+ Apple Tensorflow with Metal Plugin |
+
+
+ Construction Time |
+ 11.49241641s |
+ 11.31878941s |
+ 11.6103961s |
+
+
+ Iteration time |
+ 0.002313011s |
+ 0.002333004s |
+ 0.046412581s |
+
+
+ Total time |
+ 11.72371747s |
+ 11.55208979s |
+ 16.25165417s |
+
+
+
+
+As of July 2023, this has been tested on Intel Macs running Ventura, M1 Macs running Ventura, M2 Macs running Ventura, and M2 Macs running Sonoma beta.
\ No newline at end of file
diff --git a/docs/source/contribs/development_MacARM.md b/docs/source/contribs/development_MacARM.md
index ffddf582..73c63948 100644
--- a/docs/source/contribs/development_MacARM.md
+++ b/docs/source/contribs/development_MacARM.md
@@ -2,6 +2,9 @@
Contributed by Mark (Zixuan) Song
+.. warning::
+ This page is deprecated. Please visit `the update tutorial `_ for the latest information.
+
## Starting From Scratch
For completely new macos or macos without xcode and brew
@@ -43,13 +46,7 @@ pip install [Package Name]
### Install Tensorflow (Optional)
-#### Install Tensorflow (Recommended Approach)
-
-❗️ Tensorflow with MacOS optimization would not function correctly in version 2.11.0 and before. Do not use this version of tensorflow if you intented to train any machine learning model.
-
-FYI: Error can occur when machine learning training or gpu related code is involved.
-
-⚠️ Tensorflow without macos optimization does not support Metal API and utilizing GPU (both intel chips and M-series chips) until at least tensorflow 2.11. Tensorflow-macos would fail when running `tc.backend.to_dense()`
+#### Install Tensorflow without MacOS optimization
```
conda config --add channels conda-forge
@@ -75,13 +72,45 @@ model.compile(optimizer="adam", loss=loss_fn, metrics=["accuracy"])
model.fit(x_train, y_train, epochs=5, batch_size=64)
```
+#### Install Tensorflow with MacOS optimization (Recommended)
+
+For tensorflow version 2.13 or later:
+```
+pip install tensorflow
+pip install tensorflow-metal
+```
+
+For tensorflow version 2.12 or earlier:
+```
+pip install tensorflow-macos
+pip install tensorflow-metal
+```
+
+#### Verify Tensorflow Installation
+
+```
+import tensorflow as tf
+
+cifar = tf.keras.datasets.cifar100
+(x_train, y_train), (x_test, y_test) = cifar.load_data()
+model = tf.keras.applications.ResNet50(
+ include_top=True,
+ weights=None,
+ input_shape=(32, 32, 3),
+ classes=100,)
+
+loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
+model.compile(optimizer="adam", loss=loss_fn, metrics=["accuracy"])
+model.fit(x_train, y_train, epochs=5, batch_size=64)
+```
+
## Install Tensorcircuit
```
pip install tensorcircuit
```
-Testing Platform (Tested Feb 2023)
+Testing Platform (Tested Jun 2023)
- Platform 1:
- MacOS Ventura 13.1 (Build version 22C65)
@@ -89,3 +118,6 @@ Testing Platform (Tested Feb 2023)
- Platform 2:
- MacOS Ventura 13.2 (Build version 22D49)
- M1 Ultra (Virtual)
+- Platform 4:
+ - MacOS Sonoma 14.0 Beta 2 (Build version 23A5276g)
+ - M2 Max
\ No newline at end of file
diff --git a/docs/source/contribs/development_MacM1.rst b/docs/source/contribs/development_MacM1.rst
index 3df9c949..8ce9f058 100644
--- a/docs/source/contribs/development_MacM1.rst
+++ b/docs/source/contribs/development_MacM1.rst
@@ -4,7 +4,7 @@ Contributed by (Yuqin Chen)
.. warning::
- This page is deprecated. Please visit `the update tutorial `_ for latest information.
+ This page is deprecated. Please visit `the update tutorial `_ for the latest information.
Why We Can't Run TensorCircuit on TensorlowBackend with Apple M1
diff --git a/docs/source/contribs/development_MacM2.md b/docs/source/contribs/development_MacM2.md
new file mode 100644
index 00000000..b3daf5fb
--- /dev/null
+++ b/docs/source/contribs/development_MacM2.md
@@ -0,0 +1,53 @@
+# Tensorcircuit Installation Guide on MacOS
+
+Contributed by [Hong-Ye Hu](https://github.com/hongyehu)
+
+.. warning::
+ This page is deprecated. Please visit `the update tutorial `_ for the latest information.
+
+The key issue addressed in this document is **how to install both TensorFlow and Jax on a M2 chip MacOS without conflict**.
+
+## Starting From Scratch
+
+### Install Xcode Command Line Tools
+
+Need graphical access to the machine.
+
+Run `xcode-select --install` to install if on optimal internet.
+
+Or download the Command Line Tools installation image from [Apple](https://developer.apple.com/download/more/) and install it if the internet connection is weak.
+
+## Install Miniconda
+
+Due to the limitations of MacOS and packages, the latest version of Python does not always function as desired, so a miniconda installation is advised to solve these issues. Using an anaconda virtual environment is always a good habit.
+
+```
+curl -o ~/miniconda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-arm64.sh
+bash ~/miniconda.sh -b -p $HOME/miniconda
+source ~/miniconda/bin/activate
+```
+
+## Install Packages
+First, create a virtual environment, and make sure the python version is 3.8.5 by
+```
+conda create --name NewEnv python==3.8.5
+conda activate NewEnv
+```
+Then, install the TensorFlow from `.whl` file (file can be downloaded from this [URL](https://drive.google.com/drive/folders/1oSipZLnoeQB0Awz8U68KYeCPsULy_dQ7)). This will install TensorFlow version 2.4.1
+```
+pip install ~/Downloads/tensorflow-2.4.1-py3-none-any.whl
+```
+Next, one needs to install **Jax** and **Optax** by
+```
+conda install jax==0.3.0
+conda install optax==0.1.4
+```
+Now, hopefully, you should be able to use both Jax and TensorFlow in this environment. But sometimes, it may give you an error "ERROR: package Chardet not found.".
+If that is the case, you can install it by `conda install chardet`.
+Lastly, install tensorcircuit
+```
+pip install tensorcircuit
+```
+This is the solution that seems to work for M2-chip MacOS. Please let me know if there is a better solution!
+
+
diff --git a/docs/source/contribs/development_Mac_cn.md b/docs/source/contribs/development_Mac_cn.md
new file mode 100644
index 00000000..f23fd01f
--- /dev/null
+++ b/docs/source/contribs/development_Mac_cn.md
@@ -0,0 +1,114 @@
+# MacOS Tensorcircuit 安装教程
+
+[_Mark (Zixuan) Song_](https://marksong.tech) 撰写
+
+由于苹果更新了Tensorflow,因此M系列(直到M2)和英特尔系列Mac上的安装可以遵循完全相同的过程。
+
+## 从头开始
+
+对于全新的Macos或未安装Xcode的Macos。
+
+若您已安装Xcode,请跳转到安装TC后端。
+
+### 安装Xcode命令行工具
+
+需要对机器的图形访问
+
+如果网络良好,请运行`xcode-select --install`进行安装。
+
+或者,如果网络连接不理想,请从[苹果](https://developer.apple.com/download/more/)下载命令行工具安装映像,然后进行安装。
+
+## 安装TC后端
+
+有四个后端可供选择,Numpy,Tensorflow,Jax和Torch。
+
+### 安装Jax、Pytorch(可选)
+
+```bash
+pip install [Package Name]
+```
+
+### 安装Tensorflow(可选 - 推荐)
+
+#### 安装miniconda(可选 - 推荐)
+
+若您希望使用苹果为MacOS优化的Tensorflow(`tensorflow-macos`)或使用Tensorflow GPU优化(`tensorflow-metal`)请安装miniconda。
+
+若您希望使用Google开发的原版Tensorflow(`tensorflow`)请跳过此步骤。
+
+```bash
+curl -o ~/miniconda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-arm64.sh
+bash ~/miniconda.sh -b -p $HOME/miniconda
+source ~/miniconda/bin/activate
+conda install -c apple tensorflow-deps
+```
+
+#### 安装步骤
+
+```bash
+pip install tensorflow
+```
+
+若您希望使用苹果为Tensorflow优化的Metal后端,请继续运行(不建议):
+
+```bash
+pip install tensorflow-metal
+```
+
+#### 验证Tensorflow安装
+
+```python
+import tensorflow as tf
+
+cifar = tf.keras.datasets.cifar100
+(x_train, y_train), (x_test, y_test) = cifar.load_data()
+model = tf.keras.applications.ResNet50(
+ include_top=True,
+ weights=None,
+ input_shape=(32, 32, 3),
+ classes=100,)
+
+loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
+model.compile(optimizer="adam", loss=loss_fn, metrics=["accuracy"])
+model.fit(x_train, y_train, epochs=5, batch_size=64)
+```
+
+## 安装Tensorcircuit
+
+```bash
+pip install tensorcircuit
+```
+
+## 测试与比较
+
+以下数据由运行`benchmarks/scripts/vqe_tc.py` 10次并取平均值得到。
+
+
+
+ |
+ 原版Tensorflow |
+ 苹果优化版Tensorflow |
+ 苹果优化版Tensorflow并安装Tensorflow Metal插件 |
+
+
+ 构建时间 |
+ 11.49241641s |
+ 11.31878941s |
+ 11.6103961s |
+
+
+ 迭代时间 |
+ 0.002313011s |
+ 0.002333004s |
+ 0.046412581s |
+
+
 总时间 |
+ 11.72371747s |
+ 11.55208979s |
+ 16.25165417s |
+
+
+
+
+直到2023年7月,这已在运行Ventura的英特尔i9 Mac、运行Ventura的M1 Mac、运行Ventura的M2 Mac、运行Sonoma测试版的M2 Mac上进行了测试。
\ No newline at end of file
diff --git a/docs/source/contribution.rst b/docs/source/contribution.rst
index 5e8d7385..d8dbd493 100644
--- a/docs/source/contribution.rst
+++ b/docs/source/contribution.rst
@@ -146,7 +146,59 @@ We use `sphinx `__ to manage the document
The source files for docs are .rst file in docs/source.
-For English docs, ``sphinx-build source build/html`` in docs dir is enough. The html version of the docs are in docs/build/html.
+For English docs, ``sphinx-build source build/html`` and ``make latexpdf LATEXMKOPTS="-silent"`` in docs dir are enough.
+The html and pdf version of the docs are in docs/build/html and docs/build/latex, respectively.
+
+**Formula Environment Attention**
+
+It should be noted that the formula environment ``$$CONTENT$$`` in markdown is equivalent to the ``equation`` environment in latex.
+Therefore, in the jupyter notebook documents, do not nest the formula environment in ``$$CONTENT$$`` that is incompatible with
+``equation`` in latex, such as ``eqnarray``, which will cause errors in the pdf file built by ``nbsphinx``.
+However, compatible formula environments can be used. For example, this legal code in markdown
+
+.. code-block:: markdown
+
+ $$
+ \begin{split}
+ X&=Y\\
+ &=Z
+ \end{split}
+ $$
+
+will be converted to
+
+.. code-block:: latex
+
+ \begin{equation}
+ \begin{split}
+ X&=Y\\
+ &=Z
+ \end{split}
+ \end{equation}
+
+in latex automatically by ``nbsphinx``, which is legal latex code. However, this legal code in markdown
+
+.. code-block:: markdown
+
+ $$
+ \begin{eqnarray}
+ X&=&Y\\
+ &=&Z
+ \end{eqnarray}
+ $$
+
+will be converted to
+
+.. code-block:: latex
+
+ \begin{equation}
+ \begin{eqnarray}
+ X&=&Y\\
+ &=&Z
+ \end{eqnarray}
+ \end{equation}
+
+in latex, which is illegal latex code.
**Auto Generation of API Docs:**
diff --git a/docs/source/generate_rst.py b/docs/source/generate_rst.py
index 60fcf0de..9b112c46 100644
--- a/docs/source/generate_rst.py
+++ b/docs/source/generate_rst.py
@@ -5,7 +5,7 @@
class RSTGenerator:
- title_line = "=" * 50
+ title_line = "=" * 80
toctree = ".. toctree::\n {}"
automodule = ".. automodule:: {}\n :members:\n :undoc-members:\n :show-inheritance:\n :inherited-members:"
@@ -21,11 +21,14 @@ def __init__(
def cleanup(self):
if os.path.exists("modules.rst"):
os.remove("modules.rst")
- shutil.rmtree(self.dfolder)
+ try:
+ shutil.rmtree(self.dfolder)
+ except FileNotFoundError:
+ pass
os.makedirs(self.dfolder)
def write(self, path, content):
- if type(content) == type([]):
+ if isinstance(content, list):
content = "\n".join(content)
with open(path, "w") as f:
@@ -33,70 +36,68 @@ def write(self, path, content):
print(f"Finish writing {path}")
- def single_file_module(self):
- """Process the module in the self.pfolder/*.py"""
-
- for module_name in glob.glob(pj(self.pfolder, "*.py")):
+ def _file_generate(self, package_parents):
+ file_list = []
+ for module_name in glob.glob(pj(self.pfolder, *package_parents, "*.py")):
module_name = os.path.basename(module_name)[:-3]
if module_name in self.ingnored_modules:
continue
- rst_file = pj(self.dfolder, f"{module_name}.rst")
+ rst_file = pj(self.dfolder, *package_parents, f"{module_name}.rst")
+ name = f"{self.name}"
+ for n in package_parents:
+ name += f".{n}"
+ name += f".{module_name}"
content = [
- f"{self.name}.{module_name}",
+ name,
self.title_line,
- self.automodule.format(f"{self.name}.{module_name}"),
+ self.automodule.format(name),
]
-
self.write(rst_file, content)
- self.tree[rst_file] = []
-
- def subdir_files_module(self):
- """Write the rst files for modules with subdir or files"""
- for subdir in glob.glob(pj(self.pfolder, "*/")):
+ if not package_parents:
+ upper = self.dfolder
+ else:
+ upper = package_parents[-1]
+ file_list.append(upper + f"/{module_name}.rst")
+ for subdir in glob.glob(pj(self.pfolder, *package_parents, "*/")):
if "_" in subdir:
continue
-
subdir = os.path.basename(os.path.normpath(subdir))
- os.makedirs(pj(self.dfolder, subdir), exist_ok=True)
- rst_file = pj(self.dfolder, f"{subdir}.rst")
- self.tree[rst_file] = []
-
- for module_name in glob.glob(pj(self.pfolder, subdir, f"*.py")):
- module_name = os.path.basename(module_name)[:-3]
- if module_name in self.ingnored_modules:
- continue
-
- content = [
- f"{self.name}.{subdir}.{module_name}",
- self.title_line,
- self.automodule.format(f"{self.name}.{subdir}.{module_name}"),
- ]
-
- self.write(pj(self.dfolder, subdir, f"{module_name}.rst"), content)
- self.tree[rst_file].append(f"{subdir}/{module_name}.rst")
-
+ os.makedirs(pj(self.dfolder, *package_parents, subdir), exist_ok=True)
+ rst_file = pj(self.dfolder, *package_parents, f"{subdir}.rst")
+ subdir_filelist = self._file_generate(package_parents + [subdir])
+
+ name = f"{self.name}"
+ for n in package_parents:
+ name += f".{n}"
+ name += f".{subdir}"
content = [
- f"{self.name}.{subdir}",
+ name,
self.title_line,
- self.toctree.format("\n ".join(sorted(self.tree[rst_file]))),
+ self.toctree.format("\n ".join(sorted(subdir_filelist))),
]
self.write(rst_file, content)
- def modules_file(self):
+ if not package_parents:
+ upper = self.dfolder
+ else:
+ upper = package_parents[-1]
+ file_list.append(upper + f"/{subdir}.rst")
+ return file_list
+
+ def modules_file(self, file_list):
"""Write the modules.rst"""
content = [
self.name,
self.title_line,
- self.toctree.format("\n ".join(sorted(self.tree.keys()))),
+ self.toctree.format("\n ".join(sorted(file_list))),
]
self.write("modules.rst", content)
def start(self):
self.cleanup()
- self.single_file_module()
- self.subdir_files_module()
- self.modules_file()
+ file_list = self._file_generate([])
+ self.modules_file(file_list)
if __name__ == "__main__":
diff --git a/docs/source/index.rst b/docs/source/index.rst
index e45dd318..bd055a60 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -1,41 +1,196 @@
-Guide to TensorCircuit
-==================================
+TensorCircuit Documentation
+===========================================================
.. image:: https://github.com/tencent-quantum-lab/tensorcircuit/blob/master/docs/source/statics/logov2.jpg?raw=true
:target: https://github.com/tencent-quantum-lab/tensorcircuit
-TensorCircuit is an open source quantum circuit and algorithm simulation framework.
-* It is built for human beings. 👽
+**Welcome and congratulations! You have found TensorCircuit.** 👏
+
+Introduction
+---------------
+
+TensorCircuit is an open-source high-performance quantum computing software framework in Python.
+
+* It is built for humans. 👽
* It is designed for speed, flexibility and elegance. 🚀
* It is empowered by advanced tensor network simulator engine. 🔋
+* It is ready for quantum hardware access with CPU/GPU/QPU (local/cloud) hybrid solutions. 🖥
+
* It is implemented with industry-standard machine learning frameworks: TensorFlow, JAX, and PyTorch. 🤖
* It is compatible with machine learning engineering paradigms: automatic differentiation, just-in-time compilation, vectorized parallelism and GPU acceleration. 🛠
-Links
-----------
+With the help of TensorCircuit, now get ready to efficiently and elegantly solve interesting and challenging quantum computing problems: from academic research prototype to industry application deployment.
+
+
+
+
+Relevant Links
+--------------------
+
+TensorCircuit is created and maintained by `Shi-Xin Zhang `_ and this version is released by `Tencent Quantum Lab `_.
-TensorCircuit is created and maintained by `Shi-Xin Zhang `_ and this version of the software is released by `Tencent Quantum Lab `_.
The current core authors of TensorCircuit are `Shi-Xin Zhang `_ and `Yu-Qin Chen `_.
-We also thank `contributions `_ from the lab and the open source community.
+We also thank `contributions `_ from the open source community.
+
+If you have any further questions or collaboration ideas, please use the issue tracker or forum below, or send email to shixinzhang#tencent.com.
+
+
+.. card-carousel:: 2
+
+ .. card:: Source code
+ :link: https://github.com/tencent-quantum-lab/tensorcircuit
+ :shadow: md
+
+ GitHub
+
+
+ .. card:: Documentation
+ :link: https://tensorcircuit.readthedocs.io
+ :shadow: md
+
+ Readthedocs
+
+
+ .. card:: Whitepaper
+ :link: https://quantum-journal.org/papers/q-2023-02-02-912/
+ :shadow: md
+
+ *Quantum* journal
+
+
+ .. card:: Issue Tracker
+ :link: https://github.com/tencent-quantum-lab/tensorcircuit/issues
+ :shadow: md
+
+ GitHub Issues
+
+
+ .. card:: Forum
+ :link: https://github.com/tencent-quantum-lab/tensorcircuit/discussions
+ :shadow: md
+
+ GitHub Discussions
+
+
+ .. card:: PyPI
+ :link: https://pypi.org/project/tensorcircuit
+ :shadow: md
+
+ ``pip install``
+
+
+ .. card:: DockerHub
+ :link: https://hub.docker.com/repository/docker/tensorcircuit/tensorcircuit
+ :shadow: md
+
+ ``docker pull``
+
+
+ .. card:: Application
+ :link: https://github.com/tencent-quantum-lab/tensorcircuit#research-and-applications
+ :shadow: md
+
+ Research using TC
+
+
+ .. card:: Cloud
+ :link: https://quantum.tencent.com/cloud
+
+ Tencent Quantum Cloud
+
+
+
+
+..
+ * Source code: https://github.com/tencent-quantum-lab/tensorcircuit
+
+ * Documentation: https://tensorcircuit.readthedocs.io
+
+ * Software Whitepaper (published in Quantum): https://quantum-journal.org/papers/q-2023-02-02-912/
+
+ * Issue Tracker: https://github.com/tencent-quantum-lab/tensorcircuit/issues
+
+ * Forum: https://github.com/tencent-quantum-lab/tensorcircuit/discussions
+
+ * PyPI page: https://pypi.org/project/tensorcircuit
+
+ * DockerHub page: https://hub.docker.com/repository/docker/tensorcircuit/tensorcircuit
+
+ * Research and projects based on TensorCircuit: https://github.com/tencent-quantum-lab/tensorcircuit#research-and-applications
+
+ * Tencent Quantum Cloud Service: https://quantum.tencent.com/cloud/
+
+
+
+Unified Quantum Programming
+------------------------------
+
+TensorCircuit is unifying infrastructures and interfaces for quantum computing.
+
+.. grid:: 1 2 4 4
+ :margin: 0
+ :padding: 0
+ :gutter: 2
+
+ .. grid-item-card:: Unified Backends
+ :columns: 12 6 3 3
+ :shadow: md
+
+ Jax/TensorFlow/PyTorch/Numpy/Cupy
+
+ .. grid-item-card:: Unified Devices
+ :columns: 12 6 3 3
+ :shadow: md
+
+ CPU/GPU/TPU
+
+ .. grid-item-card:: Unified Providers
+ :columns: 12 6 3 3
+ :shadow: md
+
+ QPUs from different vendors
+
+ .. grid-item-card:: Unified Resources
+ :columns: 12 6 3 3
+ :shadow: md
+
+ local/cloud/HPC
+
+
+.. grid:: 1 2 4 4
+ :margin: 0
+ :padding: 0
+ :gutter: 2
+
+ .. grid-item-card:: Unified Interfaces
+ :columns: 12 6 3 3
+ :shadow: md
+
+ numerical sim/hardware exp
-* Source code: https://github.com/tencent-quantum-lab/tensorcircuit
+ .. grid-item-card:: Unified Engines
+ :columns: 12 6 3 3
+ :shadow: md
-* Software Whitepaper in Quantum: https://quantum-journal.org/papers/q-2023-02-02-912/
+ ideal/noisy/approximate simulation
-* Documentation: https://tensorcircuit.readthedocs.io
+ .. grid-item-card:: Unified Representations
+ :columns: 12 6 3 3
+ :shadow: md
-* Issue Tracker: https://github.com/tencent-quantum-lab/tensorcircuit/issues
+ from/to_IR/qiskit/openqasm/json
-* Forum: https://github.com/tencent-quantum-lab/tensorcircuit/discussions
+ .. grid-item-card:: Unified Pipelines
+ :columns: 12 6 3 3
+ :shadow: md
-* PyPI page: https://pypi.org/project/tensorcircuit
+ stateless functional programming/stateful ML models
-* DockerHub page: https://hub.docker.com/repository/docker/tensorcircuit/tensorcircuit
diff --git a/docs/source/infras.rst b/docs/source/infras.rst
index fb65098a..576de2a9 100644
--- a/docs/source/infras.rst
+++ b/docs/source/infras.rst
@@ -32,7 +32,7 @@ Overview of Modules
**ML Interfaces Related Modules:**
-- :py:mod:`tensorcircuit.interfaces`: Provide interfaces when quantum simulation backend is different from neural libraries. Currently include PyTorch and scipy optimizer interfaces.
+- :py:mod:`tensorcircuit.interfaces`: Provide interfaces when quantum simulation backend is different from neural libraries. Currently include PyTorch, TensorFlow, NumPy and SciPy optimizer interfaces.
- :py:mod:`tensorcircuit.keras`: Provide TensorFlow Keras layers, as well as wrappers of jitted function, save/load from tf side.
@@ -62,7 +62,13 @@ Overview of Modules
**Processing and error mitigation on sample results:**
-- :py:mod:`tensorcircuit.results`: Provide tools to process count dict and to apply error mitigation
+- :py:mod:`tensorcircuit.results`: Provide tools to process count dict and to apply error mitigation.
+
+**Cloud quantum hardware access module:**
+
+- :py:mod:`tensorcircuit.cloud`: Provide quantum cloud SDK that can access and program the real quantum hardware.
+
+- :py:mod:`tensorcircuit.compiler`: Provide compiler chains to compile and transform quantum circuits.
**Shortcuts and Templates for Circuit Manipulation:**
@@ -149,4 +155,29 @@ Also, note how ``^`` is overloaded as ``tn.connect`` to connect edges between di
The convention to define the ``QuOperator`` is firstly giving ``out_edges`` (left index or row index of the matrix) and then giving ``in_edges`` (right index or column index of the matrix). The edges list contains edge objects from the TensorNetwork library.
Such QuOperator/QuVector abstraction support various calculations only possible on matrix/vectors, such as matmul (``@``), adjoint (``.adjoint()``), scalar multiplication (``*``), tensor product (``|``), and partial trace (``.partial_trace(subsystems_to_trace_out)``).
-To extract the matrix information of these objects, we can use ``.eval()`` or ``.eval_matrix()``, the former keeps the shape information of the tensor network while the latter gives the matrix representation with shape rank 2.
\ No newline at end of file
+To extract the matrix information of these objects, we can use ``.eval()`` or ``.eval_matrix()``, the former keeps the shape information of the tensor network while the latter gives the matrix representation with shape rank 2.
+
+
+Quantum Cloud SDK: Layerwise API design
+-----------------------------------------------------
+
+From lower level to higher level, a view of API layers invoking QPU calls
+
+- Vendor specific implementation of functional API in, e.g., :py:mod:`tensorcircuit.cloud.tencent`
+
+- Provider agnostic functional lower level API for task/device management in :py:mod:`tensorcircuit.cloud.apis`
+
+- Object oriented abstraction for Provider/Device/Task in :py:mod:`tensorcircuit.cloud.abstraction`
+
+- Unified batch submission interface as standardized in :py:meth:`tensorcircuit.cloud.wrapper.batch_submit_template`
+
+- Numerical and experimental unified all-in-one interface as :py:meth:`tensorcircuit.cloud.wrapper.batch_expectation_ps`
+
+- Application level code with QPU calls built directly on ``batch_expectation_ps`` or more fancy algorithms can be built on ``batch_submit_func`` so that these algorithms can be reused as long as one function ``batch_submit_func`` is defined for a given vendor (cheaper than defining a new provider from lower level).
+
+
+.. Note::
+
+ For compiler, error mitigation and results post-processing parts, they can be carefully designed to decouple with the QPU calls,
+ so they are separately implemented in :py:mod:`tensorcircuit.compiler` and :py:mod:`tensorcircuit.results`,
+ and they can be independently useful even without tc's cloud access.
diff --git a/docs/source/locale/zh/LC_MESSAGES/api.po b/docs/source/locale/zh/LC_MESSAGES/api.po
index ce19c1ca..b7958e1e 100644
--- a/docs/source/locale/zh/LC_MESSAGES/api.po
+++ b/docs/source/locale/zh/LC_MESSAGES/api.po
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: tensorcircuit\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2023-02-02 14:19+0800\n"
+"POT-Creation-Date: 2023-07-14 15:43+0800\n"
"PO-Revision-Date: 2022-04-13 14:58+0800\n"
"Last-Translator: Xinghan Yang\n"
"Language: cn\n"
@@ -16,7 +16,15 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.9.1\n"
+"Generated-By: Babel 2.12.1\n"
+
+#: ../../source/api/about.rst:2
+msgid "tensorcircuit.about"
+msgstr ""
+
+#: of tensorcircuit.about:1 tensorcircuit.about.about:1
+msgid "Prints the information for tensorcircuit installation and environment."
+msgstr ""
#: ../../source/api/abstractcircuit.rst:2
msgid "tensorcircuit.abstractcircuit"
@@ -30,11 +38,17 @@ msgstr ""
#: tensorcircuit.applications.utils.FakeModule:1
#: tensorcircuit.applications.vqes.VQNHE:1
#: tensorcircuit.backends.jax_backend.optax_optimizer:1
+#: tensorcircuit.backends.pytorch_backend.torch_jit_func:1
#: tensorcircuit.backends.pytorch_backend.torch_optimizer:1
#: tensorcircuit.backends.tensorflow_backend.keras_optimizer:1
+#: tensorcircuit.cloud.abstraction.Device:1
+#: tensorcircuit.cloud.abstraction.Provider:1
+#: tensorcircuit.cloud.abstraction.Task:1
+#: tensorcircuit.compiler.composed_compiler.Compiler:1
#: tensorcircuit.gates.GateF:1 tensorcircuit.noisemodel.NoiseConf:1
#: tensorcircuit.quantum.QuOperator:1
#: tensorcircuit.results.readout_mitigation.ReadoutMit:1
+#: tensorcircuit.templates.ensemble.bagging:1
#: tensorcircuit.templates.graphs.Grid2DCoord:1
msgid "Bases: :py:class:`object`"
msgstr ""
@@ -51,46 +65,55 @@ msgstr ""
msgid "example"
msgstr ""
-#: keras.engine.base_layer.Layer.add_loss
-#: keras.engine.base_layer.Layer.add_metric
-#: keras.engine.base_layer.Layer.add_update
-#: keras.engine.base_layer.Layer.add_weight keras.engine.base_layer.Layer.apply
-#: keras.engine.base_layer.Layer.build
-#: keras.engine.base_layer.Layer.compute_mask
-#: keras.engine.base_layer.Layer.compute_output_shape
-#: keras.engine.base_layer.Layer.compute_output_signature
-#: keras.engine.base_layer.Layer.from_config
-#: keras.engine.base_layer.Layer.get_input_at
-#: keras.engine.base_layer.Layer.get_input_mask_at
-#: keras.engine.base_layer.Layer.get_input_shape_at
-#: keras.engine.base_layer.Layer.get_losses_for
-#: keras.engine.base_layer.Layer.get_output_at
-#: keras.engine.base_layer.Layer.get_output_mask_at
-#: keras.engine.base_layer.Layer.get_output_shape_at
-#: keras.engine.base_layer.Layer.get_updates_for
-#: keras.engine.base_layer.Layer.set_weights keras.engine.training.Model.build
-#: keras.engine.training.Model.compile keras.engine.training.Model.evaluate
-#: keras.engine.training.Model.fit keras.engine.training.Model.from_config
-#: keras.engine.training.Model.get_layer
-#: keras.engine.training.Model.load_weights
-#: keras.engine.training.Model.make_predict_function
-#: keras.engine.training.Model.make_test_function
-#: keras.engine.training.Model.make_train_function
-#: keras.engine.training.Model.predict
-#: keras.engine.training.Model.predict_on_batch
-#: keras.engine.training.Model.predict_step keras.engine.training.Model.save
-#: keras.engine.training.Model.save_spec
-#: keras.engine.training.Model.save_weights keras.engine.training.Model.summary
-#: keras.engine.training.Model.test_on_batch
-#: keras.engine.training.Model.test_step keras.engine.training.Model.to_json
-#: keras.engine.training.Model.to_yaml
-#: keras.engine.training.Model.train_on_batch
-#: keras.engine.training.Model.train_step
-#: keras.optimizer_v2.learning_rate_schedule.LearningRateSchedule.from_config
+#: keras.src.engine.base_layer.Layer.add_loss
+#: keras.src.engine.base_layer.Layer.add_metric
+#: keras.src.engine.base_layer.Layer.add_update
+#: keras.src.engine.base_layer.Layer.add_weight
+#: keras.src.engine.base_layer.Layer.build
+#: keras.src.engine.base_layer.Layer.build_from_config
+#: keras.src.engine.base_layer.Layer.compute_mask
+#: keras.src.engine.base_layer.Layer.compute_output_shape
+#: keras.src.engine.base_layer.Layer.compute_output_signature
+#: keras.src.engine.base_layer.Layer.from_config
+#: keras.src.engine.base_layer.Layer.get_input_at
+#: keras.src.engine.base_layer.Layer.get_input_mask_at
+#: keras.src.engine.base_layer.Layer.get_input_shape_at
+#: keras.src.engine.base_layer.Layer.get_output_at
+#: keras.src.engine.base_layer.Layer.get_output_mask_at
+#: keras.src.engine.base_layer.Layer.get_output_shape_at
+#: keras.src.engine.base_layer.Layer.load_own_variables
+#: keras.src.engine.base_layer.Layer.save_own_variables
+#: keras.src.engine.base_layer.Layer.set_weights
+#: keras.src.engine.training.Model.build
+#: keras.src.engine.training.Model.compile
+#: keras.src.engine.training.Model.compile_from_config
+#: keras.src.engine.training.Model.compute_loss
+#: keras.src.engine.training.Model.compute_metrics
+#: keras.src.engine.training.Model.evaluate
+#: keras.src.engine.training.Model.export keras.src.engine.training.Model.fit
+#: keras.src.engine.training.Model.from_config
+#: keras.src.engine.training.Model.get_layer
+#: keras.src.engine.training.Model.load_weights
+#: keras.src.engine.training.Model.make_predict_function
+#: keras.src.engine.training.Model.make_test_function
+#: keras.src.engine.training.Model.make_train_function
+#: keras.src.engine.training.Model.predict
+#: keras.src.engine.training.Model.predict_on_batch
+#: keras.src.engine.training.Model.predict_step
+#: keras.src.engine.training.Model.save
+#: keras.src.engine.training.Model.save_weights
+#: keras.src.engine.training.Model.summary
+#: keras.src.engine.training.Model.test_on_batch
+#: keras.src.engine.training.Model.test_step
+#: keras.src.engine.training.Model.to_json
+#: keras.src.engine.training.Model.to_yaml
+#: keras.src.engine.training.Model.train_on_batch
+#: keras.src.engine.training.Model.train_step
+#: keras.src.optimizers.schedules.learning_rate_schedule.LearningRateSchedule.from_config
#: of tensorcircuit.abstractcircuit.AbstractCircuit.append
#: tensorcircuit.abstractcircuit.AbstractCircuit.append_from_qir
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list
#: tensorcircuit.abstractcircuit.AbstractCircuit.barrier_instruction
#: tensorcircuit.abstractcircuit.AbstractCircuit.cond_measurement
#: tensorcircuit.abstractcircuit.AbstractCircuit.expectation_ps
@@ -99,6 +122,7 @@ msgstr ""
#: tensorcircuit.abstractcircuit.AbstractCircuit.from_qir
#: tensorcircuit.abstractcircuit.AbstractCircuit.from_qiskit
#: tensorcircuit.abstractcircuit.AbstractCircuit.gate_count
+#: tensorcircuit.abstractcircuit.AbstractCircuit.gate_count_by_condition
#: tensorcircuit.abstractcircuit.AbstractCircuit.initial_mapping
#: tensorcircuit.abstractcircuit.AbstractCircuit.inverse
#: tensorcircuit.abstractcircuit.AbstractCircuit.measure_instruction
@@ -169,6 +193,7 @@ msgstr ""
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.reshape2
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.reshapem
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.reverse
+#: tensorcircuit.backends.abstract_backend.ExtendedBackend.scan
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.scatter
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.set_random_state
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.sizen
@@ -183,6 +208,74 @@ msgstr ""
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.tree_map
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.tree_unflatten
#: tensorcircuit.backends.backend_factory.get_backend
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.acos
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.acosh
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.arange
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.argmax
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.argmin
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.asin
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.asinh
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.atan
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.atan2
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.atanh
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.cast
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.concat
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.cond
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.coo_sparse_matrix
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.copy
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.cosh
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.cumsum
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.device
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.device_move
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.dtype
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.eigvalsh
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.eye
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.grad
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.i
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.imag
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.is_sparse
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.is_tensor
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.jvp
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.kron
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.left_shift
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.max
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.mean
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.min
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.mod
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.numpy
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.onehot
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.real
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.relu
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.reshape
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.right_shift
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.scatter
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.searchsorted
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.set_random_state
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.shape_tuple
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.sigmoid
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.sign
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.sinh
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.size
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.softmax
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.solve
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.sparse_dense_matmul
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stack
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stateful_randc
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stateful_randn
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stateful_randu
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.std
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stop_gradient
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.switch
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.tan
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.tanh
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.tensordot
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.tile
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.to_dense
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.unique_with_counts
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.value_and_grad
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.vectorized_value_and_grad
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.vjp
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.vmap
#: tensorcircuit.backends.jax_backend.JaxBackend.acos
#: tensorcircuit.backends.jax_backend.JaxBackend.acosh
#: tensorcircuit.backends.jax_backend.JaxBackend.arange
@@ -227,6 +320,7 @@ msgstr ""
#: tensorcircuit.backends.jax_backend.JaxBackend.real
#: tensorcircuit.backends.jax_backend.JaxBackend.relu
#: tensorcircuit.backends.jax_backend.JaxBackend.right_shift
+#: tensorcircuit.backends.jax_backend.JaxBackend.scan
#: tensorcircuit.backends.jax_backend.JaxBackend.scatter
#: tensorcircuit.backends.jax_backend.JaxBackend.searchsorted
#: tensorcircuit.backends.jax_backend.JaxBackend.set_random_state
@@ -425,6 +519,7 @@ msgstr ""
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.real
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.relu
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.right_shift
+#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.scan
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.scatter
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.searchsorted
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.set_random_state
@@ -471,6 +566,7 @@ msgstr ""
#: tensorcircuit.channels.evol_superop
#: tensorcircuit.channels.generaldepolarizingchannel
#: tensorcircuit.channels.is_hermitian_matrix
+#: tensorcircuit.channels.isotropicdepolarizingchannel
#: tensorcircuit.channels.kraus_identity_check
#: tensorcircuit.channels.kraus_to_choi tensorcircuit.channels.kraus_to_super
#: tensorcircuit.channels.kraus_to_super_gate
@@ -489,6 +585,20 @@ msgstr ""
#: tensorcircuit.circuit.Circuit.replace_mps_inputs
#: tensorcircuit.circuit.Circuit.unitary_kraus
#: tensorcircuit.circuit.Circuit.wavefunction tensorcircuit.circuit.expectation
+#: tensorcircuit.cloud.abstraction.Device.topology_graph
+#: tensorcircuit.cloud.abstraction.Task.details
+#: tensorcircuit.cloud.abstraction.Task.results
+#: tensorcircuit.cloud.apis.get_device tensorcircuit.cloud.apis.get_provider
+#: tensorcircuit.cloud.apis.get_task tensorcircuit.cloud.apis.get_task_details
+#: tensorcircuit.cloud.apis.get_token tensorcircuit.cloud.apis.list_devices
+#: tensorcircuit.cloud.apis.list_properties tensorcircuit.cloud.apis.list_tasks
+#: tensorcircuit.cloud.apis.resubmit_task tensorcircuit.cloud.apis.set_device
+#: tensorcircuit.cloud.apis.set_provider tensorcircuit.cloud.apis.set_token
+#: tensorcircuit.cloud.apis.submit_task tensorcircuit.cloud.tencent.submit_task
+#: tensorcircuit.cloud.utils.set_proxy
+#: tensorcircuit.cloud.wrapper.batch_expectation_ps
+#: tensorcircuit.compiler.composed_compiler.DefaultCompiler.__init__
+#: tensorcircuit.compiler.qiskit_compiler.qiskit_compile
#: tensorcircuit.cons.get_contractor tensorcircuit.cons.get_dtype
#: tensorcircuit.cons.plain_contractor tensorcircuit.cons.runtime_backend
#: tensorcircuit.cons.runtime_dtype tensorcircuit.cons.set_contractor
@@ -501,6 +611,7 @@ msgstr ""
#: tensorcircuit.densitymatrix.DMCircuit.expectation
#: tensorcircuit.densitymatrix.DMCircuit.to_circuit
#: tensorcircuit.densitymatrix.DMCircuit2.apply_general_kraus_delayed..apply
+#: tensorcircuit.experimental.evol_global tensorcircuit.experimental.evol_local
#: tensorcircuit.experimental.hamiltonian_evol
#: tensorcircuit.experimental.parameter_shift_grad
#: tensorcircuit.experimental.parameter_shift_grad_v2
@@ -522,6 +633,7 @@ msgstr ""
#: tensorcircuit.interfaces.tensortrans.numpy_args_to_backend
#: tensorcircuit.interfaces.tensortrans.which_backend
#: tensorcircuit.interfaces.torch.torch_interface
+#: tensorcircuit.interfaces.torch.torch_interface_kws
#: tensorcircuit.keras.QuantumLayer.__init__
#: tensorcircuit.keras.QuantumLayer.build tensorcircuit.keras.load_func
#: tensorcircuit.keras.output_asis_loss tensorcircuit.keras.save_func
@@ -543,6 +655,8 @@ msgstr ""
#: tensorcircuit.mpscircuit.MPSCircuit.wavefunction_to_tensors
#: tensorcircuit.mpscircuit.split_tensor
#: tensorcircuit.noisemodel.NoiseConf.add_noise
+#: tensorcircuit.noisemodel.NoiseConf.add_noise_by_condition
+#: tensorcircuit.noisemodel.NoiseConf.channel_count
#: tensorcircuit.noisemodel.apply_qir_with_noise
#: tensorcircuit.noisemodel.circuit_with_noise
#: tensorcircuit.noisemodel.expectation_noisfy
@@ -579,7 +693,7 @@ msgstr ""
#: tensorcircuit.quantum.gibbs_state
#: tensorcircuit.quantum.heisenberg_hamiltonian tensorcircuit.quantum.identity
#: tensorcircuit.quantum.measurement_counts
-#: tensorcircuit.quantum.mutual_information
+#: tensorcircuit.quantum.mutual_information tensorcircuit.quantum.ps2xyz
#: tensorcircuit.quantum.quantum_constructor tensorcircuit.quantum.quimb2qop
#: tensorcircuit.quantum.reduced_density_matrix
#: tensorcircuit.quantum.renyi_entropy tensorcircuit.quantum.renyi_free_energy
@@ -587,8 +701,15 @@ msgstr ""
#: tensorcircuit.quantum.sample_bin2int tensorcircuit.quantum.sample_int2bin
#: tensorcircuit.quantum.spin_by_basis tensorcircuit.quantum.taylorlnm
#: tensorcircuit.quantum.tn2qop tensorcircuit.quantum.trace_distance
-#: tensorcircuit.quantum.truncated_free_energy
+#: tensorcircuit.quantum.truncated_free_energy tensorcircuit.quantum.xyz2ps
#: tensorcircuit.results.counts.expectation
+#: tensorcircuit.results.counts.plot_histogram
+#: tensorcircuit.results.qem.qem_methods.add_dd
+#: tensorcircuit.results.qem.qem_methods.apply_dd
+#: tensorcircuit.results.qem.qem_methods.apply_rc
+#: tensorcircuit.results.qem.qem_methods.apply_zne
+#: tensorcircuit.results.qem.qem_methods.prune_ddcircuit
+#: tensorcircuit.results.qem.qem_methods.used_qubits
#: tensorcircuit.results.readout_mitigation.ReadoutMit.__init__
#: tensorcircuit.results.readout_mitigation.ReadoutMit.apply_correction
#: tensorcircuit.results.readout_mitigation.ReadoutMit.apply_readout_mitigation
@@ -606,7 +727,6 @@ msgstr ""
#: tensorcircuit.templates.blocks.example_block
#: tensorcircuit.templates.blocks.qft
#: tensorcircuit.templates.blocks.state_centric
-#: tensorcircuit.templates.chems.get_ps
#: tensorcircuit.templates.graphs.Grid2DCoord.__init__
#: tensorcircuit.templates.graphs.Grid2DCoord.all_cols
#: tensorcircuit.templates.graphs.Grid2DCoord.all_rows
@@ -619,6 +739,7 @@ msgstr ""
#: tensorcircuit.templates.measurements.operator_expectation
#: tensorcircuit.templates.measurements.sparse_expectation
#: tensorcircuit.templates.measurements.spin_glass_measurements
+#: tensorcircuit.torchnn.HardwareNet.__init__
#: tensorcircuit.torchnn.QuantumNet.__init__ tensorcircuit.translation.eqasm2tc
#: tensorcircuit.translation.perm_matrix tensorcircuit.translation.qir2cirq
#: tensorcircuit.translation.qir2json tensorcircuit.translation.qir2qiskit
@@ -630,13 +751,23 @@ msgstr ""
#: tensorcircuit.vis.render_pdf
#: tensorflow.python.module.module.Module.with_name_scope
#: tensornetwork.backends.abstract_backend.AbstractBackend.deserialize_tensor
+#: tensornetwork.backends.abstract_backend.AbstractBackend.diagonal
+#: tensornetwork.backends.abstract_backend.AbstractBackend.eigh
#: tensornetwork.backends.abstract_backend.AbstractBackend.eigs
#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh
#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh_lanczos
+#: tensornetwork.backends.abstract_backend.AbstractBackend.eps
#: tensornetwork.backends.abstract_backend.AbstractBackend.gmres
+#: tensornetwork.backends.abstract_backend.AbstractBackend.index_update
+#: tensornetwork.backends.abstract_backend.AbstractBackend.inv
+#: tensornetwork.backends.abstract_backend.AbstractBackend.item
#: tensornetwork.backends.abstract_backend.AbstractBackend.pivot
#: tensornetwork.backends.abstract_backend.AbstractBackend.power
#: tensornetwork.backends.abstract_backend.AbstractBackend.serialize_tensor
+#: tensornetwork.backends.abstract_backend.AbstractBackend.shape_tensor
+#: tensornetwork.backends.abstract_backend.AbstractBackend.slice
+#: tensornetwork.backends.abstract_backend.AbstractBackend.svd
+#: tensornetwork.backends.abstract_backend.AbstractBackend.trace
#: tensornetwork.backends.jax.jax_backend.JaxBackend.diagonal
#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigh
#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigs
@@ -701,7 +832,6 @@ msgstr ""
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.shape_tuple
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.sign
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.slice
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.trace
#: tensornetwork.matrixproductstates.base_mps.BaseMPS.apply_transfer_operator
#: tensornetwork.matrixproductstates.base_mps.BaseMPS.check_orthonormality
@@ -725,18 +855,24 @@ msgstr ""
#: torch.nn.modules.module.Module.get_buffer
#: torch.nn.modules.module.Module.get_parameter
#: torch.nn.modules.module.Module.get_submodule
+#: torch.nn.modules.module.Module.ipu
#: torch.nn.modules.module.Module.load_state_dict
#: torch.nn.modules.module.Module.named_buffers
#: torch.nn.modules.module.Module.named_modules
#: torch.nn.modules.module.Module.named_parameters
#: torch.nn.modules.module.Module.parameters
#: torch.nn.modules.module.Module.register_buffer
+#: torch.nn.modules.module.Module.register_forward_hook
+#: torch.nn.modules.module.Module.register_forward_pre_hook
+#: torch.nn.modules.module.Module.register_full_backward_hook
+#: torch.nn.modules.module.Module.register_full_backward_pre_hook
#: torch.nn.modules.module.Module.register_parameter
#: torch.nn.modules.module.Module.requires_grad_
#: torch.nn.modules.module.Module.set_extra_state
-#: torch.nn.modules.module.Module.to torch.nn.modules.module.Module.to_empty
-#: torch.nn.modules.module.Module.train torch.nn.modules.module.Module.type
-#: torch.nn.modules.module.Module.xpu torch.nn.modules.module.Module.zero_grad
+#: torch.nn.modules.module.Module.state_dict torch.nn.modules.module.Module.to
+#: torch.nn.modules.module.Module.to_empty torch.nn.modules.module.Module.train
+#: torch.nn.modules.module.Module.type torch.nn.modules.module.Module.xpu
+#: torch.nn.modules.module.Module.zero_grad
msgid "Parameters"
msgstr ""
@@ -750,40 +886,44 @@ msgid ""
"means plain concatenation."
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight keras.engine.base_layer.Layer.apply
-#: keras.engine.base_layer.Layer.compute_mask
-#: keras.engine.base_layer.Layer.compute_output_shape
-#: keras.engine.base_layer.Layer.compute_output_signature
-#: keras.engine.base_layer.Layer.count_params
-#: keras.engine.base_layer.Layer.from_config
-#: keras.engine.base_layer.Layer.get_config
-#: keras.engine.base_layer.Layer.get_input_at
-#: keras.engine.base_layer.Layer.get_input_mask_at
-#: keras.engine.base_layer.Layer.get_input_shape_at
-#: keras.engine.base_layer.Layer.get_losses_for
-#: keras.engine.base_layer.Layer.get_output_at
-#: keras.engine.base_layer.Layer.get_output_mask_at
-#: keras.engine.base_layer.Layer.get_output_shape_at
-#: keras.engine.base_layer.Layer.get_updates_for
-#: keras.engine.base_layer.Layer.get_weights
-#: keras.engine.training.Model.evaluate keras.engine.training.Model.fit
-#: keras.engine.training.Model.from_config
-#: keras.engine.training.Model.get_config keras.engine.training.Model.get_layer
-#: keras.engine.training.Model.get_weights
-#: keras.engine.training.Model.load_weights
-#: keras.engine.training.Model.make_predict_function
-#: keras.engine.training.Model.make_test_function
-#: keras.engine.training.Model.make_train_function
-#: keras.engine.training.Model.predict
-#: keras.engine.training.Model.predict_on_batch
-#: keras.engine.training.Model.predict_step
-#: keras.engine.training.Model.save_spec
-#: keras.engine.training.Model.test_on_batch
-#: keras.engine.training.Model.test_step keras.engine.training.Model.to_json
-#: keras.engine.training.Model.to_yaml
-#: keras.engine.training.Model.train_on_batch
-#: keras.engine.training.Model.train_step
-#: keras.optimizer_v2.learning_rate_schedule.LearningRateSchedule.from_config
+#: keras.src.engine.base_layer.Layer.add_weight
+#: keras.src.engine.base_layer.Layer.compute_mask
+#: keras.src.engine.base_layer.Layer.compute_output_shape
+#: keras.src.engine.base_layer.Layer.compute_output_signature
+#: keras.src.engine.base_layer.Layer.count_params
+#: keras.src.engine.base_layer.Layer.from_config
+#: keras.src.engine.base_layer.Layer.get_build_config
+#: keras.src.engine.base_layer.Layer.get_config
+#: keras.src.engine.base_layer.Layer.get_input_at
+#: keras.src.engine.base_layer.Layer.get_input_mask_at
+#: keras.src.engine.base_layer.Layer.get_input_shape_at
+#: keras.src.engine.base_layer.Layer.get_output_at
+#: keras.src.engine.base_layer.Layer.get_output_mask_at
+#: keras.src.engine.base_layer.Layer.get_output_shape_at
+#: keras.src.engine.base_layer.Layer.get_weights
+#: keras.src.engine.training.Model.compute_loss
+#: keras.src.engine.training.Model.compute_metrics
+#: keras.src.engine.training.Model.evaluate keras.src.engine.training.Model.fit
+#: keras.src.engine.training.Model.from_config
+#: keras.src.engine.training.Model.get_compile_config
+#: keras.src.engine.training.Model.get_config
+#: keras.src.engine.training.Model.get_layer
+#: keras.src.engine.training.Model.get_metrics_result
+#: keras.src.engine.training.Model.get_weight_paths
+#: keras.src.engine.training.Model.get_weights
+#: keras.src.engine.training.Model.make_predict_function
+#: keras.src.engine.training.Model.make_test_function
+#: keras.src.engine.training.Model.make_train_function
+#: keras.src.engine.training.Model.predict
+#: keras.src.engine.training.Model.predict_on_batch
+#: keras.src.engine.training.Model.predict_step
+#: keras.src.engine.training.Model.test_on_batch
+#: keras.src.engine.training.Model.test_step
+#: keras.src.engine.training.Model.to_json
+#: keras.src.engine.training.Model.to_yaml
+#: keras.src.engine.training.Model.train_on_batch
+#: keras.src.engine.training.Model.train_step
+#: keras.src.optimizers.schedules.learning_rate_schedule.LearningRateSchedule.from_config
#: of tensorcircuit.abstractcircuit.AbstractCircuit.append
#: tensorcircuit.abstractcircuit.AbstractCircuit.cond_measurement
#: tensorcircuit.abstractcircuit.AbstractCircuit.expectation_ps
@@ -792,6 +932,7 @@ msgstr ""
#: tensorcircuit.abstractcircuit.AbstractCircuit.from_qir
#: tensorcircuit.abstractcircuit.AbstractCircuit.from_qiskit
#: tensorcircuit.abstractcircuit.AbstractCircuit.gate_count
+#: tensorcircuit.abstractcircuit.AbstractCircuit.gate_count_by_condition
#: tensorcircuit.abstractcircuit.AbstractCircuit.gate_summary
#: tensorcircuit.abstractcircuit.AbstractCircuit.get_positional_logical_mapping
#: tensorcircuit.abstractcircuit.AbstractCircuit.initial_mapping
@@ -984,6 +1125,7 @@ msgstr ""
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.reshape2
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.reshapem
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.reverse
+#: tensorcircuit.backends.abstract_backend.ExtendedBackend.scan
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.scatter
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.sizen
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.sparse_dense_matmul
@@ -997,6 +1139,78 @@ msgstr ""
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.tree_map
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.tree_unflatten
#: tensorcircuit.backends.backend_factory.get_backend
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.abs
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.acos
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.acosh
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.arange
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.argmax
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.argmin
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.asin
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.asinh
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.atan
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.atan2
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.atanh
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.cast
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.cond
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.conj
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.coo_sparse_matrix
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.copy
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.cos
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.cosh
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.cumsum
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.device
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.device_move
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.dtype
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.eigvalsh
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.expm
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.grad
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.i
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.imag
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.is_sparse
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.is_tensor
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.jit
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.jvp
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.kron
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.left_shift
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.max
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.mean
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.min
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.mod
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.multiply
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.numpy
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.onehot
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.real
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.relu
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.reshape
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.right_shift
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.scatter
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.searchsorted
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.shape_tuple
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.sigmoid
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.sin
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.sinh
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.size
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.softmax
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.solve
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.sparse_dense_matmul
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stack
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stateful_randc
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stateful_randn
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stateful_randu
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.std
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stop_gradient
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.sum
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.switch
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.tan
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.tanh
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.tile
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.to_dense
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.transpose
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.unique_with_counts
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.value_and_grad
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.vectorized_value_and_grad
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.vjp
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.vmap
#: tensorcircuit.backends.jax_backend.JaxBackend.abs
#: tensorcircuit.backends.jax_backend.JaxBackend.acos
#: tensorcircuit.backends.jax_backend.JaxBackend.acosh
@@ -1043,6 +1257,7 @@ msgstr ""
#: tensorcircuit.backends.jax_backend.JaxBackend.real
#: tensorcircuit.backends.jax_backend.JaxBackend.relu
#: tensorcircuit.backends.jax_backend.JaxBackend.right_shift
+#: tensorcircuit.backends.jax_backend.JaxBackend.scan
#: tensorcircuit.backends.jax_backend.JaxBackend.scatter
#: tensorcircuit.backends.jax_backend.JaxBackend.searchsorted
#: tensorcircuit.backends.jax_backend.JaxBackend.sigmoid
@@ -1250,6 +1465,7 @@ msgstr ""
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.real
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.relu
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.right_shift
+#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.scan
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.scatter
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.searchsorted
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.sigmoid
@@ -1297,6 +1513,7 @@ msgstr ""
#: tensorcircuit.channels.evol_superop
#: tensorcircuit.channels.generaldepolarizingchannel
#: tensorcircuit.channels.is_hermitian_matrix
+#: tensorcircuit.channels.isotropicdepolarizingchannel
#: tensorcircuit.channels.kraus_to_choi tensorcircuit.channels.kraus_to_super
#: tensorcircuit.channels.kraus_to_super_gate
#: tensorcircuit.channels.krausgate_to_krausmatrix
@@ -1312,6 +1529,26 @@ msgstr ""
#: tensorcircuit.circuit.Circuit.measure_reference
#: tensorcircuit.circuit.Circuit.unitary_kraus
#: tensorcircuit.circuit.Circuit.wavefunction tensorcircuit.circuit.expectation
+#: tensorcircuit.cloud.abstraction.Device.list_properties
+#: tensorcircuit.cloud.abstraction.Device.native_gates
+#: tensorcircuit.cloud.abstraction.Device.topology
+#: tensorcircuit.cloud.abstraction.Device.topology_graph
+#: tensorcircuit.cloud.abstraction.Task.details
+#: tensorcircuit.cloud.abstraction.Task.get_device
+#: tensorcircuit.cloud.abstraction.Task.resubmit
+#: tensorcircuit.cloud.abstraction.Task.results
+#: tensorcircuit.cloud.abstraction.Task.state
+#: tensorcircuit.cloud.apis.get_device tensorcircuit.cloud.apis.get_provider
+#: tensorcircuit.cloud.apis.get_task tensorcircuit.cloud.apis.get_task_details
+#: tensorcircuit.cloud.apis.get_token tensorcircuit.cloud.apis.list_devices
+#: tensorcircuit.cloud.apis.list_properties
+#: tensorcircuit.cloud.apis.list_providers tensorcircuit.cloud.apis.list_tasks
+#: tensorcircuit.cloud.apis.resubmit_task tensorcircuit.cloud.apis.set_device
+#: tensorcircuit.cloud.apis.set_provider tensorcircuit.cloud.apis.set_token
+#: tensorcircuit.cloud.apis.submit_task tensorcircuit.cloud.tencent.submit_task
+#: tensorcircuit.cloud.utils.set_proxy
+#: tensorcircuit.cloud.wrapper.batch_expectation_ps
+#: tensorcircuit.compiler.qiskit_compiler.qiskit_compile
#: tensorcircuit.cons.get_contractor tensorcircuit.cons.get_dtype
#: tensorcircuit.cons.plain_contractor tensorcircuit.cons.set_contractor
#: tensorcircuit.cons.set_dtype tensorcircuit.cons.set_function_backend
@@ -1323,6 +1560,7 @@ msgstr ""
#: tensorcircuit.densitymatrix.DMCircuit.get_dm_as_quoperator
#: tensorcircuit.densitymatrix.DMCircuit.to_circuit
#: tensorcircuit.densitymatrix.DMCircuit.wavefunction
+#: tensorcircuit.experimental.evol_global tensorcircuit.experimental.evol_local
#: tensorcircuit.experimental.hamiltonian_evol
#: tensorcircuit.experimental.parameter_shift_grad
#: tensorcircuit.experimental.parameter_shift_grad_v2
@@ -1346,6 +1584,24 @@ msgstr ""
#: tensorcircuit.interfaces.tensortrans.numpy_args_to_backend
#: tensorcircuit.interfaces.tensortrans.which_backend
#: tensorcircuit.interfaces.torch.torch_interface
+#: tensorcircuit.interfaces.torch.torch_interface_kws
+#: tensorcircuit.keras.HardwareLayer.compute_dtype
+#: tensorcircuit.keras.HardwareLayer.input
+#: tensorcircuit.keras.HardwareLayer.input_mask
+#: tensorcircuit.keras.HardwareLayer.input_shape
+#: tensorcircuit.keras.HardwareLayer.input_spec
+#: tensorcircuit.keras.HardwareLayer.losses
+#: tensorcircuit.keras.HardwareLayer.metrics
+#: tensorcircuit.keras.HardwareLayer.non_trainable_variables
+#: tensorcircuit.keras.HardwareLayer.non_trainable_weights
+#: tensorcircuit.keras.HardwareLayer.output
+#: tensorcircuit.keras.HardwareLayer.output_mask
+#: tensorcircuit.keras.HardwareLayer.output_shape
+#: tensorcircuit.keras.HardwareLayer.submodules
+#: tensorcircuit.keras.HardwareLayer.trainable_variables
+#: tensorcircuit.keras.HardwareLayer.trainable_weights
+#: tensorcircuit.keras.HardwareLayer.variables
+#: tensorcircuit.keras.HardwareLayer.weights
#: tensorcircuit.keras.QuantumLayer.compute_dtype
#: tensorcircuit.keras.QuantumLayer.input
#: tensorcircuit.keras.QuantumLayer.input_mask
@@ -1382,6 +1638,7 @@ msgstr ""
#: tensorcircuit.mpscircuit.MPSCircuit.wavefunction
#: tensorcircuit.mpscircuit.MPSCircuit.wavefunction_to_tensors
#: tensorcircuit.mpscircuit.split_tensor
+#: tensorcircuit.noisemodel.NoiseConf.channel_count
#: tensorcircuit.noisemodel.apply_qir_with_noise
#: tensorcircuit.noisemodel.circuit_with_noise
#: tensorcircuit.noisemodel.expectation_noisfy
@@ -1416,7 +1673,7 @@ msgstr ""
#: tensorcircuit.quantum.gibbs_state
#: tensorcircuit.quantum.heisenberg_hamiltonian tensorcircuit.quantum.identity
#: tensorcircuit.quantum.measurement_counts
-#: tensorcircuit.quantum.mutual_information
+#: tensorcircuit.quantum.mutual_information tensorcircuit.quantum.ps2xyz
#: tensorcircuit.quantum.quantum_constructor tensorcircuit.quantum.quimb2qop
#: tensorcircuit.quantum.reduced_density_matrix
#: tensorcircuit.quantum.renyi_entropy tensorcircuit.quantum.renyi_free_energy
@@ -1425,8 +1682,15 @@ msgstr ""
#: tensorcircuit.quantum.spin_by_basis tensorcircuit.quantum.taylorlnm
#: tensorcircuit.quantum.tn2qop tensorcircuit.quantum.trace_distance
#: tensorcircuit.quantum.trace_product
-#: tensorcircuit.quantum.truncated_free_energy
+#: tensorcircuit.quantum.truncated_free_energy tensorcircuit.quantum.xyz2ps
#: tensorcircuit.results.counts.expectation
+#: tensorcircuit.results.counts.plot_histogram
+#: tensorcircuit.results.qem.qem_methods.add_dd
+#: tensorcircuit.results.qem.qem_methods.apply_dd
+#: tensorcircuit.results.qem.qem_methods.apply_rc
+#: tensorcircuit.results.qem.qem_methods.apply_zne
+#: tensorcircuit.results.qem.qem_methods.prune_ddcircuit
+#: tensorcircuit.results.qem.qem_methods.used_qubits
#: tensorcircuit.results.readout_mitigation.ReadoutMit.apply_readout_mitigation
#: tensorcircuit.results.readout_mitigation.ReadoutMit.expectation
#: tensorcircuit.results.readout_mitigation.ReadoutMit.get_matrix
@@ -1442,7 +1706,6 @@ msgstr ""
#: tensorcircuit.templates.blocks.example_block
#: tensorcircuit.templates.blocks.qft
#: tensorcircuit.templates.blocks.state_centric
-#: tensorcircuit.templates.chems.get_ps
#: tensorcircuit.templates.graphs.Grid2DCoord.all_cols
#: tensorcircuit.templates.graphs.Grid2DCoord.all_rows
#: tensorcircuit.templates.graphs.Grid2DCoord.lattice_graph
@@ -1463,16 +1726,32 @@ msgstr ""
#: tensorcircuit.utils.return_partial tensorcircuit.vis.gate_name_trans
#: tensorcircuit.vis.qir2tex tensorcircuit.vis.render_pdf
#: tensorflow.python.module.module.Module.with_name_scope
+#: tensornetwork.backends.abstract_backend.AbstractBackend.addition
+#: tensornetwork.backends.abstract_backend.AbstractBackend.broadcast_left_multiplication
+#: tensornetwork.backends.abstract_backend.AbstractBackend.broadcast_right_multiplication
#: tensornetwork.backends.abstract_backend.AbstractBackend.deserialize_tensor
+#: tensornetwork.backends.abstract_backend.AbstractBackend.diagflat
+#: tensornetwork.backends.abstract_backend.AbstractBackend.diagonal
+#: tensornetwork.backends.abstract_backend.AbstractBackend.divide
+#: tensornetwork.backends.abstract_backend.AbstractBackend.eigh
#: tensornetwork.backends.abstract_backend.AbstractBackend.eigs
#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh
#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh_lanczos
+#: tensornetwork.backends.abstract_backend.AbstractBackend.eps
#: tensornetwork.backends.abstract_backend.AbstractBackend.exp
#: tensornetwork.backends.abstract_backend.AbstractBackend.gmres
+#: tensornetwork.backends.abstract_backend.AbstractBackend.inv
+#: tensornetwork.backends.abstract_backend.AbstractBackend.item
#: tensornetwork.backends.abstract_backend.AbstractBackend.log
+#: tensornetwork.backends.abstract_backend.AbstractBackend.matmul
#: tensornetwork.backends.abstract_backend.AbstractBackend.pivot
#: tensornetwork.backends.abstract_backend.AbstractBackend.power
+#: tensornetwork.backends.abstract_backend.AbstractBackend.random_uniform
#: tensornetwork.backends.abstract_backend.AbstractBackend.serialize_tensor
+#: tensornetwork.backends.abstract_backend.AbstractBackend.shape_tensor
+#: tensornetwork.backends.abstract_backend.AbstractBackend.subtraction
+#: tensornetwork.backends.abstract_backend.AbstractBackend.svd
+#: tensornetwork.backends.abstract_backend.AbstractBackend.trace
#: tensornetwork.backends.jax.jax_backend.JaxBackend.addition
#: tensornetwork.backends.jax.jax_backend.JaxBackend.broadcast_left_multiplication
#: tensornetwork.backends.jax.jax_backend.JaxBackend.broadcast_right_multiplication
@@ -1571,7 +1850,6 @@ msgstr ""
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.shape_tuple
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.subtraction
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.sum
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.trace
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.transpose
#: tensornetwork.matrixproductstates.base_mps.BaseMPS.apply_transfer_operator
@@ -1595,12 +1873,14 @@ msgstr ""
#: torch.nn.modules.module.Module.get_extra_state
#: torch.nn.modules.module.Module.get_parameter
#: torch.nn.modules.module.Module.get_submodule
-#: torch.nn.modules.module.Module.half
+#: torch.nn.modules.module.Module.half torch.nn.modules.module.Module.ipu
#: torch.nn.modules.module.Module.load_state_dict
#: torch.nn.modules.module.Module.register_backward_hook
#: torch.nn.modules.module.Module.register_forward_hook
#: torch.nn.modules.module.Module.register_forward_pre_hook
#: torch.nn.modules.module.Module.register_full_backward_hook
+#: torch.nn.modules.module.Module.register_full_backward_pre_hook
+#: torch.nn.modules.module.Module.register_load_state_dict_post_hook
#: torch.nn.modules.module.Module.requires_grad_
#: torch.nn.modules.module.Module.state_dict torch.nn.modules.module.Module.to
#: torch.nn.modules.module.Module.to_empty torch.nn.modules.module.Module.train
@@ -1621,6 +1901,7 @@ msgstr ""
#: tensorcircuit.abstractcircuit.AbstractCircuit.from_qir
#: tensorcircuit.abstractcircuit.AbstractCircuit.from_qiskit
#: tensorcircuit.abstractcircuit.AbstractCircuit.gate_count
+#: tensorcircuit.abstractcircuit.AbstractCircuit.gate_count_by_condition
#: tensorcircuit.abstractcircuit.AbstractCircuit.gate_summary
#: tensorcircuit.abstractcircuit.AbstractCircuit.get_positional_logical_mapping
#: tensorcircuit.abstractcircuit.AbstractCircuit.initial_mapping
@@ -1653,6 +1934,7 @@ msgstr ""
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.reshape2
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.reshapem
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.reverse
+#: tensorcircuit.backends.abstract_backend.ExtendedBackend.scan
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.scatter
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.sizen
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.sparse_dense_matmul
@@ -1666,6 +1948,70 @@ msgstr ""
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.tree_map
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.tree_unflatten
#: tensorcircuit.backends.backend_factory.get_backend
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.abs
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.acos
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.acosh
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.arange
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.argmax
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.argmin
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.asin
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.asinh
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.atan
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.atan2
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.atanh
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.cast
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.cond
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.coo_sparse_matrix
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.copy
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.cosh
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.cumsum
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.device
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.device_move
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.dtype
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.eigvalsh
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.grad
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.i
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.imag
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.is_sparse
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.is_tensor
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.jit
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.jvp
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.kron
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.left_shift
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.max
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.mean
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.min
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.mod
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.numpy
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.onehot
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.real
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.relu
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.right_shift
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.scatter
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.searchsorted
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.sigmoid
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.sinh
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.size
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.softmax
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.solve
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.sparse_dense_matmul
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stack
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stateful_randc
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stateful_randn
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stateful_randu
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.std
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stop_gradient
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.sum
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.switch
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.tan
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.tanh
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.tile
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.to_dense
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.unique_with_counts
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.value_and_grad
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.vectorized_value_and_grad
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.vjp
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.vmap
#: tensorcircuit.backends.jax_backend.JaxBackend.abs
#: tensorcircuit.backends.jax_backend.JaxBackend.acos
#: tensorcircuit.backends.jax_backend.JaxBackend.acosh
@@ -1710,6 +2056,7 @@ msgstr ""
#: tensorcircuit.backends.jax_backend.JaxBackend.real
#: tensorcircuit.backends.jax_backend.JaxBackend.relu
#: tensorcircuit.backends.jax_backend.JaxBackend.right_shift
+#: tensorcircuit.backends.jax_backend.JaxBackend.scan
#: tensorcircuit.backends.jax_backend.JaxBackend.scatter
#: tensorcircuit.backends.jax_backend.JaxBackend.searchsorted
#: tensorcircuit.backends.jax_backend.JaxBackend.sigmoid
@@ -1907,6 +2254,7 @@ msgstr ""
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.real
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.relu
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.right_shift
+#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.scan
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.scatter
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.searchsorted
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.sigmoid
@@ -1953,6 +2301,7 @@ msgstr ""
#: tensorcircuit.channels.evol_superop
#: tensorcircuit.channels.generaldepolarizingchannel
#: tensorcircuit.channels.is_hermitian_matrix
+#: tensorcircuit.channels.isotropicdepolarizingchannel
#: tensorcircuit.channels.kraus_to_choi tensorcircuit.channels.kraus_to_super
#: tensorcircuit.channels.kraus_to_super_gate
#: tensorcircuit.channels.krausgate_to_krausmatrix
@@ -1968,6 +2317,25 @@ msgstr ""
#: tensorcircuit.circuit.Circuit.measure_reference
#: tensorcircuit.circuit.Circuit.unitary_kraus
#: tensorcircuit.circuit.Circuit.wavefunction tensorcircuit.circuit.expectation
+#: tensorcircuit.cloud.abstraction.Device.list_properties
+#: tensorcircuit.cloud.abstraction.Device.native_gates
+#: tensorcircuit.cloud.abstraction.Device.topology
+#: tensorcircuit.cloud.abstraction.Device.topology_graph
+#: tensorcircuit.cloud.abstraction.Task.details
+#: tensorcircuit.cloud.abstraction.Task.get_device
+#: tensorcircuit.cloud.abstraction.Task.resubmit
+#: tensorcircuit.cloud.abstraction.Task.results
+#: tensorcircuit.cloud.abstraction.Task.state
+#: tensorcircuit.cloud.apis.get_device tensorcircuit.cloud.apis.get_provider
+#: tensorcircuit.cloud.apis.get_task tensorcircuit.cloud.apis.get_task_details
+#: tensorcircuit.cloud.apis.get_token tensorcircuit.cloud.apis.list_devices
+#: tensorcircuit.cloud.apis.list_properties
+#: tensorcircuit.cloud.apis.list_providers tensorcircuit.cloud.apis.list_tasks
+#: tensorcircuit.cloud.apis.resubmit_task tensorcircuit.cloud.apis.set_device
+#: tensorcircuit.cloud.apis.set_provider tensorcircuit.cloud.apis.set_token
+#: tensorcircuit.cloud.apis.submit_task tensorcircuit.cloud.tencent.submit_task
+#: tensorcircuit.cloud.wrapper.batch_expectation_ps
+#: tensorcircuit.compiler.qiskit_compiler.qiskit_compile
#: tensorcircuit.cons.get_contractor tensorcircuit.cons.get_dtype
#: tensorcircuit.cons.plain_contractor tensorcircuit.cons.runtime_backend
#: tensorcircuit.cons.runtime_contractor tensorcircuit.cons.runtime_dtype
@@ -1981,6 +2349,7 @@ msgstr ""
#: tensorcircuit.densitymatrix.DMCircuit.get_dm_as_quoperator
#: tensorcircuit.densitymatrix.DMCircuit.to_circuit
#: tensorcircuit.densitymatrix.DMCircuit.wavefunction
+#: tensorcircuit.experimental.evol_global tensorcircuit.experimental.evol_local
#: tensorcircuit.experimental.hamiltonian_evol
#: tensorcircuit.experimental.parameter_shift_grad
#: tensorcircuit.experimental.parameter_shift_grad_v2
@@ -2003,8 +2372,9 @@ msgstr ""
#: tensorcircuit.interfaces.tensortrans.general_args_to_numpy
#: tensorcircuit.interfaces.tensortrans.numpy_args_to_backend
#: tensorcircuit.interfaces.tensortrans.which_backend
-#: tensorcircuit.interfaces.torch.torch_interface tensorcircuit.keras.load_func
-#: tensorcircuit.keras.output_asis_loss
+#: tensorcircuit.interfaces.torch.torch_interface
+#: tensorcircuit.interfaces.torch.torch_interface_kws
+#: tensorcircuit.keras.load_func tensorcircuit.keras.output_asis_loss
#: tensorcircuit.mps_base.FiniteMPS.apply_two_site_gate
#: tensorcircuit.mps_base.FiniteMPS.measure_local_operator
#: tensorcircuit.mps_base.FiniteMPS.measure_two_body_correlator
@@ -2023,6 +2393,7 @@ msgstr ""
#: tensorcircuit.mpscircuit.MPSCircuit.wavefunction
#: tensorcircuit.mpscircuit.MPSCircuit.wavefunction_to_tensors
#: tensorcircuit.mpscircuit.split_tensor
+#: tensorcircuit.noisemodel.NoiseConf.channel_count
#: tensorcircuit.noisemodel.apply_qir_with_noise
#: tensorcircuit.noisemodel.circuit_with_noise
#: tensorcircuit.noisemodel.expectation_noisfy
@@ -2057,7 +2428,7 @@ msgstr ""
#: tensorcircuit.quantum.gibbs_state
#: tensorcircuit.quantum.heisenberg_hamiltonian tensorcircuit.quantum.identity
#: tensorcircuit.quantum.measurement_counts
-#: tensorcircuit.quantum.mutual_information
+#: tensorcircuit.quantum.mutual_information tensorcircuit.quantum.ps2xyz
#: tensorcircuit.quantum.quantum_constructor tensorcircuit.quantum.quimb2qop
#: tensorcircuit.quantum.reduced_density_matrix
#: tensorcircuit.quantum.renyi_entropy tensorcircuit.quantum.renyi_free_energy
@@ -2066,8 +2437,15 @@ msgstr ""
#: tensorcircuit.quantum.spin_by_basis tensorcircuit.quantum.taylorlnm
#: tensorcircuit.quantum.tn2qop tensorcircuit.quantum.trace_distance
#: tensorcircuit.quantum.trace_product
-#: tensorcircuit.quantum.truncated_free_energy
+#: tensorcircuit.quantum.truncated_free_energy tensorcircuit.quantum.xyz2ps
#: tensorcircuit.results.counts.expectation
+#: tensorcircuit.results.counts.plot_histogram
+#: tensorcircuit.results.qem.qem_methods.add_dd
+#: tensorcircuit.results.qem.qem_methods.apply_dd
+#: tensorcircuit.results.qem.qem_methods.apply_rc
+#: tensorcircuit.results.qem.qem_methods.apply_zne
+#: tensorcircuit.results.qem.qem_methods.prune_ddcircuit
+#: tensorcircuit.results.qem.qem_methods.used_qubits
#: tensorcircuit.results.readout_mitigation.ReadoutMit.apply_readout_mitigation
#: tensorcircuit.results.readout_mitigation.ReadoutMit.expectation
#: tensorcircuit.results.readout_mitigation.ReadoutMit.get_matrix
@@ -2083,7 +2461,6 @@ msgstr ""
#: tensorcircuit.templates.blocks.example_block
#: tensorcircuit.templates.blocks.qft
#: tensorcircuit.templates.blocks.state_centric
-#: tensorcircuit.templates.chems.get_ps
#: tensorcircuit.templates.graphs.Grid2DCoord.all_cols
#: tensorcircuit.templates.graphs.Grid2DCoord.all_rows
#: tensorcircuit.templates.graphs.Grid2DCoord.lattice_graph
@@ -2103,9 +2480,20 @@ msgstr ""
#: tensorcircuit.utils.benchmark tensorcircuit.utils.is_m1mac
#: tensorcircuit.utils.return_partial tensorcircuit.vis.gate_name_trans
#: tensorcircuit.vis.qir2tex tensorcircuit.vis.render_pdf
+#: tensornetwork.backends.abstract_backend.AbstractBackend.broadcast_left_multiplication
+#: tensornetwork.backends.abstract_backend.AbstractBackend.broadcast_right_multiplication
+#: tensornetwork.backends.abstract_backend.AbstractBackend.diagflat
+#: tensornetwork.backends.abstract_backend.AbstractBackend.diagonal
+#: tensornetwork.backends.abstract_backend.AbstractBackend.eigh
#: tensornetwork.backends.abstract_backend.AbstractBackend.eigs
#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh
+#: tensornetwork.backends.abstract_backend.AbstractBackend.eps
#: tensornetwork.backends.abstract_backend.AbstractBackend.gmres
+#: tensornetwork.backends.abstract_backend.AbstractBackend.inv
+#: tensornetwork.backends.abstract_backend.AbstractBackend.matmul
+#: tensornetwork.backends.abstract_backend.AbstractBackend.random_uniform
+#: tensornetwork.backends.abstract_backend.AbstractBackend.svd
+#: tensornetwork.backends.abstract_backend.AbstractBackend.trace
#: tensornetwork.backends.jax.jax_backend.JaxBackend.broadcast_left_multiplication
#: tensornetwork.backends.jax.jax_backend.JaxBackend.broadcast_right_multiplication
#: tensornetwork.backends.jax.jax_backend.JaxBackend.diagflat
@@ -2151,7 +2539,6 @@ msgstr ""
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.inv
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.random_uniform
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.sum
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.trace
#: tensornetwork.matrixproductstates.base_mps.BaseMPS.apply_transfer_operator
#: tensornetwork.matrixproductstates.base_mps.BaseMPS.check_orthonormality
@@ -2168,12 +2555,14 @@ msgstr ""
#: torch.nn.modules.module.Module.get_extra_state
#: torch.nn.modules.module.Module.get_parameter
#: torch.nn.modules.module.Module.get_submodule
-#: torch.nn.modules.module.Module.half
+#: torch.nn.modules.module.Module.half torch.nn.modules.module.Module.ipu
#: torch.nn.modules.module.Module.load_state_dict
#: torch.nn.modules.module.Module.register_backward_hook
#: torch.nn.modules.module.Module.register_forward_hook
#: torch.nn.modules.module.Module.register_forward_pre_hook
#: torch.nn.modules.module.Module.register_full_backward_hook
+#: torch.nn.modules.module.Module.register_full_backward_pre_hook
+#: torch.nn.modules.module.Module.register_load_state_dict_post_hook
#: torch.nn.modules.module.Module.requires_grad_
#: torch.nn.modules.module.Module.state_dict torch.nn.modules.module.Module.to
#: torch.nn.modules.module.Module.to_empty torch.nn.modules.module.Module.train
@@ -2194,7 +2583,10 @@ msgstr ""
#: tensorcircuit.abstractcircuit.AbstractCircuit.from_qir
#: tensorcircuit.abstractcircuit.AbstractCircuit.from_qiskit
#: tensorcircuit.abstractcircuit.AbstractCircuit.gate_count
+#: tensorcircuit.abstractcircuit.AbstractCircuit.gate_count_by_condition
#: tensorcircuit.abstractcircuit.AbstractCircuit.to_qir
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.grad
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.value_and_grad
#: tensorcircuit.backends.jax_backend.JaxBackend.grad
#: tensorcircuit.backends.jax_backend.JaxBackend.value_and_grad
#: tensorcircuit.backends.numpy_backend.NumpyBackend.grad
@@ -2215,20 +2607,23 @@ msgstr ""
#: tensorcircuit.channels.amplitudedampingchannel
#: tensorcircuit.channels.depolarizingchannel
#: tensorcircuit.channels.generaldepolarizingchannel
+#: tensorcircuit.channels.isotropicdepolarizingchannel
#: tensorcircuit.channels.phasedampingchannel
#: tensorcircuit.channels.resetchannel
#: tensorcircuit.channels.thermalrelaxationchannel
#: tensorcircuit.circuit.Circuit.expectation
#: tensorcircuit.circuit.Circuit.measure_reference
#: tensorcircuit.circuit.Circuit.replace_mps_inputs
+#: tensorcircuit.cloud.wrapper.batch_expectation_ps
#: tensorcircuit.cons.set_tensornetwork_backend tensorcircuit.gates.bmatrix
#: tensorcircuit.gates.matrix_for_gate tensorcircuit.gates.num_to_tensor
#: tensorcircuit.interfaces.numpy.numpy_interface
#: tensorcircuit.interfaces.scipy.scipy_optimize_interface
#: tensorcircuit.interfaces.tensorflow.tensorflow_interface
#: tensorcircuit.interfaces.tensortrans.args_to_tensor
-#: tensorcircuit.interfaces.torch.torch_interface tensorcircuit.keras.load_func
-#: tensorcircuit.keras.save_func
+#: tensorcircuit.interfaces.torch.torch_interface
+#: tensorcircuit.interfaces.torch.torch_interface_kws
+#: tensorcircuit.keras.load_func tensorcircuit.keras.save_func
#: tensorcircuit.quantum.QuAdjointVector.from_tensor
#: tensorcircuit.quantum.QuOperator.from_tensor
#: tensorcircuit.quantum.QuOperator.tensor_product
@@ -2242,6 +2637,7 @@ msgstr ""
#: tensorcircuit.quantum.quantum_constructor
#: tensorcircuit.quantum.renyi_free_energy tensorcircuit.quantum.spin_by_basis
#: tensorcircuit.quantum.trace_product tensorcircuit.simplify.infer_new_shape
+#: tensorcircuit.torchnn.HardwareNet.__init__
#: tensorcircuit.torchnn.QuantumNet.__init__ tensorcircuit.translation.qir2cirq
#: tensorcircuit.translation.qir2qiskit tensorcircuit.translation.qiskit2tc
#: tensorcircuit.utils.append tensorcircuit.utils.return_partial
@@ -2266,6 +2662,8 @@ msgid "add a barrier instruction flag, no effect on numerical simulation"
msgstr ""
#: of tensorcircuit.abstractcircuit.AbstractCircuit.barrier_instruction:3
+#: tensorcircuit.abstractcircuit.AbstractCircuit.measure_instruction:3
+#: tensorcircuit.abstractcircuit.AbstractCircuit.reset_instruction:3
msgid "the corresponding qubits"
msgstr ""
@@ -2316,6 +2714,7 @@ msgid ""
"Visualise the circuit. This method recevies the keywords as same as "
"qiskit.circuit.QuantumCircuit.draw. More details can be found here: "
"https://qiskit.org/documentation/stubs/qiskit.circuit.QuantumCircuit.draw.html."
+" Interesting kws options include: ``idle_wires``(bool)"
msgstr ""
#: of tensorcircuit.abstractcircuit.AbstractCircuit.expectation_ps:1
@@ -2340,10 +2739,17 @@ msgid "sites to apply Z gate, defaults to None"
msgstr ""
#: of tensorcircuit.abstractcircuit.AbstractCircuit.expectation_ps:32
+msgid ""
+"or one can apply a ps structures instead of ``x``, ``y``, ``z``, e.g. [0,"
+" 1, 3, 0, 2, 2] for X_1Z_2Y_4Y_5 defaults to None, ``ps`` can overwrite "
+"``x``, ``y`` and ``z``"
+msgstr ""
+
+#: of tensorcircuit.abstractcircuit.AbstractCircuit.expectation_ps:36
msgid "whether to cache and reuse the wavefunction, defaults to True"
msgstr ""
-#: of tensorcircuit.abstractcircuit.AbstractCircuit.expectation_ps:34
+#: of tensorcircuit.abstractcircuit.AbstractCircuit.expectation_ps:38
#: tensorcircuit.basecircuit.BaseCircuit.sample_expectation_ps:46
#: tensorcircuit.circuit.Circuit.expectation:32
#: tensorcircuit.densitymatrix.DMCircuit.expectation:8
@@ -2352,7 +2758,7 @@ msgstr ""
msgid "Noise Configuration, defaults to None"
msgstr ""
-#: of tensorcircuit.abstractcircuit.AbstractCircuit.expectation_ps:36
+#: of tensorcircuit.abstractcircuit.AbstractCircuit.expectation_ps:40
#: tensorcircuit.basecircuit.BaseCircuit.sample_expectation_ps:48
#: tensorcircuit.circuit.Circuit.expectation:34
#: tensorcircuit.noisemodel.expectation_noisfy:7
@@ -2361,7 +2767,7 @@ msgid ""
" to 1000"
msgstr ""
-#: of tensorcircuit.abstractcircuit.AbstractCircuit.expectation_ps:38
+#: of tensorcircuit.abstractcircuit.AbstractCircuit.expectation_ps:42
#: tensorcircuit.basecircuit.BaseCircuit.sample_expectation_ps:50
#: tensorcircuit.circuit.Circuit.expectation:36
#: tensorcircuit.densitymatrix.DMCircuit.expectation:10
@@ -2372,7 +2778,7 @@ msgid ""
"None, used for noisfy circuit sampling"
msgstr ""
-#: of tensorcircuit.abstractcircuit.AbstractCircuit.expectation_ps:41
+#: of tensorcircuit.abstractcircuit.AbstractCircuit.expectation_ps:45
msgid "Expectation value"
msgstr ""
@@ -2385,11 +2791,23 @@ msgstr ""
#: tensorcircuit.abstractcircuit.AbstractCircuit.from_json_file:7
#: tensorcircuit.abstractcircuit.AbstractCircuit.initial_mapping:9
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.from_dlpack:5
+#: tensorcircuit.backends.abstract_backend.ExtendedBackend.scan:4
+#: tensorcircuit.backends.abstract_backend.ExtendedBackend.scan:6
+#: tensorcircuit.backends.abstract_backend.ExtendedBackend.scan:8
+#: tensorcircuit.backends.abstract_backend.ExtendedBackend.scan:10
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.to_dlpack:3
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.to_dlpack:5
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.arange:9
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.mean:9
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.std:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.std:12
#: tensorcircuit.backends.jax_backend.JaxBackend.arange:9
#: tensorcircuit.backends.jax_backend.JaxBackend.from_dlpack:5
#: tensorcircuit.backends.jax_backend.JaxBackend.mean:9
+#: tensorcircuit.backends.jax_backend.JaxBackend.scan:4
+#: tensorcircuit.backends.jax_backend.JaxBackend.scan:6
+#: tensorcircuit.backends.jax_backend.JaxBackend.scan:8
+#: tensorcircuit.backends.jax_backend.JaxBackend.scan:10
#: tensorcircuit.backends.jax_backend.JaxBackend.std:3
#: tensorcircuit.backends.jax_backend.JaxBackend.std:12
#: tensorcircuit.backends.jax_backend.JaxBackend.to_dlpack:3
@@ -2408,6 +2826,10 @@ msgstr ""
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.arange:9
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.from_dlpack:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.mean:9
+#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.scan:4
+#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.scan:6
+#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.scan:8
+#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.scan:10
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.std:3
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.std:12
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.to_dlpack:3
@@ -2415,14 +2837,41 @@ msgstr ""
#: tensorcircuit.basecircuit.BaseCircuit.expectation_before:6
#: tensorcircuit.basecircuit.BaseCircuit.expectation_before:7
#: tensorcircuit.channels.is_hermitian_matrix:9
+#: tensorcircuit.cloud.abstraction.Device.native_gates:3
+#: tensorcircuit.cloud.abstraction.Task.details:8
+#: tensorcircuit.cloud.abstraction.Task.get_device:3
+#: tensorcircuit.cloud.abstraction.Task.state:3
+#: tensorcircuit.cloud.apis.get_device:10
+#: tensorcircuit.cloud.apis.get_provider:8 tensorcircuit.cloud.apis.get_task:3
+#: tensorcircuit.cloud.apis.get_task:9
+#: tensorcircuit.cloud.apis.get_task_details:3
+#: tensorcircuit.cloud.apis.get_task_details:10
+#: tensorcircuit.cloud.apis.get_token:8 tensorcircuit.cloud.apis.list_devices:7
+#: tensorcircuit.cloud.apis.list_providers:3
+#: tensorcircuit.cloud.apis.resubmit_task:3
+#: tensorcircuit.cloud.apis.resubmit_task:7
+#: tensorcircuit.cloud.apis.set_device:10
+#: tensorcircuit.cloud.apis.set_provider:8
+#: tensorcircuit.cloud.apis.set_token:13
#: tensorcircuit.cons.runtime_contractor:3
#: tensorcircuit.cons.set_function_contractor:3
+#: tensorcircuit.experimental.evol_global:4
+#: tensorcircuit.experimental.evol_global:9
+#: tensorcircuit.experimental.evol_global:11
+#: tensorcircuit.experimental.evol_local:4
+#: tensorcircuit.experimental.evol_local:6
+#: tensorcircuit.experimental.evol_local:13
#: tensorcircuit.experimental.hamiltonian_evol:4
#: tensorcircuit.experimental.hamiltonian_evol:6
#: tensorcircuit.experimental.hamiltonian_evol:8 tensorcircuit.gates.u_gate:16
-#: tensorcircuit.quantum.count_vector2dict:9
-#: tensorcircuit.quantum.sample2count:3 tensorcircuit.quantum.sample2count:5
-#: tensorcircuit.quantum.sample2count:9
+#: tensorcircuit.interfaces.torch.torch_interface_kws:22
+#: tensorcircuit.interfaces.torch.torch_interface_kws:28
+#: tensorcircuit.quantum.count_vector2dict:9 tensorcircuit.quantum.ps2xyz:5
+#: tensorcircuit.quantum.ps2xyz:7 tensorcircuit.quantum.sample2count:3
+#: tensorcircuit.quantum.sample2count:5 tensorcircuit.quantum.sample2count:9
+#: tensorcircuit.quantum.xyz2ps:3 tensorcircuit.quantum.xyz2ps:7
+#: tensorcircuit.results.counts.plot_histogram:6
+#: tensorcircuit.results.counts.plot_histogram:8
#: tensorcircuit.templates.graphs.Grid2DCoord.lattice_graph:6
#: tensorcircuit.translation.eqasm2tc:3 tensorcircuit.translation.eqasm2tc:9
#: tensorcircuit.translation.qir2json:3 tensorcircuit.translation.qir2json:8
@@ -2445,8 +2894,24 @@ msgstr ""
#: of tensorcircuit.abstractcircuit.AbstractCircuit.from_json_file:5
#: tensorcircuit.abstractcircuit.AbstractCircuit.initial_mapping:7
+#: tensorcircuit.cloud.apis.get_provider:3 tensorcircuit.cloud.apis.get_task:5
+#: tensorcircuit.cloud.apis.get_task:7
+#: tensorcircuit.cloud.apis.get_task_details:5
+#: tensorcircuit.cloud.apis.get_token:4 tensorcircuit.cloud.apis.get_token:6
+#: tensorcircuit.cloud.apis.list_devices:3
+#: tensorcircuit.cloud.apis.list_devices:5
+#: tensorcircuit.cloud.apis.list_properties:3
+#: tensorcircuit.cloud.apis.list_properties:5
+#: tensorcircuit.cloud.apis.list_properties:7
+#: tensorcircuit.cloud.apis.list_tasks:3 tensorcircuit.cloud.apis.list_tasks:5
+#: tensorcircuit.cloud.apis.list_tasks:7
+#: tensorcircuit.cloud.apis.resubmit_task:5
+#: tensorcircuit.cloud.apis.set_provider:3 tensorcircuit.cloud.apis.set_token:5
+#: tensorcircuit.cloud.apis.set_token:7 tensorcircuit.cloud.apis.submit_task:7
+#: tensorcircuit.cloud.apis.submit_task:9
+#: tensorcircuit.cloud.apis.submit_task:11
#: tensorcircuit.experimental.hamiltonian_evol:10
-#: tensorcircuit.translation.eqasm2tc:5
+#: tensorcircuit.quantum.xyz2ps:5 tensorcircuit.translation.eqasm2tc:5
msgid "_description_, defaults to None"
msgstr ""
@@ -2507,13 +2972,27 @@ msgid "count the gate number of the circuit"
msgstr ""
#: of tensorcircuit.abstractcircuit.AbstractCircuit.gate_count:14
-msgid "gate name list to be counted, defaults to None (counting all gates)"
+msgid ""
+"gate name or gate name list to be counted, defaults to None (counting all"
+" gates)"
msgstr ""
#: of tensorcircuit.abstractcircuit.AbstractCircuit.gate_count:16
msgid "the total number of all gates or gates in the ``gate_list``"
msgstr ""
+#: of tensorcircuit.abstractcircuit.AbstractCircuit.gate_count_by_condition:1
+msgid "count the number of gates that satisfy certain condition"
+msgstr ""
+
+#: of tensorcircuit.abstractcircuit.AbstractCircuit.gate_count_by_condition:14
+msgid "the condition for counting the gate"
+msgstr ""
+
+#: of tensorcircuit.abstractcircuit.AbstractCircuit.gate_count_by_condition:16
+msgid "the total number of all gates which satisfy the ``condition``"
+msgstr ""
+
#: of tensorcircuit.abstractcircuit.AbstractCircuit.gate_summary:1
msgid "return the summary dictionary on gate type - gate count pair"
msgstr ""
@@ -2573,11 +3052,6 @@ msgstr ""
msgid "add a measurement instruction flag, no effect on numerical simulation"
msgstr ""
-#: of tensorcircuit.abstractcircuit.AbstractCircuit.measure_instruction:3
-#: tensorcircuit.abstractcircuit.AbstractCircuit.reset_instruction:3
-msgid "the corresponding qubit"
-msgstr ""
-
#: of tensorcircuit.abstractcircuit.AbstractCircuit.prepend:1
msgid "prepend circuit ``c`` before"
msgstr ""
@@ -2668,6 +3142,10 @@ msgid "Translate ``tc.Circuit`` to a qiskit QuantumCircuit object."
msgstr ""
#: of tensorcircuit.abstractcircuit.AbstractCircuit.to_qiskit:5
+msgid "whether also export the inputs"
+msgstr ""
+
+#: of tensorcircuit.abstractcircuit.AbstractCircuit.to_qiskit:7
msgid "A qiskit object of this circuit."
msgstr ""
@@ -3329,7 +3807,7 @@ msgstr ""
#: of tensorcircuit.applications.van.MADE:1
#: tensorcircuit.applications.van.NMF:1
#: tensorcircuit.applications.van.PixelCNN:1
-msgid "Bases: :py:class:`keras.engine.training.Model`"
+msgid "Bases: :py:class:`~keras.src.engine.training.Model`"
msgstr ""
#: of tensorcircuit.applications.van.MADE.activity_regularizer:1
@@ -3339,15 +3817,16 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.activity_regularizer:1
#: tensorcircuit.applications.van.ResidualBlock.activity_regularizer:1
#: tensorcircuit.applications.vqes.Linear.activity_regularizer:1
+#: tensorcircuit.keras.HardwareLayer.activity_regularizer:1
#: tensorcircuit.keras.QuantumLayer.activity_regularizer:1
msgid "Optional regularizer function for the output of this layer."
msgstr ""
-#: keras.engine.base_layer.Layer.add_loss:1 of
+#: keras.src.engine.base_layer.Layer.add_loss:1 of
msgid "Add loss tensor(s), potentially dependent on layer inputs."
msgstr ""
-#: keras.engine.base_layer.Layer.add_loss:3 of
+#: keras.src.engine.base_layer.Layer.add_loss:3 of
msgid ""
"Some losses (for instance, activity regularization losses) may be "
"dependent on the inputs passed when calling a layer. Hence, when reusing "
@@ -3356,40 +3835,46 @@ msgid ""
"automatically keeps track of dependencies."
msgstr ""
-#: keras.engine.base_layer.Layer.add_loss:9 of
+#: keras.src.engine.base_layer.Layer.add_loss:9 of
msgid ""
"This method can be used inside a subclassed layer or model's `call` "
"function, in which case `losses` should be a Tensor or list of Tensors."
msgstr ""
-#: keras.engine.base_layer.Layer.add_loss:12
-#: keras.engine.base_layer.Layer.add_loss:26
-#: keras.engine.base_layer.Layer.add_loss:42
-#: keras.engine.training.Model.compile:3 keras.engine.training.Model.save:28 of
+#: keras.src.engine.base_layer.Layer.add_loss:12
+#: keras.src.engine.base_layer.Layer.add_loss:32
+#: keras.src.engine.base_layer.Layer.add_loss:48
+#: keras.src.engine.training.Model.compile:3
+#: keras.src.engine.training.Model.export:15
+#: keras.src.engine.training.Model.get_weight_paths:18
+#: keras.src.engine.training.Model.save:38 of
#: tensorcircuit.applications.van.MaskedConv2D.metrics:3
#: tensorcircuit.applications.van.MaskedLinear.metrics:3
#: tensorcircuit.applications.van.ResidualBlock.metrics:3
#: tensorcircuit.applications.vqes.Linear.metrics:3
+#: tensorcircuit.keras.HardwareLayer.metrics:3
#: tensorcircuit.keras.QuantumLayer.metrics:3
msgid "Example:"
msgstr ""
-#: keras.engine.base_layer.Layer.add_loss:14 of
+#: keras.src.engine.base_layer.Layer.add_loss:14 of
msgid "```python class MyLayer(tf.keras.layers.Layer):"
msgstr ""
-#: keras.engine.base_layer.Layer.add_loss:17
-#: keras.engine.base_layer.Layer.add_metric:14 of
+#: keras.src.engine.base_layer.Layer.add_loss:17
+#: keras.src.engine.base_layer.Layer.add_metric:14
+#: keras.src.engine.training.Model.get_weight_paths:30 of
msgid "def call(self, inputs):"
msgstr ""
-#: keras.engine.base_layer.Layer.add_loss:17 of
+#: keras.src.engine.base_layer.Layer.add_loss:17 of
msgid "self.add_loss(tf.abs(tf.reduce_mean(inputs))) return inputs"
msgstr ""
-#: keras.engine.base_layer.Layer.add_loss:19
-#: keras.engine.base_layer.Layer.add_metric:16
-#: keras.engine.training.Model.compile:10 of
+#: keras.src.engine.base_layer.Layer.add_loss:19
+#: keras.src.engine.base_layer.Layer.add_metric:16
+#: keras.src.engine.training.Model.compile:10
+#: keras.src.engine.training.Model.compute_metrics:21 of
#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:21
#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:35
#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:21
@@ -3399,16 +3884,24 @@ msgstr ""
msgid "```"
msgstr ""
-#: keras.engine.base_layer.Layer.add_loss:21 of
+#: keras.src.engine.base_layer.Layer.add_loss:21 of
msgid ""
-"This method can also be called directly on a Functional Model during "
-"construction. In this case, any loss Tensors passed to this Model must be"
-" symbolic and be able to be traced back to the model's `Input`s. These "
-"losses become part of the model's topology and are tracked in "
+"The same code works in distributed training: the input to `add_loss()` is"
+" treated like a regularization loss and averaged across replicas by the "
+"training loop (both built-in `Model.fit()` and compliant custom training "
+"loops)."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.add_loss:26 of
+msgid ""
+"The `add_loss` method can also be called directly on a Functional Model "
+"during construction. In this case, any loss Tensors passed to this Model "
+"must be symbolic and be able to be traced back to the model's `Input`s. "
+"These losses become part of the model's topology and are tracked in "
"`get_config`."
msgstr ""
-#: keras.engine.base_layer.Layer.add_loss:28 of
+#: keras.src.engine.base_layer.Layer.add_loss:34 of
msgid ""
"```python inputs = tf.keras.Input(shape=(10,)) x = "
"tf.keras.layers.Dense(10)(inputs) outputs = tf.keras.layers.Dense(1)(x) "
@@ -3416,7 +3909,7 @@ msgid ""
"model.add_loss(tf.abs(tf.reduce_mean(x))) ```"
msgstr ""
-#: keras.engine.base_layer.Layer.add_loss:37 of
+#: keras.src.engine.base_layer.Layer.add_loss:43 of
msgid ""
"If this is not the case for your loss (if, for example, your loss "
"references a `Variable` of one of the model's layers), you can wrap your "
@@ -3424,7 +3917,7 @@ msgid ""
"the model's topology since they can't be serialized."
msgstr ""
-#: keras.engine.base_layer.Layer.add_loss:44 of
+#: keras.src.engine.base_layer.Layer.add_loss:50 of
msgid ""
"```python inputs = tf.keras.Input(shape=(10,)) d = "
"tf.keras.layers.Dense(10) x = d(inputs) outputs = "
@@ -3433,57 +3926,47 @@ msgid ""
"```"
msgstr ""
-#: keras.engine.base_layer.Layer.add_loss:54 of
+#: keras.src.engine.base_layer.Layer.add_loss:60 of
msgid ""
"Loss tensor, or list/tuple of tensors. Rather than tensors, losses may "
"also be zero-argument callables which create a loss tensor."
msgstr ""
-#: keras.engine.base_layer.Layer.add_loss:56 of
-msgid ""
-"Additional keyword arguments for backward compatibility. Accepted values:"
-" inputs - Deprecated, will be automatically inferred."
-msgstr ""
-
-#: keras.engine.base_layer.Layer.add_loss:56 of
-msgid "Additional keyword arguments for backward compatibility. Accepted values:"
-msgstr ""
-
-#: keras.engine.base_layer.Layer.add_loss:58 of
-msgid "inputs - Deprecated, will be automatically inferred."
+#: keras.src.engine.base_layer.Layer.add_loss:63 of
+msgid "Used for backwards compatibility only."
msgstr ""
-#: keras.engine.base_layer.Layer.add_metric:1 of
+#: keras.src.engine.base_layer.Layer.add_metric:1 of
msgid "Adds metric tensor to the layer."
msgstr ""
-#: keras.engine.base_layer.Layer.add_metric:3 of
+#: keras.src.engine.base_layer.Layer.add_metric:3 of
msgid ""
"This method can be used inside the `call()` method of a subclassed layer "
"or model."
msgstr ""
-#: keras.engine.base_layer.Layer.add_metric:6 of
+#: keras.src.engine.base_layer.Layer.add_metric:6 of
msgid "```python class MyMetricLayer(tf.keras.layers.Layer):"
msgstr ""
-#: keras.engine.base_layer.Layer.add_metric:10 of
+#: keras.src.engine.base_layer.Layer.add_metric:10 of
msgid "def __init__(self):"
msgstr ""
-#: keras.engine.base_layer.Layer.add_metric:9 of
+#: keras.src.engine.base_layer.Layer.add_metric:9 of
msgid ""
"super(MyMetricLayer, self).__init__(name='my_metric_layer') self.mean = "
"tf.keras.metrics.Mean(name='metric_1')"
msgstr ""
-#: keras.engine.base_layer.Layer.add_metric:13 of
+#: keras.src.engine.base_layer.Layer.add_metric:13 of
msgid ""
"self.add_metric(self.mean(inputs)) self.add_metric(tf.reduce_sum(inputs),"
" name='metric_2') return inputs"
msgstr ""
-#: keras.engine.base_layer.Layer.add_metric:18 of
+#: keras.src.engine.base_layer.Layer.add_metric:18 of
msgid ""
"This method can also be called directly on a Functional Model during "
"construction. In this case, any tensor passed to this Model must be "
@@ -3492,7 +3975,7 @@ msgid ""
" the model via `save()`."
msgstr ""
-#: keras.engine.base_layer.Layer.add_metric:24 of
+#: keras.src.engine.base_layer.Layer.add_metric:24 of
msgid ""
"```python inputs = tf.keras.Input(shape=(10,)) x = "
"tf.keras.layers.Dense(10)(inputs) outputs = tf.keras.layers.Dense(1)(x) "
@@ -3500,7 +3983,7 @@ msgid ""
"model.add_metric(math_ops.reduce_sum(x), name='metric_1') ```"
msgstr ""
-#: keras.engine.base_layer.Layer.add_metric:32 of
+#: keras.src.engine.base_layer.Layer.add_metric:32 of
msgid ""
"Note: Calling `add_metric()` with the result of a metric object on a "
"Functional Model, as shown in the example below, is not supported. This "
@@ -3508,7 +3991,7 @@ msgid ""
"inputs."
msgstr ""
-#: keras.engine.base_layer.Layer.add_metric:36 of
+#: keras.src.engine.base_layer.Layer.add_metric:37 of
msgid ""
"```python inputs = tf.keras.Input(shape=(10,)) x = "
"tf.keras.layers.Dense(10)(inputs) outputs = tf.keras.layers.Dense(1)(x) "
@@ -3516,15 +3999,15 @@ msgid ""
"model.add_metric(tf.keras.metrics.Mean()(x), name='metric_1') ```"
msgstr ""
-#: keras.engine.base_layer.Layer.add_metric:44 of
+#: keras.src.engine.base_layer.Layer.add_metric:45 of
msgid "Metric tensor."
msgstr ""
-#: keras.engine.base_layer.Layer.add_metric:45 of
+#: keras.src.engine.base_layer.Layer.add_metric:46 of
msgid "String metric name."
msgstr ""
-#: keras.engine.base_layer.Layer.add_metric:46 of
+#: keras.src.engine.base_layer.Layer.add_metric:47 of
msgid ""
"Additional keyword arguments for backward compatibility. Accepted values:"
" `aggregation` - When the `value` tensor provided is not the result of "
@@ -3532,11 +4015,11 @@ msgid ""
" a `keras.Metric.Mean`."
msgstr ""
-#: keras.engine.base_layer.Layer.add_update:1 of
+#: keras.src.engine.base_layer.Layer.add_update:1 of
msgid "Add update op(s), potentially dependent on layer inputs."
msgstr ""
-#: keras.engine.base_layer.Layer.add_update:3 of
+#: keras.src.engine.base_layer.Layer.add_update:3 of
msgid ""
"Weight updates (for instance, the updates of the moving mean and variance"
" in a BatchNormalization layer) may be dependent on the inputs passed "
@@ -3546,14 +4029,14 @@ msgid ""
"dependencies."
msgstr ""
-#: keras.engine.base_layer.Layer.add_update:10 of
+#: keras.src.engine.base_layer.Layer.add_update:10 of
msgid ""
"This call is ignored when eager execution is enabled (in that case, "
"variable updates are run on the fly and thus do not need to be tracked "
"for later execution)."
msgstr ""
-#: keras.engine.base_layer.Layer.add_update:14 of
+#: keras.src.engine.base_layer.Layer.add_update:14 of
msgid ""
"Update op, or list/tuple of update ops, or zero-arg callable that returns"
" an update op. A zero-arg callable should be passed in order to disable "
@@ -3561,39 +4044,35 @@ msgid ""
"executing in Eager mode."
msgstr ""
-#: keras.engine.base_layer.Layer.add_update:18 of
-msgid "Deprecated, will be automatically inferred."
-msgstr ""
-
-#: keras.engine.base_layer.Layer.add_variable:1 of
+#: keras.src.engine.base_layer.Layer.add_variable:1 of
msgid "Deprecated, do NOT use! Alias for `add_weight`."
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight:1 of
+#: keras.src.engine.base_layer.Layer.add_weight:1 of
msgid "Adds a new variable to the layer."
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight:3 of
+#: keras.src.engine.base_layer.Layer.add_weight:3 of
msgid "Variable name."
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight:4 of
+#: keras.src.engine.base_layer.Layer.add_weight:4 of
msgid "Variable shape. Defaults to scalar if unspecified."
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight:5 of
+#: keras.src.engine.base_layer.Layer.add_weight:5 of
msgid "The type of the variable. Defaults to `self.dtype`."
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight:6 of
+#: keras.src.engine.base_layer.Layer.add_weight:6 of
msgid "Initializer instance (callable)."
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight:7 of
+#: keras.src.engine.base_layer.Layer.add_weight:7 of
msgid "Regularizer instance (callable)."
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight:8 of
+#: keras.src.engine.base_layer.Layer.add_weight:8 of
msgid ""
"Boolean, whether the variable should be part of the layer's "
"\"trainable_variables\" (e.g. variables, biases) or "
@@ -3601,15 +4080,28 @@ msgid ""
" `trainable` cannot be `True` if `synchronization` is set to `ON_READ`."
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight:13 of
+#: keras.src.engine.base_layer.Layer.add_weight:13 of
msgid "Constraint instance (callable)."
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight:14 of
-msgid "Whether to use `ResourceVariable`."
+#: keras.src.engine.base_layer.Layer.add_weight:14 of
+msgid ""
+"Whether to use a `ResourceVariable` or not. See [this guide]( "
+"https://www.tensorflow.org/guide/migrate/tf1_vs_tf2#resourcevariables_instead_of_referencevariables)"
+" for more information."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.add_weight:14 of
+msgid ""
+"Whether to use a `ResourceVariable` or not. See [this guide]( "
+"https://www.tensorflow.org/guide/migrate/tf1_vs_tf2#resourcevariables_instead_of_referencevariables)"
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.add_weight:17 of
+msgid "for more information."
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight:15 of
+#: keras.src.engine.base_layer.Layer.add_weight:18 of
msgid ""
"Indicates when a distributed a variable will be aggregated. Accepted "
"values are constants defined in the class `tf.VariableSynchronization`. "
@@ -3618,37 +4110,39 @@ msgid ""
"is set to `ON_READ`, `trainable` must not be set to `True`."
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight:21 of
+#: keras.src.engine.base_layer.Layer.add_weight:24 of
msgid ""
"Indicates how a distributed variable will be aggregated. Accepted values "
"are constants defined in the class `tf.VariableAggregation`."
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight:24 of
+#: keras.src.engine.base_layer.Layer.add_weight:27 of
msgid ""
"Additional keyword arguments. Accepted values are `getter`, "
"`collections`, `experimental_autocast` and `caching_device`."
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight:27 of
+#: keras.src.engine.base_layer.Layer.add_weight:30 of
msgid "The variable created."
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight
-#: keras.engine.base_layer.Layer.compute_output_signature
-#: keras.engine.base_layer.Layer.count_params
-#: keras.engine.base_layer.Layer.get_input_at
-#: keras.engine.base_layer.Layer.get_input_shape_at
-#: keras.engine.base_layer.Layer.get_output_at
-#: keras.engine.base_layer.Layer.get_output_shape_at
-#: keras.engine.base_layer.Layer.set_weights keras.engine.training.Model.build
-#: keras.engine.training.Model.evaluate keras.engine.training.Model.fit
-#: keras.engine.training.Model.load_weights keras.engine.training.Model.predict
-#: keras.engine.training.Model.predict_on_batch
-#: keras.engine.training.Model.save_weights keras.engine.training.Model.summary
-#: keras.engine.training.Model.test_on_batch
-#: keras.engine.training.Model.to_yaml
-#: keras.engine.training.Model.train_on_batch of
+#: keras.src.engine.base_layer.Layer.add_weight
+#: keras.src.engine.base_layer.Layer.compute_output_signature
+#: keras.src.engine.base_layer.Layer.count_params
+#: keras.src.engine.base_layer.Layer.get_input_at
+#: keras.src.engine.base_layer.Layer.get_input_shape_at
+#: keras.src.engine.base_layer.Layer.get_output_at
+#: keras.src.engine.base_layer.Layer.get_output_shape_at
+#: keras.src.engine.base_layer.Layer.set_weights
+#: keras.src.engine.training.Model.build
+#: keras.src.engine.training.Model.evaluate keras.src.engine.training.Model.fit
+#: keras.src.engine.training.Model.predict
+#: keras.src.engine.training.Model.predict_on_batch
+#: keras.src.engine.training.Model.save_weights
+#: keras.src.engine.training.Model.summary
+#: keras.src.engine.training.Model.test_on_batch
+#: keras.src.engine.training.Model.to_yaml
+#: keras.src.engine.training.Model.train_on_batch of
#: tensorcircuit.applications.van.MADE.input
#: tensorcircuit.applications.van.MADE.input_mask
#: tensorcircuit.applications.van.MADE.input_shape
@@ -3698,7 +4192,13 @@ msgstr ""
#: tensorcircuit.basecircuit.BaseCircuit.expectation_before
#: tensorcircuit.circuit.Circuit.expectation tensorcircuit.circuit.expectation
#: tensorcircuit.cons.get_contractor tensorcircuit.cons.set_contractor
-#: tensorcircuit.gates.bmatrix tensorcircuit.keras.QuantumLayer.input
+#: tensorcircuit.gates.bmatrix tensorcircuit.keras.HardwareLayer.input
+#: tensorcircuit.keras.HardwareLayer.input_mask
+#: tensorcircuit.keras.HardwareLayer.input_shape
+#: tensorcircuit.keras.HardwareLayer.output
+#: tensorcircuit.keras.HardwareLayer.output_mask
+#: tensorcircuit.keras.HardwareLayer.output_shape
+#: tensorcircuit.keras.QuantumLayer.input
#: tensorcircuit.keras.QuantumLayer.input_mask
#: tensorcircuit.keras.QuantumLayer.input_shape
#: tensorcircuit.keras.QuantumLayer.output
@@ -3725,52 +4225,23 @@ msgstr ""
msgid "Raises"
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight:29 of
+#: keras.src.engine.base_layer.Layer.add_weight:32 of
msgid ""
"When giving unsupported dtype and no initializer or when trainable "
-"has been set to True with synchronization set as `ON_READ`."
-msgstr ""
-
-#: keras.engine.base_layer.Layer.apply:1
-#: keras.engine.base_layer.Layer.get_losses_for:1
-#: keras.engine.base_layer.Layer.get_updates_for:1 of
-#: tensorcircuit.applications.van.MADE.state_updates:1
-#: tensorcircuit.applications.van.NMF.state_updates:1
-#: tensorcircuit.applications.van.PixelCNN.state_updates:1
-msgid "Deprecated, do NOT use!"
-msgstr ""
-
-#: keras.engine.base_layer.Layer.apply:3 of
-msgid "This is an alias of `self.__call__`."
-msgstr ""
-
-#: keras.engine.base_layer.Layer.apply:5 of
-msgid "Input tensor(s)."
-msgstr ""
-
-#: keras.engine.base_layer.Layer.apply:6 of
-msgid "additional positional arguments to be passed to `self.call`."
-msgstr ""
-
-#: keras.engine.base_layer.Layer.apply:7 of
-msgid "additional keyword arguments to be passed to `self.call`."
-msgstr ""
-
-#: keras.engine.base_layer.Layer.apply:9 of
-msgid "Output tensor(s)."
+"has been set to True with synchronization set as `ON_READ`."
msgstr ""
-#: keras.engine.training.Model.build:1 of
+#: keras.src.engine.training.Model.build:1 of
msgid "Builds the model based on input shapes received."
msgstr ""
-#: keras.engine.training.Model.build:3 of
+#: keras.src.engine.training.Model.build:3 of
msgid ""
"This is to be used for subclassed models, which do not know at "
"instantiation time what their inputs look like."
msgstr ""
-#: keras.engine.training.Model.build:6 of
+#: keras.src.engine.training.Model.build:6 of
msgid ""
"This method only exists for users who want to call `model.build()` in a "
"standalone way (as a substitute for calling the model on real data to "
@@ -3778,28 +4249,44 @@ msgid ""
"never throw unexpected errors in an unrelated workflow)."
msgstr ""
-#: keras.engine.training.Model.build:11 of
+#: keras.src.engine.training.Model.build:11 of
msgid ""
"Single tuple, `TensorShape` instance, or list/dict of shapes, where "
"shapes are tuples, integers, or `TensorShape` instances."
msgstr ""
-#: keras.engine.training.Model.build:14 of
+#: keras.src.engine.training.Model.build:15 of
msgid ""
"1. In case of invalid user-provided data (not of type tuple, list,"
" `TensorShape`, or dict). 2. If the model requires call arguments "
"that are agnostic to the input shapes (positional or keyword arg "
-"in call signature). 3. If not all layers were properly built. 4. "
-"If float type inputs are not supported within the layers."
+"in call signature). 3. If not all layers were properly built."
+" 4. If float type inputs are not supported within the layers."
msgstr ""
-#: keras.engine.training.Model.build:14 of
+#: keras.src.engine.training.Model.build:15 of
msgid ""
"In case of invalid user-provided data (not of type tuple, list, "
"`TensorShape`, or dict). 2. If the model requires call arguments that"
" are agnostic to the input shapes (positional or keyword arg in "
-"call signature). 3. If not all layers were properly built. 4. If "
-"float type inputs are not supported within the layers."
+"call signature). 3. If not all layers were properly built."
+" 4. If float type inputs are not supported within the layers."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.build_from_config:1 of
+msgid "Builds the layer's states with the supplied config dict."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.build_from_config:3 of
+msgid ""
+"By default, this method calls the `build(config[\"input_shape\"])` "
+"method, which creates weights based on the layer's input shape in the "
+"supplied config. If your config contains other information needed to load"
+" the layer's state, you should override this method."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.build_from_config:8 of
+msgid "Dict containing the input shape associated with this layer."
msgstr ""
#: of tensorcircuit.applications.van.MADE.call:1
@@ -3845,24 +4332,10 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.call:15
msgid ""
"A mask or list of masks. A mask can be either a boolean tensor or None "
-"(no mask). For more details, check the guide "
+"(no mask). For more details, check the guide "
"[here](https://www.tensorflow.org/guide/keras/masking_and_padding)."
msgstr ""
-#: of tensorcircuit.applications.van.MADE.call:15
-#: tensorcircuit.applications.van.NMF.call:15
-#: tensorcircuit.applications.van.PixelCNN.call:15
-msgid ""
-"A mask or list of masks. A mask can be either a boolean tensor or None "
-"(no mask). For more details, check the guide"
-msgstr ""
-
-#: of tensorcircuit.applications.van.MADE.call:17
-#: tensorcircuit.applications.van.NMF.call:17
-#: tensorcircuit.applications.van.PixelCNN.call:17
-msgid "[here](https://www.tensorflow.org/guide/keras/masking_and_padding)."
-msgstr ""
-
#: of tensorcircuit.applications.van.MADE.call:19
#: tensorcircuit.applications.van.NMF.call:19
#: tensorcircuit.applications.van.PixelCNN.call:19
@@ -3871,35 +4344,35 @@ msgid ""
"more than one outputs."
msgstr ""
-#: keras.engine.training.Model.compile:1 of
+#: keras.src.engine.training.Model.compile:1 of
msgid "Configures the model for training."
msgstr ""
-#: keras.engine.training.Model.compile:5 of
+#: keras.src.engine.training.Model.compile:5 of
msgid ""
"```python "
"model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),"
msgstr ""
-#: keras.engine.training.Model.compile:7 of
+#: keras.src.engine.training.Model.compile:7 of
msgid ""
"loss=tf.keras.losses.BinaryCrossentropy(), "
"metrics=[tf.keras.metrics.BinaryAccuracy(),"
msgstr ""
-#: keras.engine.training.Model.compile:9 of
+#: keras.src.engine.training.Model.compile:9 of
msgid "tf.keras.metrics.FalseNegatives()])"
msgstr ""
-#: keras.engine.training.Model.compile:12 of
+#: keras.src.engine.training.Model.compile:12 of
msgid ""
"String (name of optimizer) or optimizer instance. See "
"`tf.keras.optimizers`."
msgstr ""
-#: keras.engine.training.Model.compile:14 of
+#: keras.src.engine.training.Model.compile:14 of
msgid ""
-"Loss function. Maybe be a string (name of loss function), or a "
+"Loss function. May be a string (name of loss function), or a "
"`tf.keras.losses.Loss` instance. See `tf.keras.losses`. A loss function "
"is any callable with the signature `loss = fn(y_true, y_pred)`, where "
"`y_true` are the ground truth values, and `y_pred` are the model's "
@@ -3916,7 +4389,7 @@ msgid ""
"individual losses, unless `loss_weights` is specified."
msgstr ""
-#: keras.engine.training.Model.compile:34 of
+#: keras.src.engine.training.Model.compile:34 of
msgid ""
"List of metrics to be evaluated by the model during training and testing."
" Each of this can be a string (name of a built-in function), function or "
@@ -3924,80 +4397,117 @@ msgid ""
"you will use `metrics=['accuracy']`. A function is any callable with the "
"signature `result = fn(y_true, y_pred)`. To specify different metrics for"
" different outputs of a multi-output model, you could also pass a "
-"dictionary, such as `metrics={'output_a': 'accuracy', 'output_b': "
-"['accuracy', 'mse']}`. You can also pass a list to specify a metric or a "
-"list of metrics for each output, such as `metrics=[['accuracy'], "
-"['accuracy', 'mse']]` or `metrics=['accuracy', ['accuracy', 'mse']]`. "
-"When you pass the strings 'accuracy' or 'acc', we convert this to one of "
-"`tf.keras.metrics.BinaryAccuracy`, "
+"dictionary, such as `metrics={'output_a':'accuracy', "
+"'output_b':['accuracy', 'mse']}`. You can also pass a list to specify a "
+"metric or a list of metrics for each output, such as "
+"`metrics=[['accuracy'], ['accuracy', 'mse']]` or `metrics=['accuracy', "
+"['accuracy', 'mse']]`. When you pass the strings 'accuracy' or 'acc', we "
+"convert this to one of `tf.keras.metrics.BinaryAccuracy`, "
"`tf.keras.metrics.CategoricalAccuracy`, "
-"`tf.keras.metrics.SparseCategoricalAccuracy` based on the loss function "
-"used and the model output shape. We do a similar conversion for the "
-"strings 'crossentropy' and 'ce' as well."
+"`tf.keras.metrics.SparseCategoricalAccuracy` based on the shapes of the "
+"targets and of the model output. We do a similar conversion for the "
+"strings 'crossentropy' and 'ce' as well. The metrics passed here are "
+"evaluated without sample weighting; if you would like sample weighting to"
+" apply, you can specify your metrics via the `weighted_metrics` argument "
+"instead."
msgstr ""
-#: keras.engine.training.Model.compile:51 of
+#: keras.src.engine.training.Model.compile:56 of
msgid ""
"Optional list or dictionary specifying scalar coefficients (Python "
"floats) to weight the loss contributions of different model outputs. The "
"loss value that will be minimized by the model will then be the *weighted"
" sum* of all individual losses, weighted by the `loss_weights` "
-"coefficients. If a list, it is expected to have a 1:1 mapping to the "
-"model's outputs. If a dict, it is expected to map output names "
-"(strings) to scalar coefficients."
+"coefficients. If a list, it is expected to have a 1:1 mapping to the "
+"model's outputs. If a dict, it is expected to map output names (strings) "
+"to scalar coefficients."
msgstr ""
-#: keras.engine.training.Model.compile:51 of
+#: keras.src.engine.training.Model.compile:64 of
msgid ""
-"Optional list or dictionary specifying scalar coefficients (Python "
-"floats) to weight the loss contributions of different model outputs. The "
-"loss value that will be minimized by the model will then be the *weighted"
-" sum* of all individual losses, weighted by the `loss_weights` "
-"coefficients."
+"List of metrics to be evaluated and weighted by `sample_weight` or "
+"`class_weight` during training and testing."
msgstr ""
-#: keras.engine.training.Model.compile:57 of
-msgid "If a list, it is expected to have a 1:1 mapping to the model's"
+#: keras.src.engine.training.Model.compile:66 of
+msgid ""
+"Bool. If `True`, this `Model`'s logic will not be wrapped in a "
+"`tf.function`. Recommended to leave this as `None` unless your `Model` "
+"cannot be run inside a `tf.function`. `run_eagerly=True` is not supported"
+" when using `tf.distribute.experimental.ParameterServerStrategy`. "
+"Defaults to `False`."
msgstr ""
-#: keras.engine.training.Model.compile:57 of
+#: keras.src.engine.training.Model.compile:66 of
msgid ""
-"outputs. If a dict, it is expected to map output names (strings) to "
-"scalar coefficients."
+"Bool. If `True`, this `Model`'s logic will not be wrapped in a "
+"`tf.function`. Recommended to leave this as `None` unless your `Model` "
+"cannot be run inside a `tf.function`. `run_eagerly=True` is not supported"
+" when using `tf.distribute.experimental.ParameterServerStrategy`. "
+"Defaults to"
+msgstr ""
+
+#: keras.src.engine.training.Model.compile:71 of
+msgid "`False`."
msgstr ""
-#: keras.engine.training.Model.compile:59 of
+#: keras.src.engine.training.Model.compile:72 of
msgid ""
-"List of metrics to be evaluated and weighted by `sample_weight` or "
-"`class_weight` during training and testing."
+"Int. The number of batches to run during each `tf.function` call. Running"
+" multiple batches inside a single `tf.function` call can greatly improve "
+"performance on TPUs or small models with a large Python overhead. At "
+"most, one full epoch will be run each execution. If a number larger than "
+"the size of the epoch is passed, the execution will be truncated to the "
+"size of the epoch. Note that if `steps_per_execution` is set to `N`, "
+"`Callback.on_batch_begin` and `Callback.on_batch_end` methods will only "
+"be called every `N` batches (i.e. before/after each `tf.function` "
+"execution). Defaults to `1`."
msgstr ""
-#: keras.engine.training.Model.compile:61 of
+#: keras.src.engine.training.Model.compile:82 of
msgid ""
-"Bool. Defaults to `False`. If `True`, this `Model`'s logic will not be "
-"wrapped in a `tf.function`. Recommended to leave this as `None` unless "
-"your `Model` cannot be run inside a `tf.function`. `run_eagerly=True` is "
-"not supported when using "
-"`tf.distribute.experimental.ParameterServerStrategy`."
+"If `True`, compile the model training step with XLA. "
+"[XLA](https://www.tensorflow.org/xla) is an optimizing compiler for "
+"machine learning. `jit_compile` is not enabled for by default. Note that "
+"`jit_compile=True` may not necessarily work for all models. For more "
+"information on supported operations please refer to the [XLA "
+"documentation](https://www.tensorflow.org/xla). Also refer to [known XLA "
+"issues](https://www.tensorflow.org/xla/known_issues) for more details."
msgstr ""
-#: keras.engine.training.Model.compile:66 of
+#: keras.src.engine.training.Model.compile:93 of
msgid ""
-"Int. Defaults to 1. The number of batches to run during each "
-"`tf.function` call. Running multiple batches inside a single "
-"`tf.function` call can greatly improve performance on TPUs or small "
-"models with a large Python overhead. At most, one full epoch will be run "
-"each execution. If a number larger than the size of the epoch is passed, "
-"the execution will be truncated to the size of the epoch. Note that if "
-"`steps_per_execution` is set to `N`, `Callback.on_batch_begin` and "
-"`Callback.on_batch_end` methods will only be called every `N` batches "
-"(i.e. before/after each `tf.function` execution)."
+"Integer or 'auto'. Used for `tf.distribute.ParameterServerStrategy` "
+"training only. This arg sets the number of shards to split the dataset "
+"into, to enable an exact visitation guarantee for evaluation, meaning the"
+" model will be applied to each dataset element exactly once, even if "
+"workers fail. The dataset must be sharded to ensure separate workers do "
+"not process the same data. The number of shards should be at least the "
+"number of workers for good performance. A value of 'auto' turns on exact "
+"evaluation and uses a heuristic for the number of shards based on the "
+"number of workers. 0, meaning no visitation guarantee is provided. NOTE: "
+"Custom implementations of `Model.test_step` will be ignored when doing "
+"exact evaluation. Defaults to `0`."
msgstr ""
-#: keras.engine.training.Model.compile:77 of
+#: keras.src.engine.training.Model.compile:106 of
msgid "Arguments supported for backwards compatibility only."
msgstr ""
+#: keras.src.engine.training.Model.compile_from_config:1 of
+msgid "Compiles the model with the information given in config."
+msgstr ""
+
+#: keras.src.engine.training.Model.compile_from_config:3 of
+msgid ""
+"This method uses the information in the config (optimizer, loss, metrics,"
+" etc.) to compile the model."
+msgstr ""
+
+#: keras.src.engine.training.Model.compile_from_config:6 of
+msgid "Dict containing information for compiling the model."
+msgstr ""
+
#: of tensorcircuit.applications.van.MADE.compute_dtype:1
#: tensorcircuit.applications.van.MaskedConv2D.compute_dtype:1
#: tensorcircuit.applications.van.MaskedLinear.compute_dtype:1
@@ -4005,6 +4515,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.compute_dtype:1
#: tensorcircuit.applications.van.ResidualBlock.compute_dtype:1
#: tensorcircuit.applications.vqes.Linear.compute_dtype:1
+#: tensorcircuit.keras.HardwareLayer.compute_dtype:1
#: tensorcircuit.keras.QuantumLayer.compute_dtype:1
msgid "The dtype of the layer's computations."
msgstr ""
@@ -4016,6 +4527,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.compute_dtype:3
#: tensorcircuit.applications.van.ResidualBlock.compute_dtype:3
#: tensorcircuit.applications.vqes.Linear.compute_dtype:3
+#: tensorcircuit.keras.HardwareLayer.compute_dtype:3
#: tensorcircuit.keras.QuantumLayer.compute_dtype:3
msgid ""
"This is equivalent to `Layer.dtype_policy.compute_dtype`. Unless mixed "
@@ -4030,6 +4542,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.compute_dtype:7
#: tensorcircuit.applications.van.ResidualBlock.compute_dtype:7
#: tensorcircuit.applications.vqes.Linear.compute_dtype:7
+#: tensorcircuit.keras.HardwareLayer.compute_dtype:7
#: tensorcircuit.keras.QuantumLayer.compute_dtype:7
msgid ""
"Layers automatically cast their inputs to the compute dtype, which causes"
@@ -4045,6 +4558,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.compute_dtype:12
#: tensorcircuit.applications.van.ResidualBlock.compute_dtype:12
#: tensorcircuit.applications.vqes.Linear.compute_dtype:12
+#: tensorcircuit.keras.HardwareLayer.compute_dtype:12
#: tensorcircuit.keras.QuantumLayer.compute_dtype:12
msgid ""
"Layers often perform certain internal computations in higher precision "
@@ -4059,60 +4573,207 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.compute_dtype:16
#: tensorcircuit.applications.van.ResidualBlock.compute_dtype:16
#: tensorcircuit.applications.vqes.Linear.compute_dtype:16
+#: tensorcircuit.keras.HardwareLayer.compute_dtype:16
#: tensorcircuit.keras.QuantumLayer.compute_dtype:16
msgid "The layer's compute dtype."
msgstr ""
-#: keras.engine.base_layer.Layer.compute_mask:1 of
+#: keras.src.engine.training.Model.compute_loss:1 of
+msgid "Compute the total loss, validate it, and return it."
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:3 of
+msgid ""
+"Subclasses can optionally override this method to provide custom loss "
+"computation logic."
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:6 of
+msgid "Example: ```python class MyModel(tf.keras.Model):"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:12 of
+msgid "def __init__(self, *args, **kwargs):"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:11 of
+msgid ""
+"super(MyModel, self).__init__(*args, **kwargs) self.loss_tracker = "
+"tf.keras.metrics.Mean(name='loss')"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:18 of
+msgid "def compute_loss(self, x, y, y_pred, sample_weight):"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:15 of
+msgid ""
+"loss = tf.reduce_mean(tf.math.squared_difference(y_pred, y)) loss += "
+"tf.add_n(self.losses) self.loss_tracker.update_state(loss) return loss"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:21 of
+msgid "def reset_metrics(self):"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:21 of
+msgid "self.loss_tracker.reset_states()"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:23 of
+msgid "@property def metrics(self):"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:25 of
+msgid "return [self.loss_tracker]"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:27 of
+msgid ""
+"tensors = tf.random.uniform((10, 10)), tf.random.uniform((10,)) dataset ="
+" tf.data.Dataset.from_tensor_slices(tensors).repeat().batch(1)"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:30 of
+msgid ""
+"inputs = tf.keras.layers.Input(shape=(10,), name='my_input') outputs = "
+"tf.keras.layers.Dense(10)(inputs) model = MyModel(inputs, outputs) "
+"model.add_loss(tf.reduce_sum(outputs))"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:35 of
+msgid ""
+"optimizer = tf.keras.optimizers.SGD() model.compile(optimizer, "
+"loss='mse', steps_per_execution=10) model.fit(dataset, epochs=2, "
+"steps_per_epoch=10) print('My custom loss: ', "
+"model.loss_tracker.result().numpy()) ```"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:41
+#: keras.src.engine.training.Model.compute_metrics:23 of
+msgid "Input data."
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:42
+#: keras.src.engine.training.Model.compute_metrics:24 of
+msgid "Target data."
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:43 of
+msgid "Predictions returned by the model (output of `model(x)`)"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:44
+#: keras.src.engine.training.Model.compute_metrics:26 of
+msgid "Sample weights for weighting the loss function."
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:46 of
+msgid ""
+"The total loss as a `tf.Tensor`, or `None` if no loss results (which is "
+"the case when called by `Model.test_step`)."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.compute_mask:1 of
msgid "Computes an output mask tensor."
msgstr ""
-#: keras.engine.base_layer.Layer.compute_mask:3
-#: keras.engine.base_layer.Layer.compute_mask:4 of
+#: keras.src.engine.base_layer.Layer.compute_mask:3
+#: keras.src.engine.base_layer.Layer.compute_mask:4 of
msgid "Tensor or list of tensors."
msgstr ""
-#: keras.engine.base_layer.Layer.compute_mask:6 of
+#: keras.src.engine.base_layer.Layer.compute_mask:6 of
msgid ""
"None or a tensor (or list of tensors, one per output tensor of the "
"layer)."
msgstr ""
-#: keras.engine.base_layer.Layer.compute_mask:8 of
+#: keras.src.engine.base_layer.Layer.compute_mask:8 of
msgid "None or a tensor (or list of tensors,"
msgstr ""
-#: keras.engine.base_layer.Layer.compute_mask:9 of
+#: keras.src.engine.base_layer.Layer.compute_mask:9 of
msgid "one per output tensor of the layer)."
msgstr ""
-#: keras.engine.base_layer.Layer.compute_output_shape:1 of
+#: keras.src.engine.training.Model.compute_metrics:1 of
+msgid "Update metric states and collect all metrics to be returned."
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_metrics:3 of
+msgid ""
+"Subclasses can optionally override this method to provide custom metric "
+"updating and collection logic."
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_metrics:6 of
+msgid "Example: ```python class MyModel(tf.keras.Sequential):"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_metrics:10 of
+msgid "def compute_metrics(self, x, y, y_pred, sample_weight):"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_metrics:12 of
+msgid ""
+"# This super call updates `self.compiled_metrics` and returns # results "
+"for all metrics listed in `self.metrics`. metric_results = super(MyModel,"
+" self).compute_metrics("
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_metrics:15 of
+msgid "x, y, y_pred, sample_weight)"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_metrics:17 of
+msgid ""
+"# Note that `self.custom_metric` is not listed in `self.metrics`. "
+"self.custom_metric.update_state(x, y, y_pred, sample_weight) "
+"metric_results['custom_metric_name'] = self.custom_metric.result() return"
+" metric_results"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_metrics:25 of
+msgid "Predictions returned by the model (output of `model.call(x)`)"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_metrics:28 of
+msgid ""
+"A `dict` containing values that will be passed to "
+"`tf.keras.callbacks.CallbackList.on_train_batch_end()`. Typically, the "
+"values of the metrics listed in `self.metrics` are returned. Example: "
+"`{'loss': 0.2, 'accuracy': 0.7}`."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.compute_output_shape:1 of
msgid "Computes the output shape of the layer."
msgstr ""
-#: keras.engine.base_layer.Layer.compute_output_shape:3 of
+#: keras.src.engine.base_layer.Layer.compute_output_shape:3 of
msgid ""
-"If the layer has not been built, this method will call `build` on the "
-"layer. This assumes that the layer will later be used with inputs that "
-"match the input shape provided here."
+"This method will cause the layer's state to be built, if that has not "
+"happened before. This requires that the layer will later be used with "
+"inputs that match the input shape provided here."
msgstr ""
-#: keras.engine.base_layer.Layer.compute_output_shape:7 of
+#: keras.src.engine.base_layer.Layer.compute_output_shape:7 of
msgid ""
-"Shape tuple (tuple of integers) or list of shape tuples (one per output "
-"tensor of the layer). Shape tuples can include None for free dimensions, "
-"instead of an integer."
+"Shape tuple (tuple of integers) or `tf.TensorShape`, or structure of "
+"shape tuples / `tf.TensorShape` instances (one per output tensor of the "
+"layer). Shape tuples can include None for free dimensions, instead of an "
+"integer."
msgstr ""
-#: keras.engine.base_layer.Layer.compute_output_shape:12 of
-msgid "An input shape tuple."
+#: keras.src.engine.base_layer.Layer.compute_output_shape:13 of
+msgid "A `tf.TensorShape` instance or structure of `tf.TensorShape` instances."
msgstr ""
-#: keras.engine.base_layer.Layer.compute_output_signature:1 of
+#: keras.src.engine.base_layer.Layer.compute_output_signature:1 of
msgid "Compute the output tensor signature of the layer based on the inputs."
msgstr ""
-#: keras.engine.base_layer.Layer.compute_output_signature:3 of
+#: keras.src.engine.base_layer.Layer.compute_output_signature:3 of
msgid ""
"Unlike a TensorShape object, a TensorSpec object contains both shape and "
"dtype information for a tensor. This method allows layers to provide "
@@ -4122,44 +4783,59 @@ msgid ""
"matches the input dtype."
msgstr ""
-#: keras.engine.base_layer.Layer.compute_output_signature:10 of
+#: keras.src.engine.base_layer.Layer.compute_output_signature:10 of
msgid ""
"Single TensorSpec or nested structure of TensorSpec objects, describing a"
" candidate input for the layer."
msgstr ""
-#: keras.engine.base_layer.Layer.compute_output_signature:13 of
+#: keras.src.engine.base_layer.Layer.compute_output_signature:13 of
msgid ""
-"Single TensorSpec or nested structure of TensorSpec objects, describing"
-" how the layer would transform the provided input."
+"Single TensorSpec or nested structure of TensorSpec objects, describing"
+" how the layer would transform the provided input."
msgstr ""
-#: keras.engine.base_layer.Layer.compute_output_signature:16 of
-msgid "Single TensorSpec or nested structure of TensorSpec objects, describing"
+#: keras.src.engine.base_layer.Layer.compute_output_signature:16 of
+msgid "Single TensorSpec or nested structure of TensorSpec objects,"
msgstr ""
-#: keras.engine.base_layer.Layer.compute_output_signature:16 of
-msgid "how the layer would transform the provided input."
+#: keras.src.engine.base_layer.Layer.compute_output_signature:16 of
+msgid "describing how the layer would transform the provided input."
msgstr ""
-#: keras.engine.base_layer.Layer.compute_output_signature:18 of
+#: keras.src.engine.base_layer.Layer.compute_output_signature:18 of
msgid "If input_signature contains a non-TensorSpec object."
msgstr ""
-#: keras.engine.base_layer.Layer.count_params:1 of
+#: keras.src.engine.base_layer.Layer.count_params:1 of
msgid "Count the total number of scalars composing the weights."
msgstr ""
-#: keras.engine.base_layer.Layer.count_params:3 of
+#: keras.src.engine.base_layer.Layer.count_params:3 of
msgid "An integer count."
msgstr ""
-#: keras.engine.base_layer.Layer.count_params:5 of
+#: keras.src.engine.base_layer.Layer.count_params:5 of
msgid ""
"if the layer isn't yet built (in which case its weights aren't yet "
"defined)."
msgstr ""
+#: of tensorcircuit.applications.van.MADE.distribute_reduction_method:1
+#: tensorcircuit.applications.van.NMF.distribute_reduction_method:1
+#: tensorcircuit.applications.van.PixelCNN.distribute_reduction_method:1
+msgid "The method employed to reduce per-replica values during training."
+msgstr ""
+
+#: of tensorcircuit.applications.van.MADE.distribute_reduction_method:3
+#: tensorcircuit.applications.van.NMF.distribute_reduction_method:3
+#: tensorcircuit.applications.van.PixelCNN.distribute_reduction_method:3
+msgid ""
+"Unless specified, the value \"auto\" will be assumed, indicating that the"
+" reduction strategy should be chosen based on the current running "
+"environment. See `reduce_per_replica` function for more details."
+msgstr ""
+
#: of tensorcircuit.applications.van.MADE.distribute_strategy:1
#: tensorcircuit.applications.van.NMF.distribute_strategy:1
#: tensorcircuit.applications.van.PixelCNN.distribute_strategy:1
@@ -4173,6 +4849,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.dtype:1
#: tensorcircuit.applications.van.ResidualBlock.dtype:1
#: tensorcircuit.applications.vqes.Linear.dtype:1
+#: tensorcircuit.keras.HardwareLayer.dtype:1
#: tensorcircuit.keras.QuantumLayer.dtype:1
msgid "The dtype of the layer weights."
msgstr ""
@@ -4184,6 +4861,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.dtype:3
#: tensorcircuit.applications.van.ResidualBlock.dtype:3
#: tensorcircuit.applications.vqes.Linear.dtype:3
+#: tensorcircuit.keras.HardwareLayer.dtype:3
#: tensorcircuit.keras.QuantumLayer.dtype:3
msgid ""
"This is equivalent to `Layer.dtype_policy.variable_dtype`. Unless mixed "
@@ -4198,6 +4876,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.dtype_policy:1
#: tensorcircuit.applications.van.ResidualBlock.dtype_policy:1
#: tensorcircuit.applications.vqes.Linear.dtype_policy:1
+#: tensorcircuit.keras.HardwareLayer.dtype_policy:1
#: tensorcircuit.keras.QuantumLayer.dtype_policy:1
msgid "The dtype policy associated with this layer."
msgstr ""
@@ -4209,6 +4888,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.dtype_policy:3
#: tensorcircuit.applications.van.ResidualBlock.dtype_policy:3
#: tensorcircuit.applications.vqes.Linear.dtype_policy:3
+#: tensorcircuit.keras.HardwareLayer.dtype_policy:3
#: tensorcircuit.keras.QuantumLayer.dtype_policy:3
msgid "This is an instance of a `tf.keras.mixed_precision.Policy`."
msgstr ""
@@ -4220,19 +4900,20 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.dynamic:1
#: tensorcircuit.applications.van.ResidualBlock.dynamic:1
#: tensorcircuit.applications.vqes.Linear.dynamic:1
+#: tensorcircuit.keras.HardwareLayer.dynamic:1
#: tensorcircuit.keras.QuantumLayer.dynamic:1
msgid "Whether the layer is dynamic (eager-only); set in the constructor."
msgstr ""
-#: keras.engine.training.Model.evaluate:1 of
+#: keras.src.engine.training.Model.evaluate:1 of
msgid "Returns the loss value & metrics values for the model in test mode."
msgstr ""
-#: keras.engine.training.Model.evaluate:3 of
+#: keras.src.engine.training.Model.evaluate:3 of
msgid "Computation is done in batches (see the `batch_size` arg.)"
msgstr ""
-#: keras.engine.training.Model.evaluate:5 of
+#: keras.src.engine.training.Model.evaluate:5 of
msgid ""
"Input data. It could be: - A Numpy array (or array-like), or a list of "
"arrays (in case the model has multiple inputs). - A TensorFlow tensor, "
@@ -4240,63 +4921,67 @@ msgid ""
"mapping input names to the corresponding array/tensors, if the model "
"has named inputs. - A `tf.data` dataset. Should return a tuple of "
"either `(inputs, targets)` or `(inputs, targets, sample_weights)`. - A "
-"generator or `keras.utils.Sequence` returning `(inputs, targets)` or "
+"generator or `keras.utils.Sequence` returning `(inputs, targets)` or "
"`(inputs, targets, sample_weights)`. A more detailed description of "
"unpacking behavior for iterator types (Dataset, generator, Sequence) is "
"given in the `Unpacking behavior for iterator-like inputs` section of "
"`Model.fit`."
msgstr ""
-#: keras.engine.training.Model.evaluate:5 keras.engine.training.Model.fit:3
-#: keras.engine.training.Model.train_on_batch:3 of
+#: keras.src.engine.training.Model.evaluate:5
+#: keras.src.engine.training.Model.fit:3
+#: keras.src.engine.training.Model.train_on_batch:3 of
msgid ""
"Input data. It could be: - A Numpy array (or array-like), or a list of "
"arrays"
msgstr ""
-#: keras.engine.training.Model.evaluate:7 keras.engine.training.Model.fit:5
-#: keras.engine.training.Model.predict:13
-#: keras.engine.training.Model.train_on_batch:5
-#: keras.engine.training.Model.train_on_batch:7 of
+#: keras.src.engine.training.Model.evaluate:7
+#: keras.src.engine.training.Model.fit:5
+#: keras.src.engine.training.Model.predict:28
+#: keras.src.engine.training.Model.train_on_batch:5
+#: keras.src.engine.training.Model.train_on_batch:7 of
msgid "(in case the model has multiple inputs)."
msgstr ""
-#: keras.engine.training.Model.evaluate:8 keras.engine.training.Model.fit:6
-#: keras.engine.training.Model.predict:14 of
+#: keras.src.engine.training.Model.evaluate:8
+#: keras.src.engine.training.Model.fit:6
+#: keras.src.engine.training.Model.predict:29 of
msgid ""
"A TensorFlow tensor, or a list of tensors (in case the model has multiple"
" inputs)."
msgstr ""
-#: keras.engine.training.Model.evaluate:10 keras.engine.training.Model.fit:8 of
+#: keras.src.engine.training.Model.evaluate:10
+#: keras.src.engine.training.Model.fit:8 of
msgid ""
"A dict mapping input names to the corresponding array/tensors, if the "
"model has named inputs."
msgstr ""
-#: keras.engine.training.Model.evaluate:12 keras.engine.training.Model.fit:10
-#: of
+#: keras.src.engine.training.Model.evaluate:12
+#: keras.src.engine.training.Model.fit:10 of
msgid ""
"A `tf.data` dataset. Should return a tuple of either `(inputs, targets)` "
"or `(inputs, targets, sample_weights)`."
msgstr ""
-#: keras.engine.training.Model.evaluate:15 keras.engine.training.Model.fit:13
-#: of
+#: keras.src.engine.training.Model.evaluate:15
+#: keras.src.engine.training.Model.fit:13 of
msgid ""
"A generator or `keras.utils.Sequence` returning `(inputs, targets)` or "
"`(inputs, targets, sample_weights)`."
msgstr ""
-#: keras.engine.training.Model.evaluate:17
-#: keras.engine.training.Model.predict:18 of
+#: keras.src.engine.training.Model.evaluate:17
+#: keras.src.engine.training.Model.predict:33 of
msgid ""
"A more detailed description of unpacking behavior for iterator types "
"(Dataset, generator, Sequence) is given in the `Unpacking behavior for "
"iterator-like inputs` section of `Model.fit`."
msgstr ""
-#: keras.engine.training.Model.evaluate:20 of
+#: keras.src.engine.training.Model.evaluate:20 of
msgid ""
"Target data. Like the input data `x`, it could be either Numpy array(s) "
"or TensorFlow tensor(s). It should be consistent with `x` (you cannot "
@@ -4305,7 +4990,7 @@ msgid ""
"specified (since targets will be obtained from the iterator/dataset)."
msgstr ""
-#: keras.engine.training.Model.evaluate:26 of
+#: keras.src.engine.training.Model.evaluate:26 of
msgid ""
"Integer or `None`. Number of samples per batch of computation. If "
"unspecified, `batch_size` will default to 32. Do not specify the "
@@ -4313,34 +4998,41 @@ msgid ""
"`keras.utils.Sequence` instances (since they generate batches)."
msgstr ""
-#: keras.engine.training.Model.evaluate:31 of
-msgid "0 or 1. Verbosity mode. 0 = silent, 1 = progress bar."
+#: keras.src.engine.training.Model.evaluate:31
+#: keras.src.engine.training.Model.predict:42 of
+msgid ""
+"`\"auto\"`, 0, 1, or 2. Verbosity mode. 0 = silent, 1 = progress bar, 2 ="
+" single line. `\"auto\"` becomes 1 for most cases, and to 2 when used "
+"with `ParameterServerStrategy`. Note that the progress bar is not "
+"particularly useful when logged to a file, so `verbose=2` is recommended "
+"when not running interactively (e.g. in a production environment). "
+"Defaults to 'auto'."
msgstr ""
-#: keras.engine.training.Model.evaluate:32 of
+#: keras.src.engine.training.Model.evaluate:38 of
msgid ""
"Optional Numpy array of weights for the test samples, used for weighting "
"the loss function. You can either pass a flat (1D) Numpy array with the "
"same length as the input samples (1:1 mapping between weights and "
"samples), or in the case of temporal data, you can pass a 2D array "
"with shape `(samples, sequence_length)`, to apply a different weight "
-"to every timestep of every sample. This argument is not supported "
-"when `x` is a dataset, instead pass sample weights as the third "
+"to every timestep of every sample. This argument is not supported "
+"when `x` is a dataset, instead pass sample weights as the third "
"element of `x`."
msgstr ""
-#: keras.engine.training.Model.evaluate:32 of
+#: keras.src.engine.training.Model.evaluate:38 of
msgid ""
"Optional Numpy array of weights for the test samples, used for weighting "
"the loss function. You can either pass a flat (1D) Numpy array with the "
"same length as the input samples"
msgstr ""
-#: keras.engine.training.Model.evaluate:38 of
+#: keras.src.engine.training.Model.evaluate:45 of
msgid "(1:1 mapping between weights and samples), or in the case of"
msgstr ""
-#: keras.engine.training.Model.evaluate:36 of
+#: keras.src.engine.training.Model.evaluate:42 of
msgid ""
"temporal data, you can pass a 2D array with shape `(samples, "
"sequence_length)`, to apply a different weight to every timestep of every"
@@ -4348,7 +5040,7 @@ msgid ""
"pass sample weights as the third element of `x`."
msgstr ""
-#: keras.engine.training.Model.evaluate:40 of
+#: keras.src.engine.training.Model.evaluate:47 of
msgid ""
"Integer or `None`. Total number of steps (batches of samples) before "
"declaring the evaluation round finished. Ignored with the default value "
@@ -4357,30 +5049,33 @@ msgid ""
"with array inputs."
msgstr ""
-#: keras.engine.training.Model.evaluate:45 of
+#: keras.src.engine.training.Model.evaluate:52 of
msgid ""
"List of `keras.callbacks.Callback` instances. List of callbacks to apply "
-"during evaluation. See [callbacks](/api_docs/python/tf/keras/callbacks)."
+"during evaluation. See "
+"[callbacks](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks)."
msgstr ""
-#: keras.engine.training.Model.evaluate:48 keras.engine.training.Model.fit:160
-#: keras.engine.training.Model.predict:36 of
+#: keras.src.engine.training.Model.evaluate:55
+#: keras.src.engine.training.Model.predict:58 of
msgid ""
"Integer. Used for generator or `keras.utils.Sequence` input only. Maximum"
" size for the generator queue. If unspecified, `max_queue_size` will "
"default to 10."
msgstr ""
-#: keras.engine.training.Model.evaluate:51 keras.engine.training.Model.fit:163
-#: keras.engine.training.Model.predict:39 of
+#: keras.src.engine.training.Model.evaluate:58
+#: keras.src.engine.training.Model.fit:178
+#: keras.src.engine.training.Model.predict:62 of
msgid ""
"Integer. Used for generator or `keras.utils.Sequence` input only. Maximum"
" number of processes to spin up when using process-based threading. If "
"unspecified, `workers` will default to 1."
msgstr ""
-#: keras.engine.training.Model.evaluate:54 keras.engine.training.Model.fit:167
-#: keras.engine.training.Model.predict:43 of
+#: keras.src.engine.training.Model.evaluate:62
+#: keras.src.engine.training.Model.fit:182
+#: keras.src.engine.training.Model.predict:66 of
msgid ""
"Boolean. Used for generator or `keras.utils.Sequence` input only. If "
"`True`, use process-based threading. If unspecified, "
@@ -4390,32 +5085,26 @@ msgid ""
"children processes."
msgstr ""
-#: keras.engine.training.Model.evaluate:60
-#: keras.engine.training.Model.test_on_batch:21
-#: keras.engine.training.Model.train_on_batch:25 of
+#: keras.src.engine.training.Model.evaluate:69
+#: keras.src.engine.training.Model.test_on_batch:21
+#: keras.src.engine.training.Model.train_on_batch:27 of
msgid ""
"If `True`, loss and metric results are returned as a dict, with each key "
"being the name of the metric. If `False`, they are returned as a list."
msgstr ""
-#: keras.engine.training.Model.evaluate:63 of
+#: keras.src.engine.training.Model.evaluate:72 of
msgid "Unused at this time."
msgstr ""
-#: keras.engine.training.Model.evaluate:65 of
+#: keras.src.engine.training.Model.evaluate:74 of
msgid ""
"See the discussion of `Unpacking behavior for iterator-like inputs` for "
"`Model.fit`."
msgstr ""
-#: keras.engine.training.Model.evaluate:68 of
-msgid ""
-"`Model.evaluate` is not yet supported with "
-"`tf.distribute.experimental.ParameterServerStrategy`."
-msgstr ""
-
-#: keras.engine.training.Model.evaluate:71
-#: keras.engine.training.Model.test_on_batch:25 of
+#: keras.src.engine.training.Model.evaluate:77
+#: keras.src.engine.training.Model.test_on_batch:25 of
msgid ""
"Scalar test loss (if the model has a single output and no metrics) or "
"list of scalars (if the model has multiple outputs and/or metrics). The "
@@ -4423,42 +5112,89 @@ msgid ""
"scalar outputs."
msgstr ""
-#: keras.engine.training.Model.evaluate:76 of
+#: keras.src.engine.training.Model.evaluate:82 of
msgid "If `model.evaluate` is wrapped in a `tf.function`."
msgstr ""
-#: keras.engine.training.Model.evaluate_generator:1 of
+#: keras.src.engine.training.Model.evaluate_generator:1 of
msgid "Evaluates the model on a data generator."
msgstr ""
-#: keras.engine.training.Model.evaluate_generator:4
-#: keras.engine.training.Model.fit_generator:4
-#: keras.engine.training.Model.predict_generator:4 of
+#: keras.src.engine.training.Model.evaluate_generator:4
+#: keras.src.engine.training.Model.fit_generator:4
+#: keras.src.engine.training.Model.predict_generator:4 of
msgid "DEPRECATED:"
msgstr ""
-#: keras.engine.training.Model.evaluate_generator:4 of
+#: keras.src.engine.training.Model.evaluate_generator:4 of
msgid ""
"`Model.evaluate` now supports generators, so there is no longer any need "
"to use this endpoint."
msgstr ""
-#: keras.engine.base_layer.Layer.finalize_state:1 of
+#: keras.src.engine.training.Model.export:1 of
+msgid "Create a SavedModel artifact for inference (e.g. via TF-Serving)."
+msgstr ""
+
+#: keras.src.engine.training.Model.export:3 of
+msgid ""
+"This method lets you export a model to a lightweight SavedModel artifact "
+"that contains the model's forward pass only (its `call()` method) and can"
+" be served via e.g. TF-Serving. The forward pass is registered under the "
+"name `serve()` (see example below)."
+msgstr ""
+
+#: keras.src.engine.training.Model.export:8 of
+msgid ""
+"The original code of the model (including any custom layers you may have "
+"used) is *no longer* necessary to reload the artifact -- it is entirely "
+"standalone."
+msgstr ""
+
+#: keras.src.engine.training.Model.export:12 of
+msgid "`str` or `pathlib.Path` object. Path where to save the artifact."
+msgstr ""
+
+#: keras.src.engine.training.Model.export:17 of
+msgid "```python # Create the artifact model.export(\"path/to/location\")"
+msgstr ""
+
+#: keras.src.engine.training.Model.export:21 of
+msgid ""
+"# Later, in a different process / environment... reloaded_artifact = "
+"tf.saved_model.load(\"path/to/location\") predictions = "
+"reloaded_artifact.serve(input_data) ```"
+msgstr ""
+
+#: keras.src.engine.training.Model.export:26 of
+msgid ""
+"If you would like to customize your serving endpoints, you can use the "
+"lower-level `keras.export.ExportArchive` class. The `export()` method "
+"relies on `ExportArchive` internally."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.finalize_state:1 of
msgid "Finalizes the layers state after updating layer weights."
msgstr ""
-#: keras.engine.base_layer.Layer.finalize_state:3 of
+#: keras.src.engine.base_layer.Layer.finalize_state:3 of
msgid ""
"This function can be subclassed in a layer and will be called after "
"updating a layer weights. It can be overridden to finalize any additional"
" layer state after a weight update."
msgstr ""
-#: keras.engine.training.Model.fit:1 of
-msgid "Trains the model for a fixed number of epochs (iterations on a dataset)."
+#: keras.src.engine.base_layer.Layer.finalize_state:7 of
+msgid ""
+"This function will be called after weights of a layer have been restored "
+"from a loaded model."
+msgstr ""
+
+#: keras.src.engine.training.Model.fit:1 of
+msgid "Trains the model for a fixed number of epochs (dataset iterations)."
msgstr ""
-#: keras.engine.training.Model.fit:3 of
+#: keras.src.engine.training.Model.fit:3 of
msgid ""
"Input data. It could be: - A Numpy array (or array-like), or a list of "
"arrays (in case the model has multiple inputs). - A TensorFlow tensor, "
@@ -4466,7 +5202,7 @@ msgid ""
"mapping input names to the corresponding array/tensors, if the model "
"has named inputs. - A `tf.data` dataset. Should return a tuple of "
"either `(inputs, targets)` or `(inputs, targets, sample_weights)`. - A "
-"generator or `keras.utils.Sequence` returning `(inputs, targets)` or "
+"generator or `keras.utils.Sequence` returning `(inputs, targets)` or "
"`(inputs, targets, sample_weights)`. - A "
"`tf.keras.utils.experimental.DatasetCreator`, which wraps a callable "
"that takes a single argument of type `tf.distribute.InputContext`, and "
@@ -4474,12 +5210,15 @@ msgid ""
" prefer to specify the per-replica batching and sharding logic for the "
"`Dataset`. See `tf.keras.utils.experimental.DatasetCreator` doc for "
"more information. A more detailed description of unpacking behavior for"
-" iterator types (Dataset, generator, Sequence) is given below. If using "
+" iterator types (Dataset, generator, Sequence) is given below. If these "
+"include `sample_weights` as a third component, note that sample weighting"
+" applies to the `weighted_metrics` argument but not the `metrics` "
+"argument in `compile()`. If using "
"`tf.distribute.experimental.ParameterServerStrategy`, only "
"`DatasetCreator` type is supported for `x`."
msgstr ""
-#: keras.engine.training.Model.fit:15 of
+#: keras.src.engine.training.Model.fit:15 of
msgid ""
"A `tf.keras.utils.experimental.DatasetCreator`, which wraps a callable "
"that takes a single argument of type `tf.distribute.InputContext`, and "
@@ -4489,15 +5228,18 @@ msgid ""
"information."
msgstr ""
-#: keras.engine.training.Model.fit:22 of
+#: keras.src.engine.training.Model.fit:22 of
msgid ""
"A more detailed description of unpacking behavior for iterator types "
-"(Dataset, generator, Sequence) is given below. If using "
+"(Dataset, generator, Sequence) is given below. If these include "
+"`sample_weights` as a third component, note that sample weighting applies"
+" to the `weighted_metrics` argument but not the `metrics` argument in "
+"`compile()`. If using "
"`tf.distribute.experimental.ParameterServerStrategy`, only "
"`DatasetCreator` type is supported for `x`."
msgstr ""
-#: keras.engine.training.Model.fit:26 of
+#: keras.src.engine.training.Model.fit:29 of
msgid ""
"Target data. Like the input data `x`, it could be either Numpy array(s) "
"or TensorFlow tensor(s). It should be consistent with `x` (you cannot "
@@ -4506,7 +5248,7 @@ msgid ""
"specified (since targets will be obtained from `x`)."
msgstr ""
-#: keras.engine.training.Model.fit:32 of
+#: keras.src.engine.training.Model.fit:35 of
msgid ""
"Integer or `None`. Number of samples per gradient update. If unspecified,"
" `batch_size` will default to 32. Do not specify the `batch_size` if your"
@@ -4514,7 +5256,7 @@ msgid ""
"instances (since they generate batches)."
msgstr ""
-#: keras.engine.training.Model.fit:38 of
+#: keras.src.engine.training.Model.fit:41 of
msgid ""
"Integer. Number of epochs to train the model. An epoch is an iteration "
"over the entire `x` and `y` data provided (unless the `steps_per_epoch` "
@@ -4524,16 +5266,17 @@ msgid ""
"merely until the epoch of index `epochs` is reached."
msgstr ""
-#: keras.engine.training.Model.fit:48 of
+#: keras.src.engine.training.Model.fit:51 of
msgid ""
"'auto', 0, 1, or 2. Verbosity mode. 0 = silent, 1 = progress bar, 2 = one"
-" line per epoch. 'auto' defaults to 1 for most cases, but 2 when used "
-"with `ParameterServerStrategy`. Note that the progress bar is not "
-"particularly useful when logged to a file, so verbose=2 is recommended "
-"when not running interactively (eg, in a production environment)."
+" line per epoch. 'auto' becomes 1 for most cases, but 2 when used with "
+"`ParameterServerStrategy`. Note that the progress bar is not particularly"
+" useful when logged to a file, so verbose=2 is recommended when not "
+"running interactively (eg, in a production environment). Defaults to "
+"'auto'."
msgstr ""
-#: keras.engine.training.Model.fit:55 of
+#: keras.src.engine.training.Model.fit:58 of
msgid ""
"List of `keras.callbacks.Callback` instances. List of callbacks to apply "
"during training. See `tf.keras.callbacks`. Note "
@@ -4547,43 +5290,21 @@ msgid ""
"`steps_per_epoch` value."
msgstr ""
-#: keras.engine.training.Model.fit:66 of
-msgid ""
-"Float between 0 and 1. Fraction of the training data to be used as "
-"validation data. The model will set apart this fraction of the training "
-"data, will not train on it, and will evaluate the loss and any model "
-"metrics on this data at the end of each epoch. The validation data is "
-"selected from the last samples in the `x` and `y` data provided, before "
-"shuffling. This argument is not supported when `x` is a dataset, "
-"generator or `keras.utils.Sequence` instance. `validation_split` is not "
-"yet supported with `tf.distribute.experimental.ParameterServerStrategy`."
-msgstr ""
-
-#: keras.engine.training.Model.fit:74 of
-msgid "Float between 0 and 1."
-msgstr ""
-
-#: keras.engine.training.Model.fit:68 of
-msgid ""
-"Fraction of the training data to be used as validation data. The model "
-"will set apart this fraction of the training data, will not train on it, "
-"and will evaluate the loss and any model metrics on this data at the end "
-"of each epoch. The validation data is selected from the last samples in "
-"the `x` and `y` data provided, before shuffling. This argument is not "
-"supported when `x` is a dataset, generator or"
-msgstr ""
-
-#: keras.engine.training.Model.fit:77 of
-msgid "`keras.utils.Sequence` instance."
-msgstr ""
-
-#: keras.engine.training.Model.fit:77 of
+#: keras.src.engine.training.Model.fit:70 of
msgid ""
-"`validation_split` is not yet supported with "
+"Float between 0 and 1. Fraction of the training data to be used as "
+"validation data. The model will set apart this fraction of the training "
+"data, will not train on it, and will evaluate the loss and any model "
+"metrics on this data at the end of each epoch. The validation data is "
+"selected from the last samples in the `x` and `y` data provided, before "
+"shuffling. This argument is not supported when `x` is a dataset, "
+"generator or `keras.utils.Sequence` instance. If both `validation_data` "
+"and `validation_split` are provided, `validation_data` will override "
+"`validation_split`. `validation_split` is not yet supported with "
"`tf.distribute.experimental.ParameterServerStrategy`."
msgstr ""
-#: keras.engine.training.Model.fit:79 of
+#: keras.src.engine.training.Model.fit:84 of
msgid ""
"Data on which to evaluate the loss and any model metrics at the end of "
"each epoch. The model will not be trained on this data. Thus, note the "
@@ -4591,14 +5312,14 @@ msgid ""
"or `validation_data` is not affected by regularization layers like noise "
"and dropout. `validation_data` will override `validation_split`. "
"`validation_data` could be: - A tuple `(x_val, y_val)` of Numpy arrays "
-"or tensors. - A tuple `(x_val, y_val, val_sample_weights)` of NumPy "
-"arrays. - A `tf.data.Dataset`. - A Python generator or "
+"or tensors. - A tuple `(x_val, y_val, val_sample_weights)` of NumPy"
+" arrays. - A `tf.data.Dataset`. - A Python generator or "
"`keras.utils.Sequence` returning `(inputs, targets)` or `(inputs, "
"targets, sample_weights)`. `validation_data` is not yet supported with "
"`tf.distribute.experimental.ParameterServerStrategy`."
msgstr ""
-#: keras.engine.training.Model.fit:79 of
+#: keras.src.engine.training.Model.fit:84 of
msgid ""
"Data on which to evaluate the loss and any model metrics at the end of "
"each epoch. The model will not be trained on this data. Thus, note the "
@@ -4608,33 +5329,33 @@ msgid ""
"`validation_data` could be:"
msgstr ""
-#: keras.engine.training.Model.fit:87 of
+#: keras.src.engine.training.Model.fit:92 of
msgid "A tuple `(x_val, y_val)` of Numpy arrays or tensors."
msgstr ""
-#: keras.engine.training.Model.fit:88 of
+#: keras.src.engine.training.Model.fit:93 of
msgid "A tuple `(x_val, y_val, val_sample_weights)` of NumPy arrays."
msgstr ""
-#: keras.engine.training.Model.fit:89 of
+#: keras.src.engine.training.Model.fit:95 of
msgid "A `tf.data.Dataset`."
msgstr ""
-#: keras.engine.training.Model.fit:90 of
+#: keras.src.engine.training.Model.fit:96 of
msgid "A Python generator or `keras.utils.Sequence` returning"
msgstr ""
-#: keras.engine.training.Model.fit:91 of
+#: keras.src.engine.training.Model.fit:97 of
msgid "`(inputs, targets)` or `(inputs, targets, sample_weights)`."
msgstr ""
-#: keras.engine.training.Model.fit:92 of
+#: keras.src.engine.training.Model.fit:98 of
msgid ""
"`validation_data` is not yet supported with "
"`tf.distribute.experimental.ParameterServerStrategy`."
msgstr ""
-#: keras.engine.training.Model.fit:94 of
+#: keras.src.engine.training.Model.fit:100 of
msgid ""
"Boolean (whether to shuffle the training data before each epoch) or str "
"(for 'batch'). This argument is ignored when `x` is a generator or an "
@@ -4643,57 +5364,39 @@ msgid ""
"effect when `steps_per_epoch` is not `None`."
msgstr ""
-#: keras.engine.training.Model.fit:100 of
+#: keras.src.engine.training.Model.fit:106 of
msgid ""
"Optional dictionary mapping class indices (integers) to a weight (float) "
"value, used for weighting the loss function (during training only). This "
"can be useful to tell the model to \"pay more attention\" to samples from"
-" an under-represented class."
-msgstr ""
-
-#: keras.engine.training.Model.fit:106 of
-msgid ""
-"Optional Numpy array of weights for the training samples, used for "
-"weighting the loss function (during training only). You can either pass "
-"a flat (1D) Numpy array with the same length as the input samples (1:1 "
-"mapping between weights and samples), or in the case of temporal data, "
-"you can pass a 2D array with shape `(samples, sequence_length)`, to "
-"apply a different weight to every timestep of every sample. This "
-"argument is not supported when `x` is a dataset, generator, or "
-"`keras.utils.Sequence` instance, instead provide the sample_weights as "
-"the third element of `x`."
-msgstr ""
-
-#: keras.engine.training.Model.fit:115 of
-msgid "Optional Numpy array of weights for"
+" an under-represented class. When `class_weight` is specified and targets"
+" have a rank of 2 or greater, either `y` must be one-hot encoded, or an "
+"explicit final dimension of `1` must be included for sparse class labels."
msgstr ""
-#: keras.engine.training.Model.fit:108 of
+#: keras.src.engine.training.Model.fit:115 of
msgid ""
-"the training samples, used for weighting the loss function (during "
-"training only). You can either pass a flat (1D) Numpy array with the same"
-" length as the input samples (1:1 mapping between weights and samples), "
-"or in the case of temporal data, you can pass a 2D array with shape "
-"`(samples, sequence_length)`, to apply a different weight to every "
-"timestep of every sample. This argument is not supported when `x` is a "
-"dataset, generator, or"
-msgstr ""
-
-#: keras.engine.training.Model.fit:117 of
-msgid "`keras.utils.Sequence` instance, instead provide the sample_weights"
-msgstr ""
-
-#: keras.engine.training.Model.fit:118 of
-msgid "as the third element of `x`."
+"Optional Numpy array of weights for the training samples, used for "
+"weighting the loss function (during training only). You can either pass a"
+" flat (1D) Numpy array with the same length as the input samples (1:1 "
+"mapping between weights and samples), or in the case of temporal data, "
+"you can pass a 2D array with shape `(samples, sequence_length)`, to apply"
+" a different weight to every timestep of every sample. This argument is "
+"not supported when `x` is a dataset, generator, or `keras.utils.Sequence`"
+" instance, instead provide the sample_weights as the third element of "
+"`x`. Note that sample weighting does not apply to metrics specified via "
+"the `metrics` argument in `compile()`. To apply sample weighting to your "
+"metrics, you can specify them via the `weighted_metrics` in `compile()` "
+"instead."
msgstr ""
-#: keras.engine.training.Model.fit:119 of
+#: keras.src.engine.training.Model.fit:131 of
msgid ""
"Integer. Epoch at which to start training (useful for resuming a previous"
" training run)."
msgstr ""
-#: keras.engine.training.Model.fit:122 of
+#: keras.src.engine.training.Model.fit:134 of
msgid ""
"Integer or `None`. Total number of steps (batches of samples) before "
"declaring one epoch finished and starting the next epoch. When training "
@@ -4701,15 +5404,15 @@ msgid ""
" equal to the number of samples in your dataset divided by the batch "
"size, or 1 if that cannot be determined. If x is a `tf.data` dataset, and"
" 'steps_per_epoch' is None, the epoch will run until the input dataset is"
-" exhausted. When passing an infinitely repeating dataset, you must "
+" exhausted. When passing an infinitely repeating dataset, you must "
"specify the `steps_per_epoch` argument. If `steps_per_epoch=-1` the "
-"training will run indefinitely with an infinitely repeating dataset. This"
-" argument is not supported with array inputs. When using "
+"training will run indefinitely with an infinitely repeating dataset. "
+"This argument is not supported with array inputs. When using "
"`tf.distribute.experimental.ParameterServerStrategy`: * "
"`steps_per_epoch=None` is not supported."
msgstr ""
-#: keras.engine.training.Model.fit:122 of
+#: keras.src.engine.training.Model.fit:134 of
msgid ""
"Integer or `None`. Total number of steps (batches of samples) before "
"declaring one epoch finished and starting the next epoch. When training "
@@ -4717,18 +5420,18 @@ msgid ""
" equal to the number of samples in your dataset divided by the batch "
"size, or 1 if that cannot be determined. If x is a `tf.data` dataset, and"
" 'steps_per_epoch' is None, the epoch will run until the input dataset is"
-" exhausted. When passing an infinitely repeating dataset, you must "
+" exhausted. When passing an infinitely repeating dataset, you must "
"specify the `steps_per_epoch` argument. If `steps_per_epoch=-1` the "
-"training will run indefinitely with an infinitely repeating dataset. This"
-" argument is not supported with array inputs. When using "
+"training will run indefinitely with an infinitely repeating dataset. "
+"This argument is not supported with array inputs. When using "
"`tf.distribute.experimental.ParameterServerStrategy`:"
msgstr ""
-#: keras.engine.training.Model.fit:136 of
+#: keras.src.engine.training.Model.fit:149 of
msgid "`steps_per_epoch=None` is not supported."
msgstr ""
-#: keras.engine.training.Model.fit:137 of
+#: keras.src.engine.training.Model.fit:150 of
msgid ""
"Only relevant if `validation_data` is provided and is a `tf.data` "
"dataset. Total number of steps (batches of samples) to draw before "
@@ -4742,7 +5445,7 @@ msgid ""
"time."
msgstr ""
-#: keras.engine.training.Model.fit:147 of
+#: keras.src.engine.training.Model.fit:161 of
msgid ""
"Integer or `None`. Number of samples per validation batch. If "
"unspecified, will default to `batch_size`. Do not specify the "
@@ -4751,10 +5454,10 @@ msgid ""
"batches)."
msgstr ""
-#: keras.engine.training.Model.fit:153 of
+#: keras.src.engine.training.Model.fit:167 of
msgid ""
"Only relevant if validation data is provided. Integer or "
-"`collections.abc.Container` instance (e.g. list, tuple, etc.). If an "
+"`collections.abc.Container` instance (e.g. list, tuple, etc.). If an "
"integer, specifies how many training epochs to run before a new "
"validation run is performed, e.g. `validation_freq=2` runs validation "
"every 2 epochs. If a Container, specifies the epochs on which to run "
@@ -4762,19 +5465,26 @@ msgid ""
"of the 1st, 2nd, and 10th epochs."
msgstr ""
-#: keras.engine.training.Model.fit:196 of
+#: keras.src.engine.training.Model.fit:175 of
+msgid ""
+"Integer. Used for generator or `keras.utils.Sequence` input only. Maximum"
+" size for the generator queue. If unspecified, `max_queue_size` will "
+"default to 10."
+msgstr ""
+
+#: keras.src.engine.training.Model.fit:214 of
msgid "Unpacking behavior for iterator-like inputs:"
msgstr ""
-#: keras.engine.training.Model.fit:175 of
+#: keras.src.engine.training.Model.fit:191 of
msgid "A common pattern is to pass a tf.data.Dataset, generator, or"
msgstr ""
-#: keras.engine.training.Model.fit:176 of
+#: keras.src.engine.training.Model.fit:192 of
msgid ""
"tf.keras.utils.Sequence to the `x` argument of fit, which will in fact "
"yield not only features (x) but optionally targets (y) and sample "
-"weights. Keras requires that the output of such iterator-likes be "
+"weights. Keras requires that the output of such iterator-likes be "
"unambiguous. The iterator should return a tuple of length 1, 2, or 3, "
"where the optional second and third elements will be used for y and "
"sample_weight respectively. Any other type provided will be wrapped in a "
@@ -4784,31 +5494,31 @@ msgid ""
"features, targets, and weights from the keys of a single dict."
msgstr ""
-#: keras.engine.training.Model.fit:186 of
-msgid "A notable unsupported data type is the namedtuple. The reason is that"
+#: keras.src.engine.training.Model.fit:203 of
+msgid "A notable unsupported data type is the namedtuple. The reason is"
msgstr ""
-#: keras.engine.training.Model.fit:187 of
+#: keras.src.engine.training.Model.fit:204 of
msgid ""
-"it behaves like both an ordered datatype (tuple) and a mapping datatype "
-"(dict). So given a namedtuple of the form:"
+"that it behaves like both an ordered datatype (tuple) and a mapping "
+"datatype (dict). So given a namedtuple of the form:"
msgstr ""
-#: keras.engine.training.Model.fit:189 of
+#: keras.src.engine.training.Model.fit:206 of
msgid "`namedtuple(\"example_tuple\", [\"y\", \"x\"])`"
msgstr ""
-#: keras.engine.training.Model.fit:190 of
+#: keras.src.engine.training.Model.fit:207 of
msgid ""
"it is ambiguous whether to reverse the order of the elements when "
"interpreting the value. Even worse is a tuple of the form:"
msgstr ""
-#: keras.engine.training.Model.fit:192 of
+#: keras.src.engine.training.Model.fit:209 of
msgid "`namedtuple(\"other_tuple\", [\"x\", \"y\", \"z\"])`"
msgstr ""
-#: keras.engine.training.Model.fit:193 of
+#: keras.src.engine.training.Model.fit:210 of
msgid ""
"where it is unclear if the tuple was intended to be unpacked into x, y, "
"and sample_weight or passed through as a single element to `x`. As a "
@@ -4816,44 +5526,44 @@ msgid ""
"encounters a namedtuple. (Along with instructions to remedy the issue.)"
msgstr ""
-#: keras.engine.training.Model.fit:198 of
+#: keras.src.engine.training.Model.fit:216 of
msgid ""
"A `History` object. Its `History.history` attribute is a record of "
"training loss values and metrics values at successive epochs, as well as "
"validation loss values and validation metrics values (if applicable)."
msgstr ""
-#: keras.engine.training.Model.fit:203 of
+#: keras.src.engine.training.Model.fit:221 of
msgid "1. If the model was never compiled or,"
msgstr ""
-#: keras.engine.training.Model.fit:203 of
+#: keras.src.engine.training.Model.fit:221 of
msgid "If the model was never compiled or,"
msgstr ""
-#: keras.engine.training.Model.fit:205 of
+#: keras.src.engine.training.Model.fit:223 of
msgid ""
"In case of mismatch between the provided input data and what the "
"model expects or when the input data is empty."
msgstr ""
-#: keras.engine.training.Model.fit_generator:1 of
+#: keras.src.engine.training.Model.fit_generator:1 of
msgid "Fits the model on data yielded batch-by-batch by a Python generator."
msgstr ""
-#: keras.engine.training.Model.fit_generator:4 of
+#: keras.src.engine.training.Model.fit_generator:4 of
msgid ""
"`Model.fit` now supports generators, so there is no longer any need to "
"use this endpoint."
msgstr ""
-#: keras.engine.base_layer.Layer.from_config:1
-#: keras.engine.training.Model.from_config:1 of
+#: keras.src.engine.base_layer.Layer.from_config:1
+#: keras.src.engine.training.Model.from_config:1 of
msgid "Creates a layer from its config."
msgstr ""
-#: keras.engine.base_layer.Layer.from_config:3
-#: keras.engine.training.Model.from_config:3 of
+#: keras.src.engine.base_layer.Layer.from_config:3
+#: keras.src.engine.training.Model.from_config:3 of
msgid ""
"This method is the reverse of `get_config`, capable of instantiating the "
"same layer from the config dictionary. It does not handle layer "
@@ -4861,69 +5571,106 @@ msgid ""
"`set_weights`)."
msgstr ""
-#: keras.engine.base_layer.Layer.from_config:8
-#: keras.engine.training.Model.from_config:8 of
+#: keras.src.engine.base_layer.Layer.from_config:8
+#: keras.src.engine.training.Model.from_config:8 of
msgid "A Python dictionary, typically the output of get_config."
msgstr ""
-#: keras.engine.base_layer.Layer.from_config:11
-#: keras.engine.training.Model.from_config:11
-#: keras.engine.training.Model.get_layer:9 of
+#: keras.src.engine.base_layer.Layer.from_config:11
+#: keras.src.engine.training.Model.from_config:11
+#: keras.src.engine.training.Model.get_layer:9 of
msgid "A layer instance."
msgstr ""
-#: keras.engine.base_layer.Layer.get_config:1
-#: keras.engine.training.Model.get_config:1 of
-msgid "Returns the config of the layer."
+#: keras.src.engine.base_layer.Layer.get_build_config:1 of
+msgid "Returns a dictionary with the layer's input shape."
msgstr ""
-#: keras.engine.base_layer.Layer.get_config:3
-#: keras.engine.training.Model.get_config:3 of
+#: keras.src.engine.base_layer.Layer.get_build_config:3 of
msgid ""
-"A layer config is a Python dictionary (serializable) containing the "
-"configuration of a layer. The same layer can be reinstantiated later "
-"(without its trained weights) from this configuration."
+"This method returns a config dict that can be used by "
+"`build_from_config(config)` to create all states (e.g. Variables and "
+"Lookup tables) needed by the layer."
msgstr ""
-#: keras.engine.base_layer.Layer.get_config:8
-#: keras.engine.training.Model.get_config:8 of
+#: keras.src.engine.base_layer.Layer.get_build_config:7 of
msgid ""
-"The config of a layer does not include connectivity information, nor the "
-"layer class name. These are handled by `Network` (one layer of "
-"abstraction above)."
+"By default, the config only contains the input shape that the layer was "
+"built with. If you're writing a custom layer that creates state in an "
+"unusual way, you should override this method to make sure this state is "
+"already created when Keras attempts to load its value upon model loading."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.get_build_config:13 of
+msgid "A dict containing the input shape associated with the layer."
+msgstr ""
+
+#: keras.src.engine.training.Model.get_compile_config:1 of
+msgid "Returns a serialized config with information for compiling the model."
msgstr ""
-#: keras.engine.base_layer.Layer.get_config:12
-#: keras.engine.training.Model.get_config:12 of
+#: keras.src.engine.training.Model.get_compile_config:3 of
+msgid ""
+"This method returns a config dictionary containing all the information "
+"(optimizer, loss, metrics, etc.) with which the model was compiled."
+msgstr ""
+
+#: keras.src.engine.training.Model.get_compile_config:6 of
+msgid "A dict containing information for compiling the model."
+msgstr ""
+
+#: keras.src.engine.training.Model.get_config:1 of
+msgid "Returns the config of the `Model`."
+msgstr ""
+
+#: keras.src.engine.training.Model.get_config:3 of
+msgid ""
+"Config is a Python dictionary (serializable) containing the configuration"
+" of an object, which in this case is a `Model`. This allows the `Model` "
+"to be be reinstantiated later (without its trained weights) from this "
+"configuration."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.get_config:12
+#: keras.src.engine.training.Model.get_config:8 of
msgid ""
"Note that `get_config()` does not guarantee to return a fresh copy of "
"dict every time it is called. The callers should make a copy of the "
"returned dict if they want to modify it."
msgstr ""
-#: keras.engine.base_layer.Layer.get_config:16
-#: keras.engine.training.Model.get_config:16 of
-msgid "Python dictionary."
+#: keras.src.engine.training.Model.get_config:12 of
+msgid ""
+"Developers of subclassed `Model` are advised to override this method, and"
+" continue to update the dict from `super(MyModel, self).get_config()` to "
+"provide the proper configuration of this `Model`. The default config will"
+" return config dict for init parameters if they are basic types. Raises "
+"`NotImplementedError` when in cases where a custom `get_config()` "
+"implementation is required for the subclassed model."
msgstr ""
-#: keras.engine.base_layer.Layer.get_input_at:1 of
+#: keras.src.engine.training.Model.get_config:19 of
+msgid "Python dictionary containing the configuration of this `Model`."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.get_input_at:1 of
msgid "Retrieves the input tensor(s) of a layer at a given node."
msgstr ""
-#: keras.engine.base_layer.Layer.get_input_at:3 of
+#: keras.src.engine.base_layer.Layer.get_input_at:3 of
msgid ""
"Integer, index of the node from which to retrieve the attribute. E.g. "
"`node_index=0` will correspond to the first input node of the layer."
msgstr ""
-#: keras.engine.base_layer.Layer.get_input_at:8 of
+#: keras.src.engine.base_layer.Layer.get_input_at:8 of
msgid "A tensor (or list of tensors if the layer has multiple inputs)."
msgstr ""
-#: keras.engine.base_layer.Layer.get_input_at:10
-#: keras.engine.base_layer.Layer.get_input_shape_at:11
-#: keras.engine.base_layer.Layer.get_output_at:10
-#: keras.engine.base_layer.Layer.get_output_shape_at:11 of
+#: keras.src.engine.base_layer.Layer.get_input_at:10
+#: keras.src.engine.base_layer.Layer.get_input_shape_at:11
+#: keras.src.engine.base_layer.Layer.get_output_at:10
+#: keras.src.engine.base_layer.Layer.get_output_shape_at:11 of
#: tensorcircuit.applications.van.MADE.input:8
#: tensorcircuit.applications.van.MaskedConv2D.input:8
#: tensorcircuit.applications.van.MaskedLinear.input:8
@@ -4931,129 +5678,192 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.input:8
#: tensorcircuit.applications.van.ResidualBlock.input:8
#: tensorcircuit.applications.vqes.Linear.input:8
+#: tensorcircuit.keras.HardwareLayer.input:8
#: tensorcircuit.keras.QuantumLayer.input:8
msgid "If called in Eager mode."
msgstr ""
-#: keras.engine.base_layer.Layer.get_input_mask_at:1 of
+#: keras.src.engine.base_layer.Layer.get_input_mask_at:1 of
msgid "Retrieves the input mask tensor(s) of a layer at a given node."
msgstr ""
-#: keras.engine.base_layer.Layer.get_input_mask_at:3
-#: keras.engine.base_layer.Layer.get_input_shape_at:3
-#: keras.engine.base_layer.Layer.get_output_mask_at:3
-#: keras.engine.base_layer.Layer.get_output_shape_at:3 of
+#: keras.src.engine.base_layer.Layer.get_input_mask_at:3
+#: keras.src.engine.base_layer.Layer.get_input_shape_at:3
+#: keras.src.engine.base_layer.Layer.get_output_mask_at:3
+#: keras.src.engine.base_layer.Layer.get_output_shape_at:3 of
msgid ""
"Integer, index of the node from which to retrieve the attribute. E.g. "
"`node_index=0` will correspond to the first time the layer was called."
msgstr ""
-#: keras.engine.base_layer.Layer.get_input_mask_at:8 of
+#: keras.src.engine.base_layer.Layer.get_input_mask_at:8 of
msgid "A mask tensor (or list of tensors if the layer has multiple inputs)."
msgstr ""
-#: keras.engine.base_layer.Layer.get_input_shape_at:1 of
+#: keras.src.engine.base_layer.Layer.get_input_shape_at:1 of
msgid "Retrieves the input shape(s) of a layer at a given node."
msgstr ""
-#: keras.engine.base_layer.Layer.get_input_shape_at:8 of
+#: keras.src.engine.base_layer.Layer.get_input_shape_at:8 of
msgid "A shape tuple (or list of shape tuples if the layer has multiple inputs)."
msgstr ""
-#: keras.engine.training.Model.get_layer:1 of
+#: keras.src.engine.training.Model.get_layer:1 of
msgid "Retrieves a layer based on either its name (unique) or index."
msgstr ""
-#: keras.engine.training.Model.get_layer:3 of
+#: keras.src.engine.training.Model.get_layer:3 of
msgid ""
"If `name` and `index` are both provided, `index` will take precedence. "
"Indices are based on order of horizontal graph traversal (bottom-up)."
msgstr ""
-#: keras.engine.training.Model.get_layer:6 of
+#: keras.src.engine.training.Model.get_layer:6 of
msgid "String, name of layer."
msgstr ""
-#: keras.engine.training.Model.get_layer:7 of
+#: keras.src.engine.training.Model.get_layer:7 of
msgid "Integer, index of layer."
msgstr ""
-#: keras.engine.base_layer.Layer.get_losses_for:3 of
-msgid "Retrieves losses relevant to a specific set of inputs."
+#: keras.src.engine.training.Model.get_metrics_result:1 of
+msgid "Returns the model's metrics values as a dict."
msgstr ""
-#: keras.engine.base_layer.Layer.get_losses_for:5
-#: keras.engine.base_layer.Layer.get_updates_for:5 of
-msgid "Input tensor or list/tuple of input tensors."
+#: keras.src.engine.training.Model.get_metrics_result:3 of
+msgid ""
+"If any of the metric result is a dict (containing multiple metrics), each"
+" of them gets added to the top level returned dict of this method."
msgstr ""
-#: keras.engine.base_layer.Layer.get_losses_for:7 of
-msgid "List of loss tensors of the layer that depend on `inputs`."
+#: keras.src.engine.training.Model.get_metrics_result:6 of
+msgid ""
+"A `dict` containing values of the metrics listed in `self.metrics`. "
+"Example: `{'loss': 0.2, 'accuracy': 0.7}`."
msgstr ""
-#: keras.engine.base_layer.Layer.get_output_at:1 of
+#: keras.src.engine.base_layer.Layer.get_output_at:1 of
msgid "Retrieves the output tensor(s) of a layer at a given node."
msgstr ""
-#: keras.engine.base_layer.Layer.get_output_at:3 of
+#: keras.src.engine.base_layer.Layer.get_output_at:3 of
msgid ""
"Integer, index of the node from which to retrieve the attribute. E.g. "
"`node_index=0` will correspond to the first output node of the layer."
msgstr ""
-#: keras.engine.base_layer.Layer.get_output_at:8 of
+#: keras.src.engine.base_layer.Layer.get_output_at:8 of
msgid "A tensor (or list of tensors if the layer has multiple outputs)."
msgstr ""
-#: keras.engine.base_layer.Layer.get_output_mask_at:1 of
+#: keras.src.engine.base_layer.Layer.get_output_mask_at:1 of
msgid "Retrieves the output mask tensor(s) of a layer at a given node."
msgstr ""
-#: keras.engine.base_layer.Layer.get_output_mask_at:8 of
+#: keras.src.engine.base_layer.Layer.get_output_mask_at:8 of
msgid "A mask tensor (or list of tensors if the layer has multiple outputs)."
msgstr ""
-#: keras.engine.base_layer.Layer.get_output_shape_at:1 of
+#: keras.src.engine.base_layer.Layer.get_output_shape_at:1 of
msgid "Retrieves the output shape(s) of a layer at a given node."
msgstr ""
-#: keras.engine.base_layer.Layer.get_output_shape_at:8 of
+#: keras.src.engine.base_layer.Layer.get_output_shape_at:8 of
msgid "A shape tuple (or list of shape tuples if the layer has multiple outputs)."
msgstr ""
-#: keras.engine.base_layer.Layer.get_updates_for:3 of
-msgid "Retrieves updates relevant to a specific set of inputs."
+#: keras.src.engine.training.Model.get_weight_paths:1 of
+msgid "Retrieve all the variables and their paths for the model."
msgstr ""
-#: keras.engine.base_layer.Layer.get_updates_for:7 of
-msgid "List of update ops of the layer that depend on `inputs`."
+#: keras.src.engine.training.Model.get_weight_paths:3 of
+msgid ""
+"The variable path (string) is a stable key to identify a `tf.Variable` "
+"instance owned by the model. It can be used to specify variable-specific "
+"configurations (e.g. DTensor, quantization) from a global view."
+msgstr ""
+
+#: keras.src.engine.training.Model.get_weight_paths:7 of
+msgid ""
+"This method returns a dict with weight object paths as keys and the "
+"corresponding `tf.Variable` instances as values."
+msgstr ""
+
+#: keras.src.engine.training.Model.get_weight_paths:10 of
+msgid ""
+"Note that if the model is a subclassed model and the weights haven't been"
+" initialized, an empty dict will be returned."
+msgstr ""
+
+#: keras.src.engine.training.Model.get_weight_paths:13 of
+msgid ""
+"A dict where keys are variable paths and values are `tf.Variable` "
+"instances."
+msgstr ""
+
+#: keras.src.engine.training.Model.get_weight_paths:16 of
+msgid "A dict where keys are variable paths and values are `tf.Variable`"
+msgstr ""
+
+#: keras.src.engine.training.Model.get_weight_paths:16 of
+msgid "instances."
+msgstr ""
+
+#: keras.src.engine.training.Model.get_weight_paths:20 of
+msgid "```python class SubclassModel(tf.keras.Model):"
msgstr ""
-#: keras.engine.training.Model.get_weights:1 of
+#: keras.src.engine.training.Model.get_weight_paths:26 of
+msgid "def __init__(self, name=None):"
+msgstr ""
+
+#: keras.src.engine.training.Model.get_weight_paths:24 of
+msgid ""
+"super().__init__(name=name) self.d1 = tf.keras.layers.Dense(10) self.d2 ="
+" tf.keras.layers.Dense(20)"
+msgstr ""
+
+#: keras.src.engine.training.Model.get_weight_paths:29 of
+msgid "x = self.d1(inputs) return self.d2(x)"
+msgstr ""
+
+#: keras.src.engine.training.Model.get_weight_paths:32 of
+msgid ""
+"model = SubclassModel() model(tf.zeros((10, 10))) weight_paths = "
+"model.get_weight_paths() # weight_paths: # { # 'd1.kernel': "
+"model.d1.kernel, # 'd1.bias': model.d1.bias, # 'd2.kernel': "
+"model.d2.kernel, # 'd2.bias': model.d2.bias, # }"
+msgstr ""
+
+#: keras.src.engine.training.Model.get_weight_paths:43 of
+msgid ""
+"# Functional model inputs = tf.keras.Input((10,), batch_size=10) x = "
+"tf.keras.layers.Dense(20, name='d1')(inputs) output = "
+"tf.keras.layers.Dense(30, name='d2')(x) model = tf.keras.Model(inputs, "
+"output) d1 = model.layers[1] d2 = model.layers[2] weight_paths = "
+"model.get_weight_paths() # weight_paths: # { # 'd1.kernel': d1.kernel,"
+" # 'd1.bias': d1.bias, # 'd2.kernel': d2.kernel, # 'd2.bias': "
+"d2.bias, # } ```"
+msgstr ""
+
+#: keras.src.engine.training.Model.get_weights:1 of
msgid "Retrieves the weights of the model."
msgstr ""
-#: keras.engine.training.Model.get_weights:3 of
+#: keras.src.engine.training.Model.get_weights:3 of
msgid "A flat list of Numpy arrays."
msgstr ""
#: of tensorcircuit.applications.van.MADE.inbound_nodes:1
-#: tensorcircuit.applications.van.MADE.outbound_nodes:1
#: tensorcircuit.applications.van.MaskedConv2D.inbound_nodes:1
-#: tensorcircuit.applications.van.MaskedConv2D.outbound_nodes:1
#: tensorcircuit.applications.van.MaskedLinear.inbound_nodes:1
-#: tensorcircuit.applications.van.MaskedLinear.outbound_nodes:1
#: tensorcircuit.applications.van.NMF.inbound_nodes:1
-#: tensorcircuit.applications.van.NMF.outbound_nodes:1
#: tensorcircuit.applications.van.PixelCNN.inbound_nodes:1
-#: tensorcircuit.applications.van.PixelCNN.outbound_nodes:1
#: tensorcircuit.applications.van.ResidualBlock.inbound_nodes:1
-#: tensorcircuit.applications.van.ResidualBlock.outbound_nodes:1
#: tensorcircuit.applications.vqes.Linear.inbound_nodes:1
-#: tensorcircuit.applications.vqes.Linear.outbound_nodes:1
+#: tensorcircuit.keras.HardwareLayer.inbound_nodes:1
#: tensorcircuit.keras.QuantumLayer.inbound_nodes:1
-#: tensorcircuit.keras.QuantumLayer.outbound_nodes:1
-msgid "Deprecated, do NOT use! Only for compatibility with external Keras."
+msgid "Return Functional API nodes upstream of this layer."
msgstr ""
#: of tensorcircuit.applications.van.MADE.input:1
@@ -5063,6 +5873,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.input:1
#: tensorcircuit.applications.van.ResidualBlock.input:1
#: tensorcircuit.applications.vqes.Linear.input:1
+#: tensorcircuit.keras.HardwareLayer.input:1
#: tensorcircuit.keras.QuantumLayer.input:1
msgid "Retrieves the input tensor(s) of a layer."
msgstr ""
@@ -5074,6 +5885,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.input:3
#: tensorcircuit.applications.van.ResidualBlock.input:3
#: tensorcircuit.applications.vqes.Linear.input:3
+#: tensorcircuit.keras.HardwareLayer.input:3
#: tensorcircuit.keras.QuantumLayer.input:3
msgid ""
"Only applicable if the layer has exactly one input, i.e. if it is "
@@ -5087,6 +5899,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.input:6
#: tensorcircuit.applications.van.ResidualBlock.input:6
#: tensorcircuit.applications.vqes.Linear.input:6
+#: tensorcircuit.keras.HardwareLayer.input:6
#: tensorcircuit.keras.QuantumLayer.input:6
msgid "Input tensor or list of input tensors."
msgstr ""
@@ -5098,6 +5911,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.input:9
#: tensorcircuit.applications.van.ResidualBlock.input:9
#: tensorcircuit.applications.vqes.Linear.input:9
+#: tensorcircuit.keras.HardwareLayer.input:9
#: tensorcircuit.keras.QuantumLayer.input:9
msgid "If no inbound nodes are found."
msgstr ""
@@ -5109,6 +5923,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.input_mask:1
#: tensorcircuit.applications.van.ResidualBlock.input_mask:1
#: tensorcircuit.applications.vqes.Linear.input_mask:1
+#: tensorcircuit.keras.HardwareLayer.input_mask:1
#: tensorcircuit.keras.QuantumLayer.input_mask:1
msgid "Retrieves the input mask tensor(s) of a layer."
msgstr ""
@@ -5127,6 +5942,8 @@ msgstr ""
#: tensorcircuit.applications.van.ResidualBlock.output_mask:3
#: tensorcircuit.applications.vqes.Linear.input_mask:3
#: tensorcircuit.applications.vqes.Linear.output_mask:3
+#: tensorcircuit.keras.HardwareLayer.input_mask:3
+#: tensorcircuit.keras.HardwareLayer.output_mask:3
#: tensorcircuit.keras.QuantumLayer.input_mask:3
#: tensorcircuit.keras.QuantumLayer.output_mask:3
msgid ""
@@ -5141,6 +5958,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.input_mask:6
#: tensorcircuit.applications.van.ResidualBlock.input_mask:6
#: tensorcircuit.applications.vqes.Linear.input_mask:6
+#: tensorcircuit.keras.HardwareLayer.input_mask:6
#: tensorcircuit.keras.QuantumLayer.input_mask:6
msgid "Input mask tensor (potentially None) or list of input mask tensors."
msgstr ""
@@ -5159,6 +5977,8 @@ msgstr ""
#: tensorcircuit.applications.van.ResidualBlock.output_mask:9
#: tensorcircuit.applications.vqes.Linear.input_mask:9
#: tensorcircuit.applications.vqes.Linear.output_mask:9
+#: tensorcircuit.keras.HardwareLayer.input_mask:9
+#: tensorcircuit.keras.HardwareLayer.output_mask:9
#: tensorcircuit.keras.QuantumLayer.input_mask:9
#: tensorcircuit.keras.QuantumLayer.output_mask:9
msgid "if the layer is connected to"
@@ -5171,6 +5991,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.input_shape:1
#: tensorcircuit.applications.van.ResidualBlock.input_shape:1
#: tensorcircuit.applications.vqes.Linear.input_shape:1
+#: tensorcircuit.keras.HardwareLayer.input_shape:1
#: tensorcircuit.keras.QuantumLayer.input_shape:1
msgid "Retrieves the input shape(s) of a layer."
msgstr ""
@@ -5182,6 +6003,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.input_shape:3
#: tensorcircuit.applications.van.ResidualBlock.input_shape:3
#: tensorcircuit.applications.vqes.Linear.input_shape:3
+#: tensorcircuit.keras.HardwareLayer.input_shape:3
#: tensorcircuit.keras.QuantumLayer.input_shape:3
msgid ""
"Only applicable if the layer has exactly one input, i.e. if it is "
@@ -5195,6 +6017,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.input_shape:7
#: tensorcircuit.applications.van.ResidualBlock.input_shape:7
#: tensorcircuit.applications.vqes.Linear.input_shape:7
+#: tensorcircuit.keras.HardwareLayer.input_shape:7
#: tensorcircuit.keras.QuantumLayer.input_shape:7
msgid ""
"Input shape, as an integer shape tuple (or list of shape tuples, one "
@@ -5208,6 +6031,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.input_shape:10
#: tensorcircuit.applications.van.ResidualBlock.input_shape:10
#: tensorcircuit.applications.vqes.Linear.input_shape:10
+#: tensorcircuit.keras.HardwareLayer.input_shape:10
#: tensorcircuit.keras.QuantumLayer.input_shape:10
msgid "if the layer has no defined input_shape."
msgstr ""
@@ -5233,6 +6057,9 @@ msgstr ""
#: tensorcircuit.applications.vqes.Linear.input_shape:11
#: tensorcircuit.applications.vqes.Linear.output:9
#: tensorcircuit.applications.vqes.Linear.output_shape:10
+#: tensorcircuit.keras.HardwareLayer.input_shape:11
+#: tensorcircuit.keras.HardwareLayer.output:9
+#: tensorcircuit.keras.HardwareLayer.output_shape:10
#: tensorcircuit.keras.QuantumLayer.input_shape:11
#: tensorcircuit.keras.QuantumLayer.output:9
#: tensorcircuit.keras.QuantumLayer.output_shape:10
@@ -5246,6 +6073,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.input_spec:1
#: tensorcircuit.applications.van.ResidualBlock.input_spec:1
#: tensorcircuit.applications.vqes.Linear.input_spec:1
+#: tensorcircuit.keras.HardwareLayer.input_spec:1
#: tensorcircuit.keras.QuantumLayer.input_spec:1
msgid "`InputSpec` instance(s) describing the input format for this layer."
msgstr ""
@@ -5257,6 +6085,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.input_spec:3
#: tensorcircuit.applications.van.ResidualBlock.input_spec:3
#: tensorcircuit.applications.vqes.Linear.input_spec:3
+#: tensorcircuit.keras.HardwareLayer.input_spec:3
#: tensorcircuit.keras.QuantumLayer.input_spec:3
msgid ""
"When you create a layer subclass, you can set `self.input_spec` to enable"
@@ -5272,6 +6101,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.input_spec:8
#: tensorcircuit.applications.van.ResidualBlock.input_spec:8
#: tensorcircuit.applications.vqes.Linear.input_spec:8
+#: tensorcircuit.keras.HardwareLayer.input_spec:8
#: tensorcircuit.keras.QuantumLayer.input_spec:8
msgid "```python self.input_spec = tf.keras.layers.InputSpec(ndim=4) ```"
msgstr ""
@@ -5283,6 +6113,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.input_spec:12
#: tensorcircuit.applications.van.ResidualBlock.input_spec:12
#: tensorcircuit.applications.vqes.Linear.input_spec:12
+#: tensorcircuit.keras.HardwareLayer.input_spec:12
#: tensorcircuit.keras.QuantumLayer.input_spec:12
msgid ""
"Now, if you try to call the layer on an input that isn't rank 4 (for "
@@ -5297,6 +6128,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.input_spec:16
#: tensorcircuit.applications.van.ResidualBlock.input_spec:16
#: tensorcircuit.applications.vqes.Linear.input_spec:16
+#: tensorcircuit.keras.HardwareLayer.input_spec:16
#: tensorcircuit.keras.QuantumLayer.input_spec:16
msgid ""
"``` ValueError: Input 0 of layer conv2d is incompatible with the layer: "
@@ -5310,6 +6142,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.input_spec:21
#: tensorcircuit.applications.van.ResidualBlock.input_spec:21
#: tensorcircuit.applications.vqes.Linear.input_spec:21
+#: tensorcircuit.keras.HardwareLayer.input_spec:21
#: tensorcircuit.keras.QuantumLayer.input_spec:21
msgid ""
"Input checks that can be specified via `input_spec` include: - Structure "
@@ -5324,6 +6157,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.input_spec:27
#: tensorcircuit.applications.van.ResidualBlock.input_spec:27
#: tensorcircuit.applications.vqes.Linear.input_spec:27
+#: tensorcircuit.keras.HardwareLayer.input_spec:27
#: tensorcircuit.keras.QuantumLayer.input_spec:27
msgid "For more information, see `tf.keras.layers.InputSpec`."
msgstr ""
@@ -5335,96 +6169,130 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.input_spec:29
#: tensorcircuit.applications.van.ResidualBlock.input_spec:29
#: tensorcircuit.applications.vqes.Linear.input_spec:29
+#: tensorcircuit.keras.HardwareLayer.input_spec:29
#: tensorcircuit.keras.QuantumLayer.input_spec:29
msgid "A `tf.keras.layers.InputSpec` instance, or nested structure thereof."
msgstr ""
-#: keras.engine.training.Model.load_weights:1 of
-msgid "Loads all layer weights, either from a TensorFlow or an HDF5 weight file."
+#: of tensorcircuit.applications.van.MADE.jit_compile:1
+#: tensorcircuit.applications.van.NMF.jit_compile:1
+#: tensorcircuit.applications.van.PixelCNN.jit_compile:1
+msgid "Specify whether to compile the model with XLA."
msgstr ""
-#: keras.engine.training.Model.load_weights:3 of
+#: of tensorcircuit.applications.van.MADE.jit_compile:3
+#: tensorcircuit.applications.van.NMF.jit_compile:3
+#: tensorcircuit.applications.van.PixelCNN.jit_compile:3
msgid ""
-"If `by_name` is False weights are loaded based on the network's topology."
-" This means the architecture should be the same as when the weights were "
-"saved. Note that layers that don't have weights are not taken into "
-"account in the topological ordering, so adding or removing layers is fine"
-" as long as they don't have weights."
+"[XLA](https://www.tensorflow.org/xla) is an optimizing compiler for "
+"machine learning. `jit_compile` is not enabled by default. Note that "
+"`jit_compile=True` may not necessarily work for all models."
msgstr ""
-#: keras.engine.training.Model.load_weights:9 of
+#: of tensorcircuit.applications.van.MADE.jit_compile:7
+#: tensorcircuit.applications.van.NMF.jit_compile:7
+#: tensorcircuit.applications.van.PixelCNN.jit_compile:7
msgid ""
-"If `by_name` is True, weights are loaded into layers only if they share "
-"the same name. This is useful for fine-tuning or transfer-learning models"
-" where some of the layers have changed."
+"For more information on supported operations please refer to the [XLA "
+"documentation](https://www.tensorflow.org/xla). Also refer to [known XLA "
+"issues](https://www.tensorflow.org/xla/known_issues) for more details."
msgstr ""
-#: keras.engine.training.Model.load_weights:13 of
+#: keras.src.engine.base_layer.Layer.load_own_variables:1 of
+msgid "Loads the state of the layer."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.load_own_variables:3 of
msgid ""
-"Only topological loading (`by_name=False`) is supported when loading "
-"weights from the TensorFlow format. Note that topological loading differs"
-" slightly between TensorFlow and HDF5 formats for user-defined classes "
-"inheriting from `tf.keras.Model`: HDF5 loads based on a flattened list of"
-" weights, while the TensorFlow format loads based on the object-local "
-"names of attributes to which layers are assigned in the `Model`'s "
-"constructor."
+"You can override this method to take full control of how the state of the"
+" layer is loaded upon calling `keras.models.load_model()`."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.load_own_variables:6 of
+msgid "Dict from which the state of the model will be loaded."
msgstr ""
-#: keras.engine.training.Model.load_weights:20 of
+#: keras.src.engine.training.Model.load_weights:1 of
+msgid "Loads all layer weights from a saved files."
+msgstr ""
+
+#: keras.src.engine.training.Model.load_weights:3 of
msgid ""
-"String, path to the weights file to load. For weight files in TensorFlow "
-"format, this is the file prefix (the same as was passed to "
-"`save_weights`). This can also be a path to a SavedModel saved from "
-"`model.save`."
+"The saved file could be a SavedModel file, a `.keras` file (v3 saving "
+"format), or a file created via `model.save_weights()`."
msgstr ""
-#: keras.engine.training.Model.load_weights:24 of
+#: keras.src.engine.training.Model.load_weights:6 of
msgid ""
-"Boolean, whether to load weights by name or by topological order. Only "
-"topological loading is supported for weight files in TensorFlow format."
+"By default, weights are loaded based on the network's topology. This "
+"means the architecture should be the same as when the weights were saved."
+" Note that layers that don't have weights are not taken into account in "
+"the topological ordering, so adding or removing layers is fine as long as"
+" they don't have weights."
+msgstr ""
+
+#: keras.src.engine.training.Model.load_weights:12 of
+msgid "**Partial weight loading**"
msgstr ""
-#: keras.engine.training.Model.load_weights:27 of
+#: keras.src.engine.training.Model.load_weights:14 of
msgid ""
-"Boolean, whether to skip loading of layers where there is a mismatch in "
-"the number of weights, or a mismatch in the shape of the weight (only "
-"valid when `by_name=True`)."
+"If you have modified your model, for instance by adding a new layer (with"
+" weights) or by changing the shape of the weights of a layer, you can "
+"choose to ignore errors and continue loading by setting "
+"`skip_mismatch=True`. In this case any layer with mismatching weights "
+"will be skipped. A warning will be displayed for each skipped layer."
msgstr ""
-#: keras.engine.training.Model.load_weights:30 of
+#: keras.src.engine.training.Model.load_weights:21 of
+msgid "**Weight loading by name**"
+msgstr ""
+
+#: keras.src.engine.training.Model.load_weights:23 of
msgid ""
-"Optional `tf.train.CheckpointOptions` object that specifies options for "
-"loading weights."
+"If your weights are saved as a `.h5` file created via "
+"`model.save_weights()`, you can use the argument `by_name=True`."
+msgstr ""
+
+#: keras.src.engine.training.Model.load_weights:26 of
+msgid ""
+"In this case, weights are loaded into layers only if they share the same "
+"name. This is useful for fine-tuning or transfer-learning models where "
+"some of the layers have changed."
msgstr ""
-#: keras.engine.training.Model.load_weights:33 of
+#: keras.src.engine.training.Model.load_weights:30 of
msgid ""
-"When loading a weight file in TensorFlow format, returns the same status "
-"object as `tf.train.Checkpoint.restore`. When graph building, restore ops"
-" are run automatically as soon as the network is built (on first call for"
-" user-defined classes inheriting from `Model`, immediately if it is "
-"already built). When loading weights in HDF5 format, returns `None`."
+"Note that only topological loading (`by_name=False`) is supported when "
+"loading weights from the `.keras` v3 format or from the TensorFlow "
+"SavedModel format."
msgstr ""
-#: keras.engine.training.Model.load_weights:33 of
+#: keras.src.engine.training.Model.load_weights:34 of
msgid ""
-"When loading a weight file in TensorFlow format, returns the same status "
-"object as `tf.train.Checkpoint.restore`. When graph building, restore ops"
-" are run automatically as soon as the network is built (on first call for"
-" user-defined classes inheriting from `Model`, immediately if it is "
-"already built)."
+"String, path to the weights file to load. For weight files in TensorFlow "
+"format, this is the file prefix (the same as was passed to "
+"`save_weights()`). This can also be a path to a SavedModel or a `.keras` "
+"file (v3 saving format) saved via `model.save()`."
msgstr ""
-#: keras.engine.training.Model.load_weights:39 of
-msgid "When loading weights in HDF5 format, returns `None`."
+#: keras.src.engine.training.Model.load_weights:39 of
+msgid ""
+"Boolean, whether to skip loading of layers where there is a mismatch in "
+"the number of weights, or a mismatch in the shape of the weights."
msgstr ""
-#: keras.engine.training.Model.load_weights:41 of
-msgid "If `h5py` is not available and the weight file is in HDF5 format."
+#: keras.src.engine.training.Model.load_weights:42 of
+msgid ""
+"Boolean, whether to load weights by name or by topological order. Only "
+"topological loading is supported for weight files in the `.keras` v3 "
+"format or in the TensorFlow SavedModel format."
msgstr ""
-#: keras.engine.training.Model.load_weights:42 of
-msgid "If `skip_mismatch` is set to `True` when `by_name` is `False`."
+#: keras.src.engine.training.Model.load_weights:45 of
+msgid ""
+"Optional `tf.train.CheckpointOptions` object that specifies options for "
+"loading weights (only valid for a SavedModel file)."
msgstr ""
#: of tensorcircuit.applications.van.MADE.losses:1
@@ -5434,6 +6302,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.losses:1
#: tensorcircuit.applications.van.ResidualBlock.losses:1
#: tensorcircuit.applications.vqes.Linear.losses:1
+#: tensorcircuit.keras.HardwareLayer.losses:1
#: tensorcircuit.keras.QuantumLayer.losses:1
msgid "List of losses added using the `add_loss()` API."
msgstr ""
@@ -5445,6 +6314,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.losses:3
#: tensorcircuit.applications.van.ResidualBlock.losses:3
#: tensorcircuit.applications.vqes.Linear.losses:3
+#: tensorcircuit.keras.HardwareLayer.losses:3
#: tensorcircuit.keras.QuantumLayer.losses:3
msgid ""
"Variable regularization tensors are created when this property is "
@@ -5453,53 +6323,55 @@ msgid ""
"variables."
msgstr ""
-#: keras.engine.training.Model.reset_metrics:3 of
-#: tensorcircuit.applications.van.MADE.losses:7
+#: keras.src.engine.training.Model.reset_metrics:3 of
+#: tensorcircuit.applications.van.MADE.losses:8
#: tensorcircuit.applications.van.MADE.metrics:6
#: tensorcircuit.applications.van.MADE.metrics_names:6
-#: tensorcircuit.applications.van.MaskedConv2D.losses:7
-#: tensorcircuit.applications.van.MaskedLinear.losses:7
-#: tensorcircuit.applications.van.NMF.losses:7
+#: tensorcircuit.applications.van.MaskedConv2D.losses:8
+#: tensorcircuit.applications.van.MaskedLinear.losses:8
+#: tensorcircuit.applications.van.NMF.losses:8
#: tensorcircuit.applications.van.NMF.metrics:6
#: tensorcircuit.applications.van.NMF.metrics_names:6
-#: tensorcircuit.applications.van.PixelCNN.losses:7
+#: tensorcircuit.applications.van.PixelCNN.losses:8
#: tensorcircuit.applications.van.PixelCNN.metrics:6
#: tensorcircuit.applications.van.PixelCNN.metrics_names:6
-#: tensorcircuit.applications.van.ResidualBlock.losses:7
-#: tensorcircuit.applications.vqes.Linear.losses:7
-#: tensorcircuit.keras.QuantumLayer.losses:7
+#: tensorcircuit.applications.van.ResidualBlock.losses:8
+#: tensorcircuit.applications.vqes.Linear.losses:8
+#: tensorcircuit.keras.HardwareLayer.losses:8
+#: tensorcircuit.keras.QuantumLayer.losses:8
msgid "Examples:"
msgstr ""
-#: of tensorcircuit.applications.van.MADE.losses:39
-#: tensorcircuit.applications.van.MaskedConv2D.losses:39
-#: tensorcircuit.applications.van.MaskedLinear.losses:39
-#: tensorcircuit.applications.van.NMF.losses:39
-#: tensorcircuit.applications.van.PixelCNN.losses:39
-#: tensorcircuit.applications.van.ResidualBlock.losses:39
-#: tensorcircuit.applications.vqes.Linear.losses:39
-#: tensorcircuit.keras.QuantumLayer.losses:39
+#: of tensorcircuit.applications.van.MADE.losses:40
+#: tensorcircuit.applications.van.MaskedConv2D.losses:40
+#: tensorcircuit.applications.van.MaskedLinear.losses:40
+#: tensorcircuit.applications.van.NMF.losses:40
+#: tensorcircuit.applications.van.PixelCNN.losses:40
+#: tensorcircuit.applications.van.ResidualBlock.losses:40
+#: tensorcircuit.applications.vqes.Linear.losses:40
+#: tensorcircuit.keras.HardwareLayer.losses:40
+#: tensorcircuit.keras.QuantumLayer.losses:40
msgid "A list of tensors."
msgstr ""
-#: keras.engine.training.Model.make_predict_function:1 of
+#: keras.src.engine.training.Model.make_predict_function:1 of
msgid "Creates a function that executes one step of inference."
msgstr ""
-#: keras.engine.training.Model.make_predict_function:3 of
+#: keras.src.engine.training.Model.make_predict_function:3 of
msgid ""
"This method can be overridden to support custom inference logic. This "
"method is called by `Model.predict` and `Model.predict_on_batch`."
msgstr ""
-#: keras.engine.training.Model.make_predict_function:6 of
+#: keras.src.engine.training.Model.make_predict_function:6 of
msgid ""
"Typically, this method directly controls `tf.function` and "
"`tf.distribute.Strategy` settings, and delegates the actual evaluation "
"logic to `Model.predict_step`."
msgstr ""
-#: keras.engine.training.Model.make_predict_function:10 of
+#: keras.src.engine.training.Model.make_predict_function:10 of
msgid ""
"This function is cached the first time `Model.predict` or "
"`Model.predict_on_batch` is called. The cache is cleared whenever "
@@ -5507,36 +6379,36 @@ msgid ""
"function with `force=True`."
msgstr ""
-#: keras.engine.training.Model.make_predict_function:15 of
+#: keras.src.engine.training.Model.make_predict_function:15 of
msgid ""
"Whether to regenerate the predict function and skip the cached function "
"if available."
msgstr ""
-#: keras.engine.training.Model.make_predict_function:18 of
+#: keras.src.engine.training.Model.make_predict_function:18 of
msgid ""
"Function. The function created by this method should accept a "
"`tf.data.Iterator`, and return the outputs of the `Model`."
msgstr ""
-#: keras.engine.training.Model.make_test_function:1 of
+#: keras.src.engine.training.Model.make_test_function:1 of
msgid "Creates a function that executes one step of evaluation."
msgstr ""
-#: keras.engine.training.Model.make_test_function:3 of
+#: keras.src.engine.training.Model.make_test_function:3 of
msgid ""
"This method can be overridden to support custom evaluation logic. This "
"method is called by `Model.evaluate` and `Model.test_on_batch`."
msgstr ""
-#: keras.engine.training.Model.make_test_function:6 of
+#: keras.src.engine.training.Model.make_test_function:6 of
msgid ""
"Typically, this method directly controls `tf.function` and "
"`tf.distribute.Strategy` settings, and delegates the actual evaluation "
"logic to `Model.test_step`."
msgstr ""
-#: keras.engine.training.Model.make_test_function:10 of
+#: keras.src.engine.training.Model.make_test_function:10 of
msgid ""
"This function is cached the first time `Model.evaluate` or "
"`Model.test_on_batch` is called. The cache is cleared whenever "
@@ -5544,37 +6416,37 @@ msgid ""
"function with `force=True`."
msgstr ""
-#: keras.engine.training.Model.make_test_function:15 of
+#: keras.src.engine.training.Model.make_test_function:15 of
msgid ""
"Whether to regenerate the test function and skip the cached function if "
"available."
msgstr ""
-#: keras.engine.training.Model.make_test_function:18 of
+#: keras.src.engine.training.Model.make_test_function:18 of
msgid ""
"Function. The function created by this method should accept a "
"`tf.data.Iterator`, and return a `dict` containing values that will be "
"passed to `tf.keras.Callbacks.on_test_batch_end`."
msgstr ""
-#: keras.engine.training.Model.make_train_function:1 of
+#: keras.src.engine.training.Model.make_train_function:1 of
msgid "Creates a function that executes one step of training."
msgstr ""
-#: keras.engine.training.Model.make_train_function:3 of
+#: keras.src.engine.training.Model.make_train_function:3 of
msgid ""
"This method can be overridden to support custom training logic. This "
"method is called by `Model.fit` and `Model.train_on_batch`."
msgstr ""
-#: keras.engine.training.Model.make_train_function:6 of
+#: keras.src.engine.training.Model.make_train_function:6 of
msgid ""
"Typically, this method directly controls `tf.function` and "
"`tf.distribute.Strategy` settings, and delegates the actual training "
"logic to `Model.train_step`."
msgstr ""
-#: keras.engine.training.Model.make_train_function:10 of
+#: keras.src.engine.training.Model.make_train_function:10 of
msgid ""
"This function is cached the first time `Model.fit` or "
"`Model.train_on_batch` is called. The cache is cleared whenever "
@@ -5582,13 +6454,13 @@ msgid ""
"function with `force=True`."
msgstr ""
-#: keras.engine.training.Model.make_train_function:15 of
+#: keras.src.engine.training.Model.make_train_function:15 of
msgid ""
"Whether to regenerate the train function and skip the cached function if "
"available."
msgstr ""
-#: keras.engine.training.Model.make_train_function:18 of
+#: keras.src.engine.training.Model.make_train_function:18 of
msgid ""
"Function. The function created by this method should accept a "
"`tf.data.Iterator`, and return a `dict` containing values that will be "
@@ -5599,7 +6471,7 @@ msgstr ""
#: of tensorcircuit.applications.van.MADE.metrics:1
#: tensorcircuit.applications.van.NMF.metrics:1
#: tensorcircuit.applications.van.PixelCNN.metrics:1
-msgid "Returns the model's metrics added using `compile()`, `add_metric()` APIs."
+msgid "Return metrics added using `compile()` or `add_metric()`."
msgstr ""
#: of tensorcircuit.applications.van.MADE.metrics:3
@@ -5631,6 +6503,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.name:1
#: tensorcircuit.applications.van.ResidualBlock.name:1
#: tensorcircuit.applications.vqes.Linear.name:1
+#: tensorcircuit.keras.HardwareLayer.name:1
#: tensorcircuit.keras.QuantumLayer.name:1
msgid "Name of the layer (string), set in the constructor."
msgstr ""
@@ -5642,6 +6515,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.name_scope:1
#: tensorcircuit.applications.van.ResidualBlock.name_scope:1
#: tensorcircuit.applications.vqes.Linear.name_scope:1
+#: tensorcircuit.keras.HardwareLayer.name_scope:1
#: tensorcircuit.keras.QuantumLayer.name_scope:1
msgid "Returns a `tf.name_scope` instance for this class."
msgstr ""
@@ -5653,6 +6527,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.non_trainable_variables:1
#: tensorcircuit.applications.van.ResidualBlock.non_trainable_variables:1
#: tensorcircuit.applications.vqes.Linear.non_trainable_variables:1
+#: tensorcircuit.keras.HardwareLayer.non_trainable_variables:1
#: tensorcircuit.keras.QuantumLayer.non_trainable_variables:1
msgid ""
"Sequence of non-trainable variables owned by this module and its "
@@ -5673,6 +6548,8 @@ msgstr ""
#: tensorcircuit.applications.van.ResidualBlock.trainable_variables:3
#: tensorcircuit.applications.vqes.Linear.non_trainable_variables:3
#: tensorcircuit.applications.vqes.Linear.trainable_variables:3
+#: tensorcircuit.keras.HardwareLayer.non_trainable_variables:3
+#: tensorcircuit.keras.HardwareLayer.trainable_variables:3
#: tensorcircuit.keras.QuantumLayer.non_trainable_variables:3
#: tensorcircuit.keras.QuantumLayer.trainable_variables:3
msgid ""
@@ -5696,6 +6573,8 @@ msgstr ""
#: tensorcircuit.applications.van.ResidualBlock.trainable_variables:7
#: tensorcircuit.applications.vqes.Linear.non_trainable_variables:7
#: tensorcircuit.applications.vqes.Linear.trainable_variables:7
+#: tensorcircuit.keras.HardwareLayer.non_trainable_variables:7
+#: tensorcircuit.keras.HardwareLayer.trainable_variables:7
#: tensorcircuit.keras.QuantumLayer.non_trainable_variables:7
#: tensorcircuit.keras.QuantumLayer.trainable_variables:7
msgid ""
@@ -5710,6 +6589,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.non_trainable_weights:1
#: tensorcircuit.applications.van.ResidualBlock.non_trainable_weights:1
#: tensorcircuit.applications.vqes.Linear.non_trainable_weights:1
+#: tensorcircuit.keras.HardwareLayer.non_trainable_weights:1
#: tensorcircuit.keras.QuantumLayer.non_trainable_weights:1
msgid "List of all non-trainable weights tracked by this layer."
msgstr ""
@@ -5721,6 +6601,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.non_trainable_weights:3
#: tensorcircuit.applications.van.ResidualBlock.non_trainable_weights:3
#: tensorcircuit.applications.vqes.Linear.non_trainable_weights:3
+#: tensorcircuit.keras.HardwareLayer.non_trainable_weights:3
#: tensorcircuit.keras.QuantumLayer.non_trainable_weights:3
msgid ""
"Non-trainable weights are *not* updated during training. They are "
@@ -5734,10 +6615,23 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.non_trainable_weights:6
#: tensorcircuit.applications.van.ResidualBlock.non_trainable_weights:6
#: tensorcircuit.applications.vqes.Linear.non_trainable_weights:6
+#: tensorcircuit.keras.HardwareLayer.non_trainable_weights:6
#: tensorcircuit.keras.QuantumLayer.non_trainable_weights:6
msgid "A list of non-trainable variables."
msgstr ""
+#: of tensorcircuit.applications.van.MADE.outbound_nodes:1
+#: tensorcircuit.applications.van.MaskedConv2D.outbound_nodes:1
+#: tensorcircuit.applications.van.MaskedLinear.outbound_nodes:1
+#: tensorcircuit.applications.van.NMF.outbound_nodes:1
+#: tensorcircuit.applications.van.PixelCNN.outbound_nodes:1
+#: tensorcircuit.applications.van.ResidualBlock.outbound_nodes:1
+#: tensorcircuit.applications.vqes.Linear.outbound_nodes:1
+#: tensorcircuit.keras.HardwareLayer.outbound_nodes:1
+#: tensorcircuit.keras.QuantumLayer.outbound_nodes:1
+msgid "Return Functional API nodes downstream of this layer."
+msgstr ""
+
#: of tensorcircuit.applications.van.MADE.output:1
#: tensorcircuit.applications.van.MaskedConv2D.output:1
#: tensorcircuit.applications.van.MaskedLinear.output:1
@@ -5745,6 +6639,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.output:1
#: tensorcircuit.applications.van.ResidualBlock.output:1
#: tensorcircuit.applications.vqes.Linear.output:1
+#: tensorcircuit.keras.HardwareLayer.output:1
#: tensorcircuit.keras.QuantumLayer.output:1
msgid "Retrieves the output tensor(s) of a layer."
msgstr ""
@@ -5756,6 +6651,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.output:3
#: tensorcircuit.applications.van.ResidualBlock.output:3
#: tensorcircuit.applications.vqes.Linear.output:3
+#: tensorcircuit.keras.HardwareLayer.output:3
#: tensorcircuit.keras.QuantumLayer.output:3
msgid ""
"Only applicable if the layer has exactly one output, i.e. if it is "
@@ -5769,6 +6665,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.output:6
#: tensorcircuit.applications.van.ResidualBlock.output:6
#: tensorcircuit.applications.vqes.Linear.output:6
+#: tensorcircuit.keras.HardwareLayer.output:6
#: tensorcircuit.keras.QuantumLayer.output:6
msgid "Output tensor or list of output tensors."
msgstr ""
@@ -5780,6 +6677,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.output:8
#: tensorcircuit.applications.van.ResidualBlock.output:8
#: tensorcircuit.applications.vqes.Linear.output:8
+#: tensorcircuit.keras.HardwareLayer.output:8
#: tensorcircuit.keras.QuantumLayer.output:8
msgid "if the layer is connected to more than one incoming layers."
msgstr ""
@@ -5791,6 +6689,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.output_mask:1
#: tensorcircuit.applications.van.ResidualBlock.output_mask:1
#: tensorcircuit.applications.vqes.Linear.output_mask:1
+#: tensorcircuit.keras.HardwareLayer.output_mask:1
#: tensorcircuit.keras.QuantumLayer.output_mask:1
msgid "Retrieves the output mask tensor(s) of a layer."
msgstr ""
@@ -5802,6 +6701,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.output_mask:6
#: tensorcircuit.applications.van.ResidualBlock.output_mask:6
#: tensorcircuit.applications.vqes.Linear.output_mask:6
+#: tensorcircuit.keras.HardwareLayer.output_mask:6
#: tensorcircuit.keras.QuantumLayer.output_mask:6
msgid "Output mask tensor (potentially None) or list of output mask tensors."
msgstr ""
@@ -5813,6 +6713,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.output_shape:1
#: tensorcircuit.applications.van.ResidualBlock.output_shape:1
#: tensorcircuit.applications.vqes.Linear.output_shape:1
+#: tensorcircuit.keras.HardwareLayer.output_shape:1
#: tensorcircuit.keras.QuantumLayer.output_shape:1
msgid "Retrieves the output shape(s) of a layer."
msgstr ""
@@ -5824,6 +6725,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.output_shape:3
#: tensorcircuit.applications.van.ResidualBlock.output_shape:3
#: tensorcircuit.applications.vqes.Linear.output_shape:3
+#: tensorcircuit.keras.HardwareLayer.output_shape:3
#: tensorcircuit.keras.QuantumLayer.output_shape:3
msgid ""
"Only applicable if the layer has one output, or if all outputs have the "
@@ -5837,6 +6739,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.output_shape:6
#: tensorcircuit.applications.van.ResidualBlock.output_shape:6
#: tensorcircuit.applications.vqes.Linear.output_shape:6
+#: tensorcircuit.keras.HardwareLayer.output_shape:6
#: tensorcircuit.keras.QuantumLayer.output_shape:6
msgid ""
"Output shape, as an integer shape tuple (or list of shape tuples, one "
@@ -5850,26 +6753,50 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.output_shape:9
#: tensorcircuit.applications.van.ResidualBlock.output_shape:9
#: tensorcircuit.applications.vqes.Linear.output_shape:9
+#: tensorcircuit.keras.HardwareLayer.output_shape:9
#: tensorcircuit.keras.QuantumLayer.output_shape:9
msgid "if the layer has no defined output shape."
msgstr ""
-#: keras.engine.training.Model.predict:1 of
+#: keras.src.engine.training.Model.predict:1 of
msgid "Generates output predictions for the input samples."
msgstr ""
-#: keras.engine.training.Model.predict:3 of
+#: keras.src.engine.training.Model.predict:3 of
+msgid ""
+"Computation is done in batches. This method is designed for batch "
+"processing of large numbers of inputs. It is not intended for use inside "
+"of loops that iterate over your data and process small numbers of inputs "
+"at a time."
+msgstr ""
+
+#: keras.src.engine.training.Model.predict:8 of
+msgid ""
+"For small numbers of inputs that fit in one batch, directly use "
+"`__call__()` for faster execution, e.g., `model(x)`, or `model(x, "
+"training=False)` if you have layers such as "
+"`tf.keras.layers.BatchNormalization` that behave differently during "
+"inference. You may pair the individual model call with a `tf.function` "
+"for additional performance inside your inner loop. If you need access to "
+"numpy array values instead of tensors after your model call, you can use "
+"`tensor.numpy()` to get the numpy array value of an eager tensor."
+msgstr ""
+
+#: keras.src.engine.training.Model.predict:18 of
+msgid ""
+"Also, note the fact that test loss is not affected by regularization "
+"layers like noise and dropout."
+msgstr ""
+
+#: keras.src.engine.training.Model.predict:21 of
msgid ""
-"Computation is done in batches. This method is designed for performance "
-"in large scale inputs. For small amount of inputs that fit in one batch, "
-"directly using `__call__()` is recommended for faster execution, e.g., "
-"`model(x)`, or `model(x, training=False)` if you have layers such as "
-"`tf.keras.layers.BatchNormalization` that behaves differently during "
-"inference. Also, note the fact that test loss is not affected by "
-"regularization layers like noise and dropout."
+"Note: See [this FAQ entry]( https://keras.io/getting_started/faq/#whats-"
+"the-difference-between-model-methods-predict-and-call) for more details "
+"about the difference between `Model` methods `predict()` and "
+"`__call__()`."
msgstr ""
-#: keras.engine.training.Model.predict:11 of
+#: keras.src.engine.training.Model.predict:26 of
msgid ""
"Input samples. It could be: - A Numpy array (or array-like), or a list of"
" arrays (in case the model has multiple inputs). - A TensorFlow tensor,"
@@ -5880,21 +6807,21 @@ msgid ""
"iterator-like inputs` section of `Model.fit`."
msgstr ""
-#: keras.engine.training.Model.predict:11 of
+#: keras.src.engine.training.Model.predict:26 of
msgid ""
"Input samples. It could be: - A Numpy array (or array-like), or a list of"
" arrays"
msgstr ""
-#: keras.engine.training.Model.predict:16 of
+#: keras.src.engine.training.Model.predict:31 of
msgid "A `tf.data` dataset."
msgstr ""
-#: keras.engine.training.Model.predict:17 of
+#: keras.src.engine.training.Model.predict:32 of
msgid "A generator or `keras.utils.Sequence` instance."
msgstr ""
-#: keras.engine.training.Model.predict:21 of
+#: keras.src.engine.training.Model.predict:36 of
msgid ""
"Integer or `None`. Number of samples per batch. If unspecified, "
"`batch_size` will default to 32. Do not specify the `batch_size` if your "
@@ -5902,11 +6829,7 @@ msgid ""
"instances (since they generate batches)."
msgstr ""
-#: keras.engine.training.Model.predict:27 of
-msgid "Verbosity mode, 0 or 1."
-msgstr ""
-
-#: keras.engine.training.Model.predict:28 of
+#: keras.src.engine.training.Model.predict:49 of
msgid ""
"Total number of steps (batches of samples) before declaring the "
"prediction round finished. Ignored with the default value of `None`. If x"
@@ -5914,13 +6837,14 @@ msgid ""
"the input dataset is exhausted."
msgstr ""
-#: keras.engine.training.Model.predict:33 of
+#: keras.src.engine.training.Model.predict:54 of
msgid ""
"List of `keras.callbacks.Callback` instances. List of callbacks to apply "
-"during prediction. See [callbacks](/api_docs/python/tf/keras/callbacks)."
+"during prediction. See [callbacks]( "
+"https://www.tensorflow.org/api_docs/python/tf/keras/callbacks)."
msgstr ""
-#: keras.engine.training.Model.predict:50 of
+#: keras.src.engine.training.Model.predict:74 of
msgid ""
"See the discussion of `Unpacking behavior for iterator-like inputs` for "
"`Model.fit`. Note that Model.predict uses the same interpretation rules "
@@ -5928,105 +6852,105 @@ msgid ""
"all three methods."
msgstr ""
-#: keras.engine.training.Model.predict:55
-#: keras.engine.training.Model.predict_on_batch:9 of
+#: keras.src.engine.training.Model.predict:79
+#: keras.src.engine.training.Model.predict_on_batch:9 of
msgid "Numpy array(s) of predictions."
msgstr ""
-#: keras.engine.training.Model.predict:57 of
+#: keras.src.engine.training.Model.predict:81 of
msgid "If `model.predict` is wrapped in a `tf.function`."
msgstr ""
-#: keras.engine.training.Model.predict:58 of
+#: keras.src.engine.training.Model.predict:82 of
msgid ""
"In case of mismatch between the provided input data and the model's "
"expectations, or in case a stateful model receives a number of "
"samples that is not a multiple of the batch size."
msgstr ""
-#: keras.engine.training.Model.predict_generator:1 of
+#: keras.src.engine.training.Model.predict_generator:1 of
msgid "Generates predictions for the input samples from a data generator."
msgstr ""
-#: keras.engine.training.Model.predict_generator:4 of
+#: keras.src.engine.training.Model.predict_generator:4 of
msgid ""
"`Model.predict` now supports generators, so there is no longer any need "
"to use this endpoint."
msgstr ""
-#: keras.engine.training.Model.predict_on_batch:1 of
+#: keras.src.engine.training.Model.predict_on_batch:1 of
msgid "Returns predictions for a single batch of samples."
msgstr ""
-#: keras.engine.training.Model.predict_on_batch:3 of
+#: keras.src.engine.training.Model.predict_on_batch:3 of
msgid ""
"Input data. It could be: - A Numpy array (or array-like), or a list of "
"arrays (in case the model has multiple inputs). - A TensorFlow "
"tensor, or a list of tensors (in case the model has multiple inputs)."
msgstr ""
-#: keras.engine.training.Model.predict_on_batch:3
-#: keras.engine.training.Model.test_on_batch:3 of
+#: keras.src.engine.training.Model.predict_on_batch:3
+#: keras.src.engine.training.Model.test_on_batch:3 of
msgid ""
"Input data. It could be: - A Numpy array (or array-like), or a list of "
"arrays (in case the"
msgstr ""
-#: keras.engine.training.Model.predict_on_batch:5
-#: keras.engine.training.Model.test_on_batch:5 of
+#: keras.src.engine.training.Model.predict_on_batch:5
+#: keras.src.engine.training.Model.test_on_batch:5 of
msgid "model has multiple inputs)."
msgstr ""
-#: keras.engine.training.Model.predict_on_batch:7
-#: keras.engine.training.Model.test_on_batch:6 of
+#: keras.src.engine.training.Model.predict_on_batch:7
+#: keras.src.engine.training.Model.test_on_batch:6 of
msgid "A TensorFlow tensor, or a list of tensors (in case the model has"
msgstr ""
-#: keras.engine.training.Model.predict_on_batch:7
-#: keras.engine.training.Model.test_on_batch:7 of
+#: keras.src.engine.training.Model.predict_on_batch:7
+#: keras.src.engine.training.Model.test_on_batch:7 of
msgid "multiple inputs)."
msgstr ""
-#: keras.engine.training.Model.predict_on_batch:11 of
-msgid "If `model.predict_on_batch` is wrapped in a `tf.function`."
+#: keras.src.engine.training.Model.predict_on_batch:11 of
+msgid "If `model.predict_on_batch` is wrapped in a `tf.function`."
msgstr ""
-#: keras.engine.training.Model.predict_step:1 of
+#: keras.src.engine.training.Model.predict_step:1 of
msgid "The logic for one inference step."
msgstr ""
-#: keras.engine.training.Model.predict_step:3 of
+#: keras.src.engine.training.Model.predict_step:3 of
msgid ""
"This method can be overridden to support custom inference logic. This "
"method is called by `Model.make_predict_function`."
msgstr ""
-#: keras.engine.training.Model.predict_step:6 of
+#: keras.src.engine.training.Model.predict_step:6 of
msgid ""
"This method should contain the mathematical logic for one step of "
-"inference. This typically includes the forward pass."
+"inference. This typically includes the forward pass."
msgstr ""
-#: keras.engine.training.Model.predict_step:9 of
+#: keras.src.engine.training.Model.predict_step:9 of
msgid ""
"Configuration details for *how* this logic is run (e.g. `tf.function` and"
" `tf.distribute.Strategy` settings), should be left to "
"`Model.make_predict_function`, which can also be overridden."
msgstr ""
-#: keras.engine.training.Model.predict_step:13
-#: keras.engine.training.Model.test_step:15
-#: keras.engine.training.Model.train_step:16 of
+#: keras.src.engine.training.Model.predict_step:13
+#: keras.src.engine.training.Model.test_step:15
+#: keras.src.engine.training.Model.train_step:17 of
msgid "A nested structure of `Tensor`s."
msgstr ""
-#: keras.engine.training.Model.predict_step:15 of
+#: keras.src.engine.training.Model.predict_step:15 of
msgid ""
"The result of one inference step, typically the output of calling the "
"`Model` on data."
msgstr ""
-#: keras.engine.training.Model.reset_metrics:1 of
+#: keras.src.engine.training.Model.reset_metrics:1 of
msgid "Resets the state of all the metrics in the model."
msgstr ""
@@ -6059,116 +6983,170 @@ msgstr ""
msgid "Boolean, whether the model should run eagerly."
msgstr ""
-#: keras.engine.training.Model.save:1 of
-msgid "Saves the model to Tensorflow SavedModel or a single HDF5 file."
+#: keras.src.engine.training.Model.save:1 of
+msgid "Saves a model as a TensorFlow SavedModel or HDF5 file."
msgstr ""
-#: keras.engine.training.Model.save:3 of
-msgid ""
-"Please see `tf.keras.models.save_model` or the [Serialization and Saving "
-"guide](https://keras.io/guides/serialization_and_saving/) for details."
+#: keras.src.engine.training.Model.save:4 of
+msgid "See the [Serialization and Saving guide]("
msgstr ""
-#: keras.engine.training.Model.save:7 of
-msgid "String, PathLike, path to SavedModel or H5 file to save the model."
+#: keras.src.engine.training.Model.save:4 of
+msgid "https://keras.io/guides/serialization_and_saving/) for details."
msgstr ""
-#: keras.engine.training.Model.save:9
-#: keras.engine.training.Model.save_weights:47 of
-msgid ""
-"Whether to silently overwrite any existing file at the target location, "
-"or provide the user with a manual prompt."
+#: keras.src.engine.training.Model.save:6 of
+msgid "Keras model instance to be saved."
+msgstr ""
+
+#: keras.src.engine.training.Model.save:7 of
+msgid "`str` or `pathlib.Path` object. Path where to save the model."
msgstr ""
-#: keras.engine.training.Model.save:11 of
-msgid "If True, save optimizer's state together."
+#: keras.src.engine.training.Model.save:9 of
+msgid ""
+"Whether we should overwrite any existing model at the target location, or"
+" instead ask the user via an interactive prompt."
msgstr ""
-#: keras.engine.training.Model.save:12 of
+#: keras.src.engine.training.Model.save:12 of
msgid ""
-"Either `'tf'` or `'h5'`, indicating whether to save the model to "
-"Tensorflow SavedModel or HDF5. Defaults to 'tf' in TF 2.X, and 'h5' in TF"
-" 1.X."
+"Either `\"keras\"`, `\"tf\"`, `\"h5\"`, indicating whether to save the "
+"model in the native Keras format (`.keras`), in the TensorFlow SavedModel"
+" format (referred to as \"SavedModel\" below), or in the legacy HDF5 "
+"format (`.h5`). Defaults to `\"tf\"` in TF 2.X, and `\"h5\"` in TF 1.X."
+msgstr ""
+
+#: keras.src.engine.training.Model.save:36 of
+msgid "SavedModel format arguments:"
+msgstr ""
+
+#: keras.src.engine.training.Model.save:22 of
+msgid "include_optimizer: Only applied to SavedModel and legacy HDF5"
+msgstr ""
+
+#: keras.src.engine.training.Model.save:22 of
+msgid "formats. If False, do not save the optimizer state. Defaults to `True`."
+msgstr ""
+
+#: keras.src.engine.training.Model.save:25 of
+msgid "signatures: Only applies to SavedModel format. Signatures to save"
msgstr ""
-#: keras.engine.training.Model.save:15 of
+#: keras.src.engine.training.Model.save:25 of
msgid ""
-"Signatures to save with the SavedModel. Applicable to the 'tf' format "
-"only. Please see the `signatures` argument in `tf.saved_model.save` for "
-"details."
+"with the SavedModel. See the `signatures` argument in "
+"`tf.saved_model.save` for details."
+msgstr ""
+
+#: keras.src.engine.training.Model.save:28 of
+msgid "options: Only applies to SavedModel format."
msgstr ""
-#: keras.engine.training.Model.save:18 of
+#: keras.src.engine.training.Model.save:28 of
msgid ""
-"(only applies to SavedModel format) `tf.saved_model.SaveOptions` object "
-"that specifies options for saving to SavedModel."
+"`tf.saved_model.SaveOptions` object that specifies SavedModel saving "
+"options."
+msgstr ""
+
+#: keras.src.engine.training.Model.save:36 of
+msgid "save_traces: Only applies to SavedModel format. When enabled, the"
msgstr ""
-#: keras.engine.training.Model.save:21 of
+#: keras.src.engine.training.Model.save:31 of
msgid ""
-"(only applies to SavedModel format) When enabled, the SavedModel will "
-"store the function traces for each layer. This can be disabled, so that "
-"only the configs of each layer are stored. Defaults to `True`. Disabling "
-"this will decrease serialization time and reduce file size, but it "
-"requires that all custom layers/models implement a `get_config()` method."
+"SavedModel will store the function traces for each layer. This can be "
+"disabled, so that only the configs of each layer are stored. Defaults to "
+"`True`. Disabling this will decrease serialization time and reduce file "
+"size, but it requires that all custom layers/models implement a "
+"`get_config()` method."
+msgstr ""
+
+#: keras.src.engine.training.Model.save:40 of
+msgid "```python model = tf.keras.Sequential(["
msgstr ""
-#: keras.engine.training.Model.save:30 of
-msgid "```python from keras.models import load_model"
+#: keras.src.engine.training.Model.save:42 of
+msgid "tf.keras.layers.Dense(5, input_shape=(3,)), tf.keras.layers.Softmax()])"
msgstr ""
-#: keras.engine.training.Model.save:33 of
+#: keras.src.engine.training.Model.save:44 of
msgid ""
-"model.save('my_model.h5') # creates a HDF5 file 'my_model.h5' del model"
-" # deletes the existing model"
+"model.save(\"model.keras\") loaded_model = "
+"tf.keras.models.load_model(\"model.keras\") x = tf.random.uniform((10, "
+"3)) assert np.allclose(model.predict(x), loaded_model.predict(x)) ```"
+msgstr ""
+
+#: keras.src.engine.training.Model.save:50 of
+msgid "Note that `model.save()` is an alias for `tf.keras.models.save_model()`."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.save_own_variables:1 of
+msgid "Saves the state of the layer."
msgstr ""
-#: keras.engine.training.Model.save:36 of
+#: keras.src.engine.base_layer.Layer.save_own_variables:3 of
msgid ""
-"# returns a compiled model # identical to the previous one model = "
-"load_model('my_model.h5') ```"
+"You can override this method to take full control of how the state of the"
+" layer is saved upon calling `model.save()`."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.save_own_variables:6 of
+msgid "Dict where the state of the model will be saved."
msgstr ""
-#: keras.engine.training.Model.save_spec:1 of
-msgid "Returns the `tf.TensorSpec` of call inputs as a tuple `(args, kwargs)`."
+#: keras.src.engine.training.Model.save_spec:1 of
+msgid "Returns the `tf.TensorSpec` of call args as a tuple `(args, kwargs)`."
msgstr ""
-#: keras.engine.training.Model.save_spec:3 of
+#: keras.src.engine.training.Model.save_spec:3 of
msgid ""
"This value is automatically defined after calling the model for the first"
" time. Afterwards, you can use it when exporting the model for serving:"
msgstr ""
-#: keras.engine.training.Model.save_spec:6 of
+#: keras.src.engine.training.Model.save_spec:7 of
msgid "```python model = tf.keras.Model(...)"
msgstr ""
-#: keras.engine.training.Model.save_spec:9 of
+#: keras.src.engine.training.Model.save_spec:10 of
msgid "@tf.function def serve(*args, **kwargs):"
msgstr ""
-#: keras.engine.training.Model.save_spec:11 of
+#: keras.src.engine.training.Model.save_spec:12 of
msgid ""
"outputs = model(*args, **kwargs) # Apply postprocessing steps, or add "
"additional outputs. ... return outputs"
msgstr ""
-#: keras.engine.training.Model.save_spec:16 of
+#: keras.src.engine.training.Model.save_spec:17 of
msgid ""
-"# arg_specs is `[tf.TensorSpec(...), ...]`. kwarg_specs, in this example,"
-" is # an empty dict since functional models do not use keyword arguments."
-" arg_specs, kwarg_specs = model.save_spec()"
+"# arg_specs is `[tf.TensorSpec(...), ...]`. kwarg_specs, in this # "
+"example, is an empty dict since functional models do not use keyword # "
+"arguments. arg_specs, kwarg_specs = model.save_spec()"
msgstr ""
-#: keras.engine.training.Model.save_spec:20 of
+#: keras.src.engine.training.Model.save_spec:23 of
msgid "model.save(path, signatures={"
msgstr ""
-#: keras.engine.training.Model.save_spec:21 of
-msgid "'serving_default': serve.get_concrete_function(*arg_specs, **kwarg_specs)"
+#: keras.src.engine.training.Model.save_spec:23 of
+msgid "'serving_default': serve.get_concrete_function(*arg_specs,"
+msgstr ""
+
+#: keras.src.engine.training.Model.save_spec:24 of
+msgid "**kwarg_specs)"
+msgstr ""
+
+#: keras.src.engine.training.Model.save_spec:26 of
+msgid "})"
+msgstr ""
+
+#: keras.src.engine.training.Model.save_spec of
+msgid "param dynamic_batch"
msgstr ""
-#: keras.engine.training.Model.save_spec:25 of
+#: keras.src.engine.training.Model.save_spec:28 of
msgid ""
"Whether to set the batch sizes of all the returned `tf.TensorSpec` to "
"`None`. (Note that when defining functional or Sequential models with "
@@ -6176,7 +7154,11 @@ msgid ""
"preserved). Defaults to `True`."
msgstr ""
-#: keras.engine.training.Model.save_spec:30 of
+#: keras.src.engine.training.Model.save_spec of
+msgid "returns"
+msgstr ""
+
+#: keras.src.engine.training.Model.save_spec:33 of
msgid ""
"If the model inputs are defined, returns a tuple `(args, kwargs)`. All "
"elements in `args` and `kwargs` are `tf.TensorSpec`. If the model inputs "
@@ -6184,49 +7166,49 @@ msgid ""
"when calling the model, `model.fit`, `model.evaluate` or `model.predict`."
msgstr ""
-#: keras.engine.training.Model.save_weights:1 of
+#: keras.src.engine.training.Model.save_weights:1 of
msgid "Saves all layer weights."
msgstr ""
-#: keras.engine.training.Model.save_weights:3 of
+#: keras.src.engine.training.Model.save_weights:3 of
msgid ""
"Either saves in HDF5 or in TensorFlow format based on the `save_format` "
"argument."
msgstr ""
-#: keras.engine.training.Model.save_weights:14 of
+#: keras.src.engine.training.Model.save_weights:14 of
msgid "When saving in HDF5 format, the weight file has:"
msgstr ""
-#: keras.engine.training.Model.save_weights:7 of
+#: keras.src.engine.training.Model.save_weights:7 of
msgid "`layer_names` (attribute), a list of strings"
msgstr ""
-#: keras.engine.training.Model.save_weights:8 of
+#: keras.src.engine.training.Model.save_weights:8 of
msgid "(ordered names of model layers)."
msgstr ""
-#: keras.engine.training.Model.save_weights:14 of
+#: keras.src.engine.training.Model.save_weights:14 of
msgid "For every layer, a `group` named `layer.name`"
msgstr ""
-#: keras.engine.training.Model.save_weights:11 of
+#: keras.src.engine.training.Model.save_weights:11 of
msgid "For every such layer group, a group attribute `weight_names`,"
msgstr ""
-#: keras.engine.training.Model.save_weights:11 of
+#: keras.src.engine.training.Model.save_weights:11 of
msgid "a list of strings (ordered names of weights tensor of the layer)."
msgstr ""
-#: keras.engine.training.Model.save_weights:14 of
+#: keras.src.engine.training.Model.save_weights:14 of
msgid "For every weight in the layer, a dataset"
msgstr ""
-#: keras.engine.training.Model.save_weights:14 of
+#: keras.src.engine.training.Model.save_weights:14 of
msgid "storing the weight value, named after the weight tensor."
msgstr ""
-#: keras.engine.training.Model.save_weights:16 of
+#: keras.src.engine.training.Model.save_weights:16 of
msgid ""
"When saving in TensorFlow format, all objects referenced by the network "
"are saved in the same format as `tf.train.Checkpoint`, including any "
@@ -6239,7 +7221,7 @@ msgid ""
"`tf.train.Checkpoint` and `tf.keras.Model` for details."
msgstr ""
-#: keras.engine.training.Model.save_weights:26 of
+#: keras.src.engine.training.Model.save_weights:26 of
msgid ""
"While the formats are the same, do not mix `save_weights` and "
"`tf.train.Checkpoint`. Checkpoints saved by `Model.save_weights` should "
@@ -6249,7 +7231,7 @@ msgid ""
"`save_weights` for training checkpoints."
msgstr ""
-#: keras.engine.training.Model.save_weights:33 of
+#: keras.src.engine.training.Model.save_weights:33 of
msgid ""
"The TensorFlow format matches objects and variables by starting at a root"
" object, `self` for `save_weights`, and greedily matching attribute "
@@ -6258,11 +7240,11 @@ msgid ""
"This means saving a `tf.keras.Model` using `save_weights` and loading "
"into a `tf.train.Checkpoint` with a `Model` attached (or vice versa) will"
" not match the `Model`'s variables. See the [guide to training "
-"checkpoints](https://www.tensorflow.org/guide/checkpoint) for details on "
-"the TensorFlow format."
+"checkpoints]( https://www.tensorflow.org/guide/checkpoint) for details on"
+" the TensorFlow format."
msgstr ""
-#: keras.engine.training.Model.save_weights:43 of
+#: keras.src.engine.training.Model.save_weights:44 of
msgid ""
"String or PathLike, path to the file to save the weights to. When saving "
"in TensorFlow format, this is the prefix used for checkpoint files "
@@ -6270,28 +7252,34 @@ msgid ""
" to be saved in HDF5 format."
msgstr ""
-#: keras.engine.training.Model.save_weights:49 of
+#: keras.src.engine.training.Model.save_weights:48 of
+msgid ""
+"Whether to silently overwrite any existing file at the target location, "
+"or provide the user with a manual prompt."
+msgstr ""
+
+#: keras.src.engine.training.Model.save_weights:50 of
msgid ""
"Either 'tf' or 'h5'. A `filepath` ending in '.h5' or '.keras' will "
-"default to HDF5 if `save_format` is `None`. Otherwise `None` defaults to "
-"'tf'."
+"default to HDF5 if `save_format` is `None`. Otherwise, `None` becomes "
+"'tf'. Defaults to `None`."
msgstr ""
-#: keras.engine.training.Model.save_weights:52 of
+#: keras.src.engine.training.Model.save_weights:53 of
msgid ""
"Optional `tf.train.CheckpointOptions` object that specifies options for "
"saving weights."
msgstr ""
-#: keras.engine.training.Model.save_weights:55 of
-msgid "If `h5py` is not available when attempting to save in HDF5 format."
+#: keras.src.engine.training.Model.save_weights:56 of
+msgid "If `h5py` is not available when attempting to save in HDF5 format."
msgstr ""
-#: keras.engine.base_layer.Layer.set_weights:1 of
+#: keras.src.engine.base_layer.Layer.set_weights:1 of
msgid "Sets the weights of the layer, from NumPy arrays."
msgstr ""
-#: keras.engine.base_layer.Layer.set_weights:3 of
+#: keras.src.engine.base_layer.Layer.set_weights:3 of
msgid ""
"The weights of a layer represent the state of the layer. This function "
"sets the weight values from numpy arrays. The weight values should be "
@@ -6300,27 +7288,33 @@ msgid ""
" layer."
msgstr ""
-#: keras.engine.base_layer.Layer.get_weights:8
-#: keras.engine.base_layer.Layer.set_weights:9 of
+#: keras.src.engine.base_layer.Layer.get_weights:8
+#: keras.src.engine.base_layer.Layer.set_weights:9 of
msgid ""
"For example, a `Dense` layer returns a list of two values: the kernel "
"matrix and the bias vector. These can be used to set the weights of "
"another `Dense` layer:"
msgstr ""
-#: keras.engine.base_layer.Layer.set_weights:33 of
+#: keras.src.engine.base_layer.Layer.set_weights:33 of
msgid ""
"a list of NumPy arrays. The number of arrays and their shape must match "
"number of the dimensions of the weights of the layer (i.e. it should "
"match the output of `get_weights`)."
msgstr ""
-#: keras.engine.base_layer.Layer.set_weights:39 of
+#: keras.src.engine.base_layer.Layer.set_weights:39 of
msgid ""
"If the provided weights list does not match the layer's "
"specifications."
msgstr ""
+#: of tensorcircuit.applications.van.MADE.state_updates:1
+#: tensorcircuit.applications.van.NMF.state_updates:1
+#: tensorcircuit.applications.van.PixelCNN.state_updates:1
+msgid "Deprecated, do NOT use!"
+msgstr ""
+
#: of tensorcircuit.applications.van.MADE.state_updates:3
#: tensorcircuit.applications.van.NMF.state_updates:3
#: tensorcircuit.applications.van.PixelCNN.state_updates:3
@@ -6348,6 +7342,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.submodules:1
#: tensorcircuit.applications.van.ResidualBlock.submodules:1
#: tensorcircuit.applications.vqes.Linear.submodules:1
+#: tensorcircuit.keras.HardwareLayer.submodules:1
#: tensorcircuit.keras.QuantumLayer.submodules:1
msgid "Sequence of all sub-modules."
msgstr ""
@@ -6359,6 +7354,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.submodules:3
#: tensorcircuit.applications.van.ResidualBlock.submodules:3
#: tensorcircuit.applications.vqes.Linear.submodules:3
+#: tensorcircuit.keras.HardwareLayer.submodules:3
#: tensorcircuit.keras.QuantumLayer.submodules:3
msgid ""
"Submodules are modules which are properties of this module, or found as "
@@ -6372,38 +7368,55 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.submodules:18
#: tensorcircuit.applications.van.ResidualBlock.submodules:18
#: tensorcircuit.applications.vqes.Linear.submodules:18
+#: tensorcircuit.keras.HardwareLayer.submodules:18
#: tensorcircuit.keras.QuantumLayer.submodules:18
msgid "A sequence of all submodules."
msgstr ""
-#: keras.engine.training.Model.summary:1 of
+#: keras.src.engine.training.Model.summary:1 of
msgid "Prints a string summary of the network."
msgstr ""
-#: keras.engine.training.Model.summary:3 of
+#: keras.src.engine.training.Model.summary:3 of
msgid ""
"Total length of printed lines (e.g. set this to adapt the display to "
"different terminal window sizes)."
msgstr ""
-#: keras.engine.training.Model.summary:6 of
+#: keras.src.engine.training.Model.summary:6 of
msgid ""
"Relative or absolute positions of log elements in each line. If not "
-"provided, defaults to `[.33, .55, .67, 1.]`."
+"provided, becomes `[0.3, 0.6, 0.70, 1.]`. Defaults to `None`."
msgstr ""
-#: keras.engine.training.Model.summary:9 of
+#: keras.src.engine.training.Model.summary:9 of
msgid ""
-"Print function to use. Defaults to `print`. It will be called on each "
-"line of the summary. You can set it to a custom function in order to "
-"capture the string summary."
+"Print function to use. By default, prints to `stdout`. If `stdout` "
+"doesn't work in your environment, change to `print`. It will be called on"
+" each line of the summary. You can set it to a custom function in order "
+"to capture the string summary."
+msgstr ""
+
+#: keras.src.engine.training.Model.summary:14 of
+msgid "Whether to expand the nested models. Defaults to `False`."
+msgstr ""
+
+#: keras.src.engine.training.Model.summary:16 of
+msgid "Whether to show if a layer is trainable. Defaults to `False`."
msgstr ""
-#: keras.engine.training.Model.summary:13 of
-msgid "Whether to expand the nested models. If not provided, defaults to `False`."
+#: keras.src.engine.training.Model.summary:18 of
+msgid ""
+"a list or tuple of 2 strings, which is the starting layer name and ending"
+" layer name (both inclusive) indicating the range of layers to be printed"
+" in summary. It also accepts regex patterns instead of exact name. In "
+"such case, start predicate will be the first element it matches to "
+"`layer_range[0]` and the end predicate will be the last element it "
+"matches to `layer_range[1]`. By default `None` which considers all layers"
+" of model."
msgstr ""
-#: keras.engine.training.Model.summary:16 of
+#: keras.src.engine.training.Model.summary:27 of
msgid "if `summary()` is called before the model is built."
msgstr ""
@@ -6414,41 +7427,43 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.supports_masking:1
#: tensorcircuit.applications.van.ResidualBlock.supports_masking:1
#: tensorcircuit.applications.vqes.Linear.supports_masking:1
+#: tensorcircuit.keras.HardwareLayer.supports_masking:1
#: tensorcircuit.keras.QuantumLayer.supports_masking:1
msgid "Whether this layer supports computing a mask using `compute_mask`."
msgstr ""
-#: keras.engine.training.Model.test_on_batch:1 of
+#: keras.src.engine.training.Model.test_on_batch:1 of
msgid "Test the model on a single batch of samples."
msgstr ""
-#: keras.engine.training.Model.test_on_batch:3 of
+#: keras.src.engine.training.Model.test_on_batch:3 of
msgid ""
"Input data. It could be: - A Numpy array (or array-like), or a list of "
"arrays (in case the model has multiple inputs). - A TensorFlow "
"tensor, or a list of tensors (in case the model has multiple inputs)."
-" - A dict mapping input names to the corresponding array/tensors, if "
+" - A dict mapping input names to the corresponding array/tensors, if "
"the model has named inputs."
msgstr ""
-#: keras.engine.training.Model.test_on_batch:8 of
-msgid "A dict mapping input names to the corresponding array/tensors, if"
+#: keras.src.engine.training.Model.test_on_batch:8
+#: keras.src.engine.training.Model.train_on_batch:8 of
+msgid "A dict mapping input names to the corresponding array/tensors,"
msgstr ""
-#: keras.engine.training.Model.test_on_batch:9 of
-msgid "the model has named inputs."
+#: keras.src.engine.training.Model.test_on_batch:9
+#: keras.src.engine.training.Model.train_on_batch:9 of
+msgid "if the model has named inputs."
msgstr ""
-#: keras.engine.training.Model.test_on_batch:10
-#: keras.engine.training.Model.train_on_batch:10 of
+#: keras.src.engine.training.Model.test_on_batch:10 of
msgid ""
"Target data. Like the input data `x`, it could be either Numpy array(s) "
"or TensorFlow tensor(s). It should be consistent with `x` (you cannot "
"have Numpy inputs and tensor targets, or inversely)."
msgstr ""
-#: keras.engine.training.Model.test_on_batch:13
-#: keras.engine.training.Model.train_on_batch:13 of
+#: keras.src.engine.training.Model.test_on_batch:13
+#: keras.src.engine.training.Model.train_on_batch:12 of
msgid ""
"Optional array of the same length as x, containing weights to apply to "
"the model's loss for each sample. In the case of temporal data, you can "
@@ -6456,105 +7471,105 @@ msgid ""
"different weight to every timestep of every sample."
msgstr ""
-#: keras.engine.training.Model.test_on_batch:18
-#: keras.engine.training.Model.train_on_batch:22 of
+#: keras.src.engine.training.Model.test_on_batch:18
+#: keras.src.engine.training.Model.train_on_batch:24 of
msgid ""
"If `True`, the metrics returned will be only for this batch. If `False`, "
"the metrics will be statefully accumulated across batches."
msgstr ""
-#: keras.engine.training.Model.test_on_batch:30 of
-msgid "If `model.test_on_batch` is wrapped in a `tf.function`."
+#: keras.src.engine.training.Model.test_on_batch:30 of
+msgid "If `model.test_on_batch` is wrapped in a `tf.function`."
msgstr ""
-#: keras.engine.training.Model.test_step:1 of
+#: keras.src.engine.training.Model.test_step:1 of
msgid "The logic for one evaluation step."
msgstr ""
-#: keras.engine.training.Model.test_step:3 of
+#: keras.src.engine.training.Model.test_step:3 of
msgid ""
"This method can be overridden to support custom evaluation logic. This "
"method is called by `Model.make_test_function`."
msgstr ""
-#: keras.engine.training.Model.test_step:6 of
+#: keras.src.engine.training.Model.test_step:6 of
msgid ""
"This function should contain the mathematical logic for one step of "
"evaluation. This typically includes the forward pass, loss calculation, "
"and metrics updates."
msgstr ""
-#: keras.engine.training.Model.test_step:11 of
+#: keras.src.engine.training.Model.test_step:11 of
msgid ""
"Configuration details for *how* this logic is run (e.g. `tf.function` and"
" `tf.distribute.Strategy` settings), should be left to "
"`Model.make_test_function`, which can also be overridden."
msgstr ""
-#: keras.engine.training.Model.test_step:17 of
+#: keras.src.engine.training.Model.test_step:17 of
msgid ""
"A `dict` containing values that will be passed to "
"`tf.keras.callbacks.CallbackList.on_train_batch_end`. Typically, the "
"values of the `Model`'s metrics are returned."
msgstr ""
-#: keras.engine.training.Model.to_json:1 of
+#: keras.src.engine.training.Model.to_json:1 of
msgid "Returns a JSON string containing the network configuration."
msgstr ""
-#: keras.engine.training.Model.to_json:3 of
+#: keras.src.engine.training.Model.to_json:3 of
msgid ""
"To load a network from a JSON save file, use "
"`keras.models.model_from_json(json_string, custom_objects={})`."
msgstr ""
-#: keras.engine.training.Model.to_json:6 of
-msgid "Additional keyword arguments to be passed to `json.dumps()`."
+#: keras.src.engine.training.Model.to_json:6 of
+msgid "Additional keyword arguments to be passed to *`json.dumps()`."
msgstr ""
-#: keras.engine.training.Model.to_json:9 of
+#: keras.src.engine.training.Model.to_json:9 of
msgid "A JSON string."
msgstr ""
-#: keras.engine.training.Model.to_yaml:1 of
+#: keras.src.engine.training.Model.to_yaml:1 of
msgid "Returns a yaml string containing the network configuration."
msgstr ""
-#: keras.engine.training.Model.to_yaml:3 of
+#: keras.src.engine.training.Model.to_yaml:3 of
msgid ""
"Note: Since TF 2.6, this method is no longer supported and will raise a "
"RuntimeError."
msgstr ""
-#: keras.engine.training.Model.to_yaml:6 of
+#: keras.src.engine.training.Model.to_yaml:6 of
msgid ""
"To load a network from a yaml save file, use "
"`keras.models.model_from_yaml(yaml_string, custom_objects={})`."
msgstr ""
-#: keras.engine.training.Model.to_yaml:9 of
+#: keras.src.engine.training.Model.to_yaml:9 of
msgid ""
"`custom_objects` should be a dictionary mapping the names of custom "
"losses / layers / etc to the corresponding functions / classes."
msgstr ""
-#: keras.engine.training.Model.to_yaml:13 of
+#: keras.src.engine.training.Model.to_yaml:13 of
msgid "Additional keyword arguments to be passed to `yaml.dump()`."
msgstr ""
-#: keras.engine.training.Model.to_yaml:16 of
+#: keras.src.engine.training.Model.to_yaml:16 of
msgid "A YAML string."
msgstr ""
-#: keras.engine.training.Model.to_yaml:18 of
+#: keras.src.engine.training.Model.to_yaml:18 of
msgid "announces that the method poses a security risk"
msgstr ""
-#: keras.engine.training.Model.train_on_batch:1 of
+#: keras.src.engine.training.Model.train_on_batch:1 of
msgid "Runs a single gradient update on a single batch of data."
msgstr ""
-#: keras.engine.training.Model.train_on_batch:3 of
+#: keras.src.engine.training.Model.train_on_batch:3 of
msgid ""
"Input data. It could be: - A Numpy array (or array-like), or a list of "
"arrays (in case the model has multiple inputs). - A TensorFlow "
@@ -6563,27 +7578,28 @@ msgid ""
"the model has named inputs."
msgstr ""
-#: keras.engine.training.Model.train_on_batch:6 of
+#: keras.src.engine.training.Model.train_on_batch:6 of
msgid "A TensorFlow tensor, or a list of tensors"
msgstr ""
-#: keras.engine.training.Model.train_on_batch:8 of
-msgid "A dict mapping input names to the corresponding array/tensors,"
-msgstr ""
-
-#: keras.engine.training.Model.train_on_batch:9 of
-msgid "if the model has named inputs."
+#: keras.src.engine.training.Model.train_on_batch:10 of
+msgid ""
+"Target data. Like the input data `x`, it could be either Numpy array(s) "
+"or TensorFlow tensor(s)."
msgstr ""
-#: keras.engine.training.Model.train_on_batch:18 of
+#: keras.src.engine.training.Model.train_on_batch:17 of
msgid ""
"Optional dictionary mapping class indices (integers) to a weight (float) "
"to apply to the model's loss for the samples from this class during "
"training. This can be useful to tell the model to \"pay more attention\" "
-"to samples from an under-represented class."
+"to samples from an under-represented class. When `class_weight` is "
+"specified and targets have a rank of 2 or greater, either `y` must be "
+"one-hot encoded, or an explicit final dimension of `1` must be included "
+"for sparse class labels."
msgstr ""
-#: keras.engine.training.Model.train_on_batch:29 of
+#: keras.src.engine.training.Model.train_on_batch:31 of
msgid ""
"Scalar training loss (if the model has a single output and no metrics) or"
" list of scalars (if the model has multiple outputs and/or metrics). The "
@@ -6591,38 +7607,38 @@ msgid ""
"scalar outputs."
msgstr ""
-#: keras.engine.training.Model.train_on_batch:35 of
+#: keras.src.engine.training.Model.train_on_batch:37 of
msgid "If `model.train_on_batch` is wrapped in a `tf.function`."
msgstr ""
-#: keras.engine.training.Model.train_step:1 of
+#: keras.src.engine.training.Model.train_step:1 of
msgid "The logic for one training step."
msgstr ""
-#: keras.engine.training.Model.train_step:3 of
+#: keras.src.engine.training.Model.train_step:3 of
msgid ""
"This method can be overridden to support custom training logic. For "
"concrete examples of how to override this method see [Customizing what "
-"happends in "
-"fit](https://www.tensorflow.org/guide/keras/customizing_what_happens_in_fit)."
-" This method is called by `Model.make_train_function`."
+"happens in fit]( "
+"https://www.tensorflow.org/guide/keras/customizing_what_happens_in_fit). "
+"This method is called by `Model.make_train_function`."
msgstr ""
-#: keras.engine.training.Model.train_step:8 of
+#: keras.src.engine.training.Model.train_step:9 of
msgid ""
"This method should contain the mathematical logic for one step of "
-"training. This typically includes the forward pass, loss calculation, "
+"training. This typically includes the forward pass, loss calculation, "
"backpropagation, and metric updates."
msgstr ""
-#: keras.engine.training.Model.train_step:12 of
+#: keras.src.engine.training.Model.train_step:13 of
msgid ""
"Configuration details for *how* this logic is run (e.g. `tf.function` and"
" `tf.distribute.Strategy` settings), should be left to "
"`Model.make_train_function`, which can also be overridden."
msgstr ""
-#: keras.engine.training.Model.train_step:18 of
+#: keras.src.engine.training.Model.train_step:19 of
msgid ""
"A `dict` containing values that will be passed to "
"`tf.keras.callbacks.CallbackList.on_train_batch_end`. Typically, the "
@@ -6637,6 +7653,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.trainable_variables:1
#: tensorcircuit.applications.van.ResidualBlock.trainable_variables:1
#: tensorcircuit.applications.vqes.Linear.trainable_variables:1
+#: tensorcircuit.keras.HardwareLayer.trainable_variables:1
#: tensorcircuit.keras.QuantumLayer.trainable_variables:1
msgid "Sequence of trainable variables owned by this module and its submodules."
msgstr ""
@@ -6648,6 +7665,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.trainable_weights:1
#: tensorcircuit.applications.van.ResidualBlock.trainable_weights:1
#: tensorcircuit.applications.vqes.Linear.trainable_weights:1
+#: tensorcircuit.keras.HardwareLayer.trainable_weights:1
#: tensorcircuit.keras.QuantumLayer.trainable_weights:1
msgid "List of all trainable weights tracked by this layer."
msgstr ""
@@ -6659,6 +7677,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.trainable_weights:3
#: tensorcircuit.applications.van.ResidualBlock.trainable_weights:3
#: tensorcircuit.applications.vqes.Linear.trainable_weights:3
+#: tensorcircuit.keras.HardwareLayer.trainable_weights:3
#: tensorcircuit.keras.QuantumLayer.trainable_weights:3
msgid "Trainable weights are updated via gradient descent during training."
msgstr ""
@@ -6670,6 +7689,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.trainable_weights:5
#: tensorcircuit.applications.van.ResidualBlock.trainable_weights:5
#: tensorcircuit.applications.vqes.Linear.trainable_weights:5
+#: tensorcircuit.keras.HardwareLayer.trainable_weights:5
#: tensorcircuit.keras.QuantumLayer.trainable_weights:5
msgid "A list of trainable variables."
msgstr ""
@@ -6681,6 +7701,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.variable_dtype:1
#: tensorcircuit.applications.van.ResidualBlock.variable_dtype:1
#: tensorcircuit.applications.vqes.Linear.variable_dtype:1
+#: tensorcircuit.keras.HardwareLayer.variable_dtype:1
#: tensorcircuit.keras.QuantumLayer.variable_dtype:1
msgid "Alias of `Layer.dtype`, the dtype of the weights."
msgstr ""
@@ -6699,6 +7720,8 @@ msgstr ""
#: tensorcircuit.applications.van.ResidualBlock.weights:1
#: tensorcircuit.applications.vqes.Linear.variables:1
#: tensorcircuit.applications.vqes.Linear.weights:1
+#: tensorcircuit.keras.HardwareLayer.variables:1
+#: tensorcircuit.keras.HardwareLayer.weights:1
#: tensorcircuit.keras.QuantumLayer.variables:1
#: tensorcircuit.keras.QuantumLayer.weights:1
msgid "Returns the list of all layer variables/weights."
@@ -6711,6 +7734,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.variables:3
#: tensorcircuit.applications.van.ResidualBlock.variables:3
#: tensorcircuit.applications.vqes.Linear.variables:3
+#: tensorcircuit.keras.HardwareLayer.variables:3
#: tensorcircuit.keras.QuantumLayer.variables:3
msgid "Alias of `self.weights`."
msgstr ""
@@ -6725,6 +7749,7 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.weights:3
#: tensorcircuit.applications.van.ResidualBlock.variables:5
#: tensorcircuit.applications.vqes.Linear.variables:5
+#: tensorcircuit.keras.HardwareLayer.variables:5
#: tensorcircuit.keras.QuantumLayer.variables:5
msgid ""
"Note: This will not track the weights of nested `tf.Modules` that are not"
@@ -6745,6 +7770,8 @@ msgstr ""
#: tensorcircuit.applications.van.ResidualBlock.weights:3
#: tensorcircuit.applications.vqes.Linear.variables:8
#: tensorcircuit.applications.vqes.Linear.weights:3
+#: tensorcircuit.keras.HardwareLayer.variables:8
+#: tensorcircuit.keras.HardwareLayer.weights:3
#: tensorcircuit.keras.QuantumLayer.variables:8
#: tensorcircuit.keras.QuantumLayer.weights:3
msgid "A list of variables."
@@ -6772,33 +7799,36 @@ msgstr ""
#: tensorcircuit.applications.van.MaskedLinear:1
#: tensorcircuit.applications.van.ResidualBlock:1
#: tensorcircuit.applications.vqes.Linear:1 tensorcircuit.keras.QuantumLayer:1
-msgid "Bases: :py:class:`keras.engine.base_layer.Layer`"
+msgid "Bases: :py:class:`~keras.src.engine.base_layer.Layer`"
msgstr ""
-#: keras.engine.base_layer.Layer.build:1 of
+#: keras.src.engine.base_layer.Layer.build:1 of
#: tensorcircuit.applications.van.MaskedConv2D.build:1
#: tensorcircuit.keras.QuantumLayer.build:1
-msgid "Creates the variables of the layer (optional, for subclass implementers)."
+msgid "Creates the variables of the layer (for subclass implementers)."
msgstr ""
-#: keras.engine.base_layer.Layer.build:3 of
+#: keras.src.engine.base_layer.Layer.build:3 of
#: tensorcircuit.applications.van.MaskedConv2D.build:3
#: tensorcircuit.keras.QuantumLayer.build:3
msgid ""
"This is a method that implementers of subclasses of `Layer` or `Model` "
"can override if they need a state-creation step in-between layer "
-"instantiation and layer call."
+"instantiation and layer call. It is invoked automatically before the "
+"first execution of `call()`."
msgstr ""
-#: keras.engine.base_layer.Layer.build:7 of
-#: tensorcircuit.applications.van.MaskedConv2D.build:7
-#: tensorcircuit.keras.QuantumLayer.build:7
-msgid "This is typically used to create the weights of `Layer` subclasses."
+#: keras.src.engine.base_layer.Layer.build:8 of
+#: tensorcircuit.applications.van.MaskedConv2D.build:8
+#: tensorcircuit.keras.QuantumLayer.build:8
+msgid ""
+"This is typically used to create the weights of `Layer` subclasses (at "
+"the discretion of the subclass implementer)."
msgstr ""
-#: keras.engine.base_layer.Layer.build:9 of
-#: tensorcircuit.applications.van.MaskedConv2D.build:9
-#: tensorcircuit.keras.QuantumLayer.build:9
+#: keras.src.engine.base_layer.Layer.build:11 of
+#: tensorcircuit.applications.van.MaskedConv2D.build:11
+#: tensorcircuit.keras.QuantumLayer.build:11
msgid ""
"Instance of `TensorShape`, or list of instances of `TensorShape` if the "
"layer expects a list of inputs (one instance per input)."
@@ -6816,81 +7846,79 @@ msgstr ""
#: tensorcircuit.applications.van.ResidualBlock.call:3
#: tensorcircuit.applications.vqes.Linear.call:3
msgid ""
-"Note here that `call()` method in `tf.keras` is little bit different from"
-" `keras` API. In `keras` API, you can pass support masking for layers as "
-"additional arguments. Whereas `tf.keras` has `compute_mask()` method to "
-"support masking."
+"The `call()` method may not create state (except in its first invocation,"
+" wrapping the creation of variables or other resources in "
+"`tf.init_scope()`). It is recommended to create state, including "
+"`tf.Variable` instances and nested `Layer` instances,"
+msgstr ""
+
+#: of tensorcircuit.applications.van.MaskedConv2D.call:7
+#: tensorcircuit.applications.van.MaskedLinear.call:7
+#: tensorcircuit.applications.van.ResidualBlock.call:7
+#: tensorcircuit.applications.vqes.Linear.call:7
+msgid "in `__init__()`, or in the `build()` method that is"
msgstr ""
#: of tensorcircuit.applications.van.MaskedConv2D.call:8
#: tensorcircuit.applications.van.MaskedLinear.call:8
#: tensorcircuit.applications.van.ResidualBlock.call:8
#: tensorcircuit.applications.vqes.Linear.call:8
+msgid "called automatically before `call()` executes for the first time."
+msgstr ""
+
+#: of tensorcircuit.applications.van.MaskedConv2D.call:10
+#: tensorcircuit.applications.van.MaskedLinear.call:10
+#: tensorcircuit.applications.van.ResidualBlock.call:10
+#: tensorcircuit.applications.vqes.Linear.call:10
msgid ""
"Input tensor, or dict/list/tuple of input tensors. The first positional "
"`inputs` argument is subject to special rules: - `inputs` must be "
"explicitly passed. A layer cannot have zero arguments, and `inputs` "
"cannot be provided via the default value of a keyword argument. - NumPy"
-" array or Python scalar values in `inputs` get cast as tensors. - Keras "
-"mask metadata is only collected from `inputs`. - Layers are built "
+" array or Python scalar values in `inputs` get cast as tensors. - Keras"
+" mask metadata is only collected from `inputs`. - Layers are built "
"(`build(input_shape)` method) using shape info from `inputs` only. - "
"`input_spec` compatibility is only checked against `inputs`. - Mixed "
"precision input casting is only applied to `inputs`. If a layer has "
"tensor arguments in `*args` or `**kwargs`, their casting behavior in "
"mixed precision should be handled manually. - The SavedModel input "
-"specification is generated using `inputs` only. - Integration with "
+"specification is generated using `inputs` only. - Integration with "
"various ecosystem packages like TFMOT, TFLite, TF.js, etc is only "
"supported for `inputs` and not for tensors in positional and keyword "
"arguments."
msgstr ""
-#: of tensorcircuit.applications.van.MaskedConv2D.call:8
-#: tensorcircuit.applications.van.MaskedLinear.call:8
-#: tensorcircuit.applications.van.ResidualBlock.call:8
-#: tensorcircuit.applications.vqes.Linear.call:8
+#: of tensorcircuit.applications.van.MaskedConv2D.call:10
+#: tensorcircuit.applications.van.MaskedLinear.call:10
+#: tensorcircuit.applications.van.ResidualBlock.call:10
+#: tensorcircuit.applications.vqes.Linear.call:10
msgid ""
"Input tensor, or dict/list/tuple of input tensors. The first positional "
"`inputs` argument is subject to special rules: - `inputs` must be "
"explicitly passed. A layer cannot have zero"
msgstr ""
-#: of tensorcircuit.applications.van.MaskedConv2D.call:11
-#: tensorcircuit.applications.van.MaskedLinear.call:11
-#: tensorcircuit.applications.van.ResidualBlock.call:11
-#: tensorcircuit.applications.vqes.Linear.call:11
-msgid ""
-"arguments, and `inputs` cannot be provided via the default value of a "
-"keyword argument."
-msgstr ""
-
#: of tensorcircuit.applications.van.MaskedConv2D.call:13
#: tensorcircuit.applications.van.MaskedLinear.call:13
#: tensorcircuit.applications.van.ResidualBlock.call:13
#: tensorcircuit.applications.vqes.Linear.call:13
-msgid "NumPy array or Python scalar values in `inputs` get cast as tensors."
-msgstr ""
-
-#: of tensorcircuit.applications.van.MaskedConv2D.call:14
-#: tensorcircuit.applications.van.MaskedLinear.call:14
-#: tensorcircuit.applications.van.ResidualBlock.call:14
-#: tensorcircuit.applications.vqes.Linear.call:14
-msgid "Keras mask metadata is only collected from `inputs`."
+msgid ""
+"arguments, and `inputs` cannot be provided via the default value of a "
+"keyword argument."
msgstr ""
#: of tensorcircuit.applications.van.MaskedConv2D.call:15
#: tensorcircuit.applications.van.MaskedLinear.call:15
#: tensorcircuit.applications.van.ResidualBlock.call:15
#: tensorcircuit.applications.vqes.Linear.call:15
-msgid ""
-"Layers are built (`build(input_shape)` method) using shape info from "
-"`inputs` only."
+msgid "NumPy array or Python scalar values in `inputs` get cast as tensors."
msgstr ""
#: of tensorcircuit.applications.van.MaskedConv2D.call:17
#: tensorcircuit.applications.van.MaskedLinear.call:17
#: tensorcircuit.applications.van.ResidualBlock.call:17
#: tensorcircuit.applications.vqes.Linear.call:17
-msgid "`input_spec` compatibility is only checked against `inputs`."
+msgid "Keras mask metadata is only collected from `inputs`."
msgstr ""
#: of tensorcircuit.applications.van.MaskedConv2D.call:18
@@ -6898,57 +7926,73 @@ msgstr ""
#: tensorcircuit.applications.van.ResidualBlock.call:18
#: tensorcircuit.applications.vqes.Linear.call:18
msgid ""
-"Mixed precision input casting is only applied to `inputs`. If a layer has"
-" tensor arguments in `*args` or `**kwargs`, their casting behavior in "
-"mixed precision should be handled manually."
+"Layers are built (`build(input_shape)` method) using shape info from "
+"`inputs` only."
+msgstr ""
+
+#: of tensorcircuit.applications.van.MaskedConv2D.call:20
+#: tensorcircuit.applications.van.MaskedLinear.call:20
+#: tensorcircuit.applications.van.ResidualBlock.call:20
+#: tensorcircuit.applications.vqes.Linear.call:20
+msgid "`input_spec` compatibility is only checked against `inputs`."
msgstr ""
#: of tensorcircuit.applications.van.MaskedConv2D.call:21
#: tensorcircuit.applications.van.MaskedLinear.call:21
#: tensorcircuit.applications.van.ResidualBlock.call:21
#: tensorcircuit.applications.vqes.Linear.call:21
+msgid ""
+"Mixed precision input casting is only applied to `inputs`. If a layer has"
+" tensor arguments in `*args` or `**kwargs`, their casting behavior in "
+"mixed precision should be handled manually."
+msgstr ""
+
+#: of tensorcircuit.applications.van.MaskedConv2D.call:24
+#: tensorcircuit.applications.van.MaskedLinear.call:24
+#: tensorcircuit.applications.van.ResidualBlock.call:24
+#: tensorcircuit.applications.vqes.Linear.call:24
msgid "The SavedModel input specification is generated using `inputs` only."
msgstr ""
-#: of tensorcircuit.applications.van.MaskedConv2D.call:22
-#: tensorcircuit.applications.van.MaskedLinear.call:22
-#: tensorcircuit.applications.van.ResidualBlock.call:22
-#: tensorcircuit.applications.vqes.Linear.call:22
+#: of tensorcircuit.applications.van.MaskedConv2D.call:26
+#: tensorcircuit.applications.van.MaskedLinear.call:26
+#: tensorcircuit.applications.van.ResidualBlock.call:26
+#: tensorcircuit.applications.vqes.Linear.call:26
msgid ""
"Integration with various ecosystem packages like TFMOT, TFLite, TF.js, "
"etc is only supported for `inputs` and not for tensors in positional and "
"keyword arguments."
msgstr ""
-#: of tensorcircuit.applications.van.MaskedConv2D.call:25
-#: tensorcircuit.applications.van.MaskedLinear.call:25
-#: tensorcircuit.applications.van.ResidualBlock.call:25
-#: tensorcircuit.applications.vqes.Linear.call:25
+#: of tensorcircuit.applications.van.MaskedConv2D.call:29
+#: tensorcircuit.applications.van.MaskedLinear.call:29
+#: tensorcircuit.applications.van.ResidualBlock.call:29
+#: tensorcircuit.applications.vqes.Linear.call:29
msgid ""
"Additional positional arguments. May contain tensors, although this is "
"not recommended, for the reasons above."
msgstr ""
-#: of tensorcircuit.applications.van.MaskedConv2D.call:27
-#: tensorcircuit.applications.van.MaskedLinear.call:27
-#: tensorcircuit.applications.van.ResidualBlock.call:27
-#: tensorcircuit.applications.vqes.Linear.call:27
+#: of tensorcircuit.applications.van.MaskedConv2D.call:31
+#: tensorcircuit.applications.van.MaskedLinear.call:31
+#: tensorcircuit.applications.van.ResidualBlock.call:31
+#: tensorcircuit.applications.vqes.Linear.call:31
msgid ""
"Additional keyword arguments. May contain tensors, although this is not "
"recommended, for the reasons above. The following optional keyword "
"arguments are reserved: - `training`: Boolean scalar tensor of Python "
"boolean indicating whether the `call` is meant for training or "
"inference. - `mask`: Boolean input mask. If the layer's `call()` method "
-"takes a `mask` argument, its default value will be set to the mask "
-"generated for `inputs` by the previous layer (if `input` did come from "
-"a layer that generated a corresponding mask, i.e. if it came from a "
-"Keras layer with masking support)."
+"takes a `mask` argument, its default value will be set to the mask "
+"generated for `inputs` by the previous layer (if `input` did come from "
+"a layer that generated a corresponding mask, i.e. if it came from a "
+"Keras layer with masking support)."
msgstr ""
-#: of tensorcircuit.applications.van.MaskedConv2D.call:27
-#: tensorcircuit.applications.van.MaskedLinear.call:27
-#: tensorcircuit.applications.van.ResidualBlock.call:27
-#: tensorcircuit.applications.vqes.Linear.call:27
+#: of tensorcircuit.applications.van.MaskedConv2D.call:31
+#: tensorcircuit.applications.van.MaskedLinear.call:31
+#: tensorcircuit.applications.van.ResidualBlock.call:31
+#: tensorcircuit.applications.vqes.Linear.call:31
msgid ""
"Additional keyword arguments. May contain tensors, although this is not "
"recommended, for the reasons above. The following optional keyword "
@@ -6956,17 +8000,17 @@ msgid ""
"boolean indicating"
msgstr ""
-#: of tensorcircuit.applications.van.MaskedConv2D.call:31
-#: tensorcircuit.applications.van.MaskedLinear.call:31
-#: tensorcircuit.applications.van.ResidualBlock.call:31
-#: tensorcircuit.applications.vqes.Linear.call:31
+#: of tensorcircuit.applications.van.MaskedConv2D.call:35
+#: tensorcircuit.applications.van.MaskedLinear.call:35
+#: tensorcircuit.applications.van.ResidualBlock.call:35
+#: tensorcircuit.applications.vqes.Linear.call:35
msgid "whether the `call` is meant for training or inference."
msgstr ""
-#: of tensorcircuit.applications.van.MaskedConv2D.call:32
-#: tensorcircuit.applications.van.MaskedLinear.call:32
-#: tensorcircuit.applications.van.ResidualBlock.call:32
-#: tensorcircuit.applications.vqes.Linear.call:32
+#: of tensorcircuit.applications.van.MaskedConv2D.call:36
+#: tensorcircuit.applications.van.MaskedLinear.call:36
+#: tensorcircuit.applications.van.ResidualBlock.call:36
+#: tensorcircuit.applications.vqes.Linear.call:36
msgid ""
"`mask`: Boolean input mask. If the layer's `call()` method takes a `mask`"
" argument, its default value will be set to the mask generated for "
@@ -6975,18 +8019,40 @@ msgid ""
"masking support)."
msgstr ""
-#: of tensorcircuit.applications.van.MaskedConv2D.call:38
-#: tensorcircuit.applications.van.MaskedLinear.call:38
-#: tensorcircuit.applications.van.ResidualBlock.call:38
-#: tensorcircuit.applications.vqes.Linear.call:38
+#: of tensorcircuit.applications.van.MaskedConv2D.call:42
+#: tensorcircuit.applications.van.MaskedLinear.call:42
+#: tensorcircuit.applications.van.ResidualBlock.call:42
+#: tensorcircuit.applications.vqes.Linear.call:42
msgid "A tensor or list/tuple of tensors."
msgstr ""
-#: keras.engine.base_layer.Layer.get_weights:1 of
+#: keras.src.engine.base_layer.Layer.get_config:1 of
+msgid "Returns the config of the layer."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.get_config:3 of
+msgid ""
+"A layer config is a Python dictionary (serializable) containing the "
+"configuration of a layer. The same layer can be reinstantiated later "
+"(without its trained weights) from this configuration."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.get_config:8 of
+msgid ""
+"The config of a layer does not include connectivity information, nor the "
+"layer class name. These are handled by `Network` (one layer of "
+"abstraction above)."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.get_config:16 of
+msgid "Python dictionary."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.get_weights:1 of
msgid "Returns the current weights of the layer, as NumPy arrays."
msgstr ""
-#: keras.engine.base_layer.Layer.get_weights:3 of
+#: keras.src.engine.base_layer.Layer.get_weights:3 of
msgid ""
"The weights of a layer represent the state of the layer. This function "
"returns both trainable and non-trainable weight values associated with "
@@ -6994,7 +8060,7 @@ msgid ""
"state into similarly parameterized layers."
msgstr ""
-#: keras.engine.base_layer.Layer.get_weights:32 of
+#: keras.src.engine.base_layer.Layer.get_weights:32 of
msgid "Weights values as a list of NumPy arrays."
msgstr ""
@@ -7002,6 +8068,7 @@ msgstr ""
#: tensorcircuit.applications.van.MaskedLinear.metrics:1
#: tensorcircuit.applications.van.ResidualBlock.metrics:1
#: tensorcircuit.applications.vqes.Linear.metrics:1
+#: tensorcircuit.keras.HardwareLayer.metrics:1
#: tensorcircuit.keras.QuantumLayer.metrics:1
msgid "List of metrics added using the `add_metric()` API."
msgstr ""
@@ -7010,6 +8077,7 @@ msgstr ""
#: tensorcircuit.applications.van.MaskedLinear.metrics:13
#: tensorcircuit.applications.van.ResidualBlock.metrics:13
#: tensorcircuit.applications.vqes.Linear.metrics:13
+#: tensorcircuit.keras.HardwareLayer.metrics:13
#: tensorcircuit.keras.QuantumLayer.metrics:13
msgid "A list of `Metric` objects."
msgstr ""
@@ -7025,20 +8093,20 @@ msgstr ""
#: of tensorcircuit.applications.vqes.JointSchedule:1
msgid ""
"Bases: "
-":py:class:`keras.optimizer_v2.learning_rate_schedule.LearningRateSchedule`"
+":py:class:`~keras.src.optimizers.schedules.learning_rate_schedule.LearningRateSchedule`"
msgstr ""
-#: keras.optimizer_v2.learning_rate_schedule.LearningRateSchedule.from_config:1
+#: keras.src.optimizers.schedules.learning_rate_schedule.LearningRateSchedule.from_config:1
#: of
msgid "Instantiates a `LearningRateSchedule` from its config."
msgstr ""
-#: keras.optimizer_v2.learning_rate_schedule.LearningRateSchedule.from_config:3
+#: keras.src.optimizers.schedules.learning_rate_schedule.LearningRateSchedule.from_config:3
#: of
msgid "Output of `get_config()`."
msgstr ""
-#: keras.optimizer_v2.learning_rate_schedule.LearningRateSchedule.from_config:5
+#: keras.src.optimizers.schedules.learning_rate_schedule.LearningRateSchedule.from_config:5
#: of
msgid "A `LearningRateSchedule` instance."
msgstr ""
@@ -7071,6 +8139,43 @@ msgstr ""
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.stateful_randn:3
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.stateful_randn:15
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.stateful_randu:13
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.argmax:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.argmax:7
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.argmin:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.argmin:7
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.concat:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.cond:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.cond:5
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.cond:7
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.cond:9
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.coo_sparse_matrix:10
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.cumsum:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.cumsum:8
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.max:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.max:7
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.min:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.min:7
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.scatter:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.scatter:5
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.scatter:7
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.scatter:9
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.sigmoid:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.sigmoid:5
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stateful_randc:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stateful_randc:11
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stateful_randn:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stateful_randn:15
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stateful_randu:13
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stop_gradient:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stop_gradient:5
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.switch:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.switch:5
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.switch:7
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.tile:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.tile:7
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.unique_with_counts:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.vectorized_value_and_grad:29
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.vectorized_value_and_grad:36
#: tensorcircuit.backends.jax_backend.JaxBackend.argmax:3
#: tensorcircuit.backends.jax_backend.JaxBackend.argmax:7
#: tensorcircuit.backends.jax_backend.JaxBackend.argmin:3
@@ -7216,8 +8321,13 @@ msgstr ""
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.vectorized_value_and_grad:29
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.vectorized_value_and_grad:36
#: tensorcircuit.basecircuit.BaseCircuit.sample_expectation_ps:53
-#: tensorcircuit.quantum.count_d2s:10 tensorcircuit.quantum.count_d2s:14
-#: tensorcircuit.quantum.count_s2d:4 tensorcircuit.quantum.count_s2d:8
+#: tensorcircuit.cloud.abstraction.Device.list_properties:3
+#: tensorcircuit.cloud.abstraction.Device.topology:3
+#: tensorcircuit.cloud.abstraction.Device.topology_graph:5
+#: tensorcircuit.cloud.tencent.submit_task:6
+#: tensorcircuit.cloud.tencent.submit_task:8 tensorcircuit.quantum.count_d2s:10
+#: tensorcircuit.quantum.count_d2s:14 tensorcircuit.quantum.count_s2d:4
+#: tensorcircuit.quantum.count_s2d:8
#: tensorcircuit.simplify.pseudo_contract_between:3
#: tensorcircuit.simplify.pseudo_contract_between:5
#: tensorcircuit.simplify.pseudo_contract_between:7
@@ -7259,27 +8369,23 @@ msgstr ""
msgid "The `tc.backend` object that with all registered universal functions."
msgstr ""
-#: ../../source/api/backends/jax_backend.rst:2
-msgid "tensorcircuit.backends.jax_backend"
-msgstr ""
-
-#: of tensorcircuit.backends.jax_backend:1
-msgid "Backend magic inherited from tensornetwork: jax backend"
+#: ../../source/api/backends/cupy_backend.rst:2
+msgid "tensorcircuit.backends.cupy_backend"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend:1
-msgid ""
-"Bases: :py:class:`tensornetwork.backends.jax.jax_backend.JaxBackend`, "
-":py:class:`tensorcircuit.backends.abstract_backend.ExtendedBackend`"
+#: of tensorcircuit.backends.cupy_backend:1
+msgid "CuPy backend. Not in the tensornetwork package and highly experimental."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend:1
msgid ""
-"See the original backend API at `jax backend "
-"`_"
+"Bases: "
+":py:class:`~tensornetwork.backends.abstract_backend.AbstractBackend`, "
+":py:class:`~tensorcircuit.backends.abstract_backend.ExtendedBackend`"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.abs:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.abs:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.abs:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.abs:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.abs:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.abs:1
@@ -7288,14 +8394,17 @@ msgid ""
" tensor."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.abs:4
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.abs:4
+#: tensorcircuit.backends.jax_backend.JaxBackend.abs:4
#: tensorcircuit.backends.numpy_backend.NumpyBackend.abs:4
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.abs:4
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.abs:4
msgid "Its elementwise absolute value."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.acos:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.acos:1
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.asin:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.acos:1
#: tensorcircuit.backends.jax_backend.JaxBackend.asin:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.acos:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.asin:1
@@ -7307,6 +8416,22 @@ msgid "Return the acos of a tensor ``a``."
msgstr ""
#: of tensorcircuit.backends.abstract_backend.ExtendedBackend.sqrtmh:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.acos:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.acosh:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.asin:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.asinh:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.atan:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.atan2:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.atanh:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.copy:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.cosh:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.eigvalsh:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.kron:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.kron:5
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.numpy:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.sinh:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.tan:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.tanh:3
#: tensorcircuit.backends.jax_backend.JaxBackend.acos:3
#: tensorcircuit.backends.jax_backend.JaxBackend.acosh:3
#: tensorcircuit.backends.jax_backend.JaxBackend.asin:3
@@ -7374,28 +8499,32 @@ msgstr ""
msgid "tensor in matrix form"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.acos:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.acos:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.acos:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.acos:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.acos:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.acos:5
msgid "acos of ``a``"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.acosh:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.acosh:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.acosh:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.acosh:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.acosh:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.acosh:1
msgid "Return the acosh of a tensor ``a``."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.acosh:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.acosh:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.acosh:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.acosh:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.acosh:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.acosh:5
msgid "acosh of ``a``"
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.addition:1
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.addition:1
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.addition:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.addition:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.addition:1
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.addition:1
@@ -7404,7 +8533,13 @@ msgid ""
"implementation. :param tensor1: A tensor. :param tensor2: A tensor."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.cos:4
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.conj:4
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.cos:4
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.expm:4
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.multiply:7
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.sin:4
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.softmax:9
+#: tensorcircuit.backends.jax_backend.JaxBackend.cos:4
#: tensorcircuit.backends.jax_backend.JaxBackend.expm:4
#: tensorcircuit.backends.jax_backend.JaxBackend.sin:4
#: tensorcircuit.backends.jax_backend.JaxBackend.softmax:9
@@ -7422,8 +8557,11 @@ msgstr ""
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.sin:4
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.softmax:9
#: tensorcircuit.experimental.hamiltonian_evol:12
+#: tensornetwork.backends.abstract_backend.AbstractBackend.addition:6
+#: tensornetwork.backends.abstract_backend.AbstractBackend.divide:6
#: tensornetwork.backends.abstract_backend.AbstractBackend.exp:4
#: tensornetwork.backends.abstract_backend.AbstractBackend.log:4
+#: tensornetwork.backends.abstract_backend.AbstractBackend.subtraction:6
#: tensornetwork.backends.jax.jax_backend.JaxBackend.addition:6
#: tensornetwork.backends.jax.jax_backend.JaxBackend.conj:4
#: tensornetwork.backends.jax.jax_backend.JaxBackend.divide:6
@@ -7459,6 +8597,7 @@ msgstr ""
#: of tensorcircuit.backends.abstract_backend.ExtendedBackend.adjoint:3
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.reshape2:3
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.reshapem:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.relu:9
#: tensorcircuit.backends.jax_backend.JaxBackend.relu:9
#: tensorcircuit.backends.numpy_backend.NumpyBackend.relu:9
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.relu:9
@@ -7470,42 +8609,49 @@ msgstr ""
msgid "adjoint tensor of ``a``"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.arange:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.arange:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.arange:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.arange:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.arange:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.arange:1
msgid "Values are generated within the half-open interval [start, stop)"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.arange:3
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.arange:3
+#: tensorcircuit.backends.jax_backend.JaxBackend.arange:3
#: tensorcircuit.backends.numpy_backend.NumpyBackend.arange:3
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.arange:3
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.arange:3
msgid "start index"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.arange:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.arange:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.arange:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.arange:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.arange:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.arange:5
msgid "end index, defaults to None"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.arange:7
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.arange:7
+#: tensorcircuit.backends.jax_backend.JaxBackend.arange:7
#: tensorcircuit.backends.numpy_backend.NumpyBackend.arange:7
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.arange:7
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.arange:7
msgid "steps, defaults to 1"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.argmax:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.argmax:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.argmax:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.argmax:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.argmax:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.argmax:1
msgid "Return the index of maximum of an array an axis."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.argmax:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.argmax:5
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.argmin:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.argmax:5
#: tensorcircuit.backends.jax_backend.JaxBackend.argmin:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.argmax:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.argmin:5
@@ -7516,70 +8662,80 @@ msgstr ""
msgid "[description], defaults to 0, different behavior from numpy defaults!"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.argmin:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.argmin:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.argmin:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.argmin:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.argmin:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.argmin:1
msgid "Return the index of minimum of an array an axis."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.asin:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.asin:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.asin:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.asin:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.asin:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.asin:5
msgid "asin of ``a``"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.asinh:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.asinh:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.asinh:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.asinh:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.asinh:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.asinh:1
msgid "Return the asinh of a tensor ``a``."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.asinh:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.asinh:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.asinh:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.asinh:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.asinh:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.asinh:5
msgid "asinh of ``a``"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.atan:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.atan:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.atan:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.atan:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.atan:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.atan:1
msgid "Return the atan of a tensor ``a``."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.atan:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.atan:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.atan:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.atan:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.atan:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.atan:5
msgid "atan of ``a``"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.atan2:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.atan2:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.atan2:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.atan2:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.atan2:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.atan2:1
msgid "Return the atan of a tensor ``y``/``x``."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.atan2:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.atan2:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.atan2:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.atan2:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.atan2:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.atan2:5
msgid "atan2 of ``a``"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.atanh:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.atanh:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.atanh:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.atanh:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.atanh:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.atanh:1
msgid "Return the atanh of a tensor ``a``."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.atanh:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.atanh:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.atanh:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.atanh:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.atanh:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.atanh:5
@@ -7587,6 +8743,7 @@ msgid "atanh of ``a``"
msgstr ""
#: of
+#: tensornetwork.backends.abstract_backend.AbstractBackend.broadcast_left_multiplication:1
#: tensornetwork.backends.jax.jax_backend.JaxBackend.broadcast_left_multiplication:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.broadcast_left_multiplication:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.broadcast_left_multiplication:1
@@ -7600,6 +8757,8 @@ msgid ""
msgstr ""
#: of
+#: tensornetwork.backends.abstract_backend.AbstractBackend.broadcast_left_multiplication:8
+#: tensornetwork.backends.abstract_backend.AbstractBackend.broadcast_right_multiplication:8
#: tensornetwork.backends.jax.jax_backend.JaxBackend.broadcast_left_multiplication:8
#: tensornetwork.backends.jax.jax_backend.JaxBackend.broadcast_right_multiplication:8
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.broadcast_left_multiplication:8
@@ -7612,6 +8771,7 @@ msgid "The result of multiplying `tensor1` onto `tensor2`."
msgstr ""
#: of
+#: tensornetwork.backends.abstract_backend.AbstractBackend.broadcast_right_multiplication:1
#: tensornetwork.backends.jax.jax_backend.JaxBackend.broadcast_right_multiplication:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.broadcast_right_multiplication:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.broadcast_right_multiplication:1
@@ -7624,7 +8784,8 @@ msgid ""
"tensor."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.cast:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.cast:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.cast:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.cast:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.cast:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.cast:1
@@ -7632,6 +8793,10 @@ msgid "Cast the tensor dtype of a ``a``."
msgstr ""
#: of tensorcircuit.backends.abstract_backend.ExtendedBackend.sizen:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.cast:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.imag:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.real:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.size:3
#: tensorcircuit.backends.jax_backend.JaxBackend.cast:3
#: tensorcircuit.backends.jax_backend.JaxBackend.imag:3
#: tensorcircuit.backends.jax_backend.JaxBackend.real:3
@@ -7651,21 +8816,24 @@ msgstr ""
msgid "tensor"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.cast:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.cast:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.cast:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.cast:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.cast:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.cast:5
msgid "\"float32\", \"float64\", \"complex64\", \"complex128\""
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.cast:7
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.cast:7
+#: tensorcircuit.backends.jax_backend.JaxBackend.cast:7
#: tensorcircuit.backends.numpy_backend.NumpyBackend.cast:7
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.cast:7
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.cast:7
msgid "``a`` of new dtype"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.concat:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.concat:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.concat:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.concat:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.concat:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.concat:1
@@ -7676,6 +8844,10 @@ msgstr ""
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.implicit_randu:5
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.stateful_randn:9
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.stateful_randu:7
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.concat:5
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stateful_randn:9
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stateful_randu:7
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.vectorized_value_and_grad:31
#: tensorcircuit.backends.jax_backend.JaxBackend.concat:5
#: tensorcircuit.backends.jax_backend.JaxBackend.implicit_randn:5
#: tensorcircuit.backends.jax_backend.JaxBackend.implicit_randu:5
@@ -7695,7 +8867,8 @@ msgstr ""
msgid "[description], defaults to 0"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.cond:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.cond:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.cond:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.cond:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.cond:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.cond:1
@@ -7704,14 +8877,16 @@ msgid ""
"functionality of ``jax.lax.cond``."
msgstr ""
-#: of tensorcircuit.backends.pytorch_backend._conj_torch:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.conj:1
+#: tensorcircuit.backends.pytorch_backend._conj_torch:1
#: tensornetwork.backends.jax.jax_backend.JaxBackend.conj:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.conj:1
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.conj:1
msgid "Return the complex conjugate of `tensor` :param tensor: A tensor."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.convert_to_tensor:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.convert_to_tensor:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.convert_to_tensor:1
#: tensorcircuit.backends.numpy_backend._convert_to_tensor_numpy:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.convert_to_tensor:1
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.convert_to_tensor:1
@@ -7720,6 +8895,7 @@ msgstr ""
#: of
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.coo_sparse_matrix:1
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.coo_sparse_matrix:1
#: tensorcircuit.backends.jax_backend.JaxBackend.coo_sparse_matrix:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.coo_sparse_matrix:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.coo_sparse_matrix:1
@@ -7730,6 +8906,7 @@ msgstr ""
#: of
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.coo_sparse_matrix:4
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.coo_sparse_matrix:4
#: tensorcircuit.backends.jax_backend.JaxBackend.coo_sparse_matrix:4
#: tensorcircuit.backends.numpy_backend.NumpyBackend.coo_sparse_matrix:4
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.coo_sparse_matrix:4
@@ -7738,6 +8915,7 @@ msgstr ""
#: of
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.coo_sparse_matrix:6
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.coo_sparse_matrix:6
#: tensorcircuit.backends.jax_backend.JaxBackend.coo_sparse_matrix:6
#: tensorcircuit.backends.numpy_backend.NumpyBackend.coo_sparse_matrix:6
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.coo_sparse_matrix:6
@@ -7746,6 +8924,7 @@ msgstr ""
#: of
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.coo_sparse_matrix:8
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.coo_sparse_matrix:8
#: tensorcircuit.backends.jax_backend.JaxBackend.coo_sparse_matrix:8
#: tensorcircuit.backends.numpy_backend.NumpyBackend.coo_sparse_matrix:8
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.coo_sparse_matrix:8
@@ -7767,49 +8946,56 @@ msgstr ""
msgid "SparseTensor in backend format"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.copy:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.copy:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.copy:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.copy:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.copy:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.copy:1
msgid "Return the copy of ``a``, matrix exponential."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.copy:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.copy:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.copy:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.copy:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.copy:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.copy:5
msgid "matrix exponential of matrix ``a``"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.cos:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.cos:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.cos:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.cos:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.cos:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.cos:1
msgid "Return cos of `tensor`. :param tensor: A tensor."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.cosh:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.cosh:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.cosh:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.cosh:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.cosh:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.cosh:1
msgid "Return the cosh of a tensor ``a``."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.cosh:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.cosh:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.cosh:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.cosh:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.cosh:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.cosh:5
msgid "cosh of ``a``"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.cumsum:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.cumsum:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.cumsum:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.cumsum:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.cumsum:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.cumsum:1
msgid "Return the cumulative sum of the elements along a given axis."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.cumsum:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.cumsum:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.cumsum:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.cumsum:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.cumsum:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.cumsum:5
@@ -7836,14 +9022,17 @@ msgstr ""
msgid "The tensor object represented by the string."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.device:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.device:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.device:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.device:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.device:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.device:1
msgid "get the universal device str for the tensor, in the format of tf"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.device:3
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.device:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.device_move:3
+#: tensorcircuit.backends.jax_backend.JaxBackend.device:3
#: tensorcircuit.backends.jax_backend.JaxBackend.device_move:3
#: tensorcircuit.backends.numpy_backend.NumpyBackend.device:3
#: tensorcircuit.backends.numpy_backend.NumpyBackend.device_move:3
@@ -7855,35 +9044,40 @@ msgstr ""
msgid "the tensor"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.device:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.device:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.device:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.device:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.device:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.device:5
msgid "device str where the tensor lives on"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.device_move:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.device_move:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.device_move:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.device_move:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.device_move:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.device_move:1
msgid "move tensor ``a`` to device ``dev``"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.device_move:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.device_move:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.device_move:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.device_move:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.device_move:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.device_move:5
msgid "device str or device obj in corresponding backend"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.device_move:7
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.device_move:7
+#: tensorcircuit.backends.jax_backend.JaxBackend.device_move:7
#: tensorcircuit.backends.numpy_backend.NumpyBackend.device_move:7
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.device_move:7
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.device_move:7
msgid "the tensor on new device"
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.diagflat:1
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.diagflat:1
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.diagflat:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.diagflat:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.diagflat:1
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.diagflat:1
@@ -7893,21 +9087,24 @@ msgid ""
"which to place its elements."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.diagflat:6
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.diagflat:6
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.diagflat:6
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.diagflat:6
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.diagflat:6
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.diagflat:6
msgid "A new tensor with all zeros save the specified diagonal."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.diagonal:1
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.diagonal:1
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.diagonal:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.diagonal:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.diagonal:1
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.diagonal:1
msgid "Return specified diagonals."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.diagonal:3
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.diagonal:3
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.diagonal:3
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.diagonal:3
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.diagonal:3
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.diagonal:3
@@ -7921,7 +9118,8 @@ msgid ""
"diagonals."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.diagonal:11
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.diagonal:11
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.diagonal:11
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.diagonal:11
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.diagonal:11
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.diagonal:11
@@ -7930,7 +9128,14 @@ msgid ""
"matrices from vectors, use diagflat."
msgstr ""
-#: of tensorcircuit.backends.tensorflow_backend._tensordot_tf:3
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.reshape:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.shape_tuple:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.tensordot:3
+#: tensorcircuit.backends.tensorflow_backend._tensordot_tf:3
+#: tensornetwork.backends.abstract_backend.AbstractBackend.diagonal:14
+#: tensornetwork.backends.abstract_backend.AbstractBackend.shape_tensor:3
+#: tensornetwork.backends.abstract_backend.AbstractBackend.slice:3
+#: tensornetwork.backends.abstract_backend.AbstractBackend.trace:10
#: tensornetwork.backends.jax.jax_backend.JaxBackend.diagonal:14
#: tensornetwork.backends.jax.jax_backend.JaxBackend.reshape:3
#: tensornetwork.backends.jax.jax_backend.JaxBackend.shape_tensor:3
@@ -7961,7 +9166,9 @@ msgstr ""
msgid "A tensor."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.diagonal:15
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.diagonal:15
+#: tensornetwork.backends.abstract_backend.AbstractBackend.trace:11
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.diagonal:15
#: tensornetwork.backends.jax.jax_backend.JaxBackend.trace:11
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.diagonal:15
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.trace:11
@@ -7970,16 +9177,21 @@ msgstr ""
msgid "Offset of the diagonal from the main diagonal."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.diagonal:16
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.diagonal:19
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.trace:12
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.trace:15
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.diagonal:16
+#: tensornetwork.backends.abstract_backend.AbstractBackend.diagonal:19
+#: tensornetwork.backends.abstract_backend.AbstractBackend.trace:12
+#: tensornetwork.backends.abstract_backend.AbstractBackend.trace:15
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.diagonal:16
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.diagonal:19
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.trace:12
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.trace:15
msgid ""
"Axis to be used as the first/second axis of the 2D sub-arrays from which "
-"the diagonals should be taken. Defaults to second last/last axis."
+"the diagonals should be taken. Defaults to second-last/last axis."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.diagonal:23
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.diagonal:23
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.diagonal:23
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.diagonal:23
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.diagonal:25
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.diagonal:33
@@ -7988,21 +9200,24 @@ msgid ""
"batched diagonals."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.diagonal:25
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.diagonal:25
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.diagonal:25
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.diagonal:25
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.diagonal:27
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.diagonal:35
msgid "A dim = min(1, tensor.ndim - 2) tensor storing"
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.diagonal:26
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.diagonal:26
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.diagonal:26
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.diagonal:26
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.diagonal:28
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.diagonal:36
msgid "the batched diagonals."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.divide:1
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.divide:1
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.divide:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.divide:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.divide:1
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.divide:1
@@ -8011,145 +9226,77 @@ msgid ""
"implementation. :param tensor1: A tensor. :param tensor2: A tensor."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.dtype:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.dtype:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.dtype:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.dtype:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.dtype:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.dtype:1
msgid "Obtain dtype string for tensor ``a``"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.dtype:3
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.dtype:3
+#: tensorcircuit.backends.jax_backend.JaxBackend.dtype:3
#: tensorcircuit.backends.numpy_backend.NumpyBackend.dtype:3
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.dtype:3
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.dtype:3
msgid "The tensor"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.dtype:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.dtype:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.dtype:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.dtype:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.dtype:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.dtype:5
msgid "dtype str, such as \"complex64\""
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigh:1
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigh:1
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigh:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigh:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.eigh:1
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.eigh:1
msgid "Compute eigenvectors and eigenvalues of a hermitian matrix."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigh:3
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigh:3
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigh:3
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigh:3
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.eigh:3
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.eigh:3
msgid "A symetric matrix."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigh:5
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigh:5
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigh:5
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigh:5
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.eigh:5
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.eigh:5
msgid "The eigenvalues in ascending order. Tensor: The eigenvectors."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:1
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigs:1
msgid ""
-"Implicitly restarted Arnoldi method for finding the lowest eigenvector-"
-"eigenvalue pairs of a linear operator `A`. `A` is a function implementing"
-" the matrix-vector product."
+"Arnoldi method for finding the lowest eigenvector-eigenvalue pairs of a "
+"linear operator `A`. `A` is a callable implementing the matrix-vector "
+"product. If no `initial_state` is provided then `shape` and `dtype` have "
+"to be passed so that a suitable initial state can be randomly generated."
+" :param A: A (sparse) implementation of a linear operator :param arsg: A "
+"list of arguments to `A`. `A` will be called as"
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:6
-msgid ""
-"WARNING: This routine uses jax.jit to reduce runtimes. jitting is "
-"triggered at the first invocation of `eigs`, and on any subsequent calls "
-"if the python `id` of `A` changes, even if the formal definition of `A` "
-"stays the same. Example: the following will jit once at the beginning, "
-"and then never again:"
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigs:8
+#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh:8
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh:8
+msgid "`res = A(initial_state, *args)`."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:12
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:12
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:10
-msgid "```python import jax import numpy as np def A(H,x):"
-msgstr ""
-
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:16
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:31
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:16
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:31
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:14
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:29
-msgid "return jax.np.dot(H,x)"
-msgstr ""
-
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:19
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:19
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:17
-msgid "for n in range(100):"
-msgstr ""
-
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:18
-msgid ""
-"H = jax.np.array(np.random.rand(10,10)) x = "
-"jax.np.array(np.random.rand(10,10)) res = eigs(A, [H],x) #jitting is "
-"triggerd only at `n=0`"
-msgstr ""
-
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:23
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:23
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:21
-msgid ""
-"The following code triggers jitting at every iteration, which results in "
-"considerably reduced performance"
-msgstr ""
-
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:26
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:26
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:24
-msgid "```python import jax import numpy as np for n in range(100):"
-msgstr ""
-
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:30
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:30
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:28
-msgid "def A(H,x):"
-msgstr ""
-
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:32
-msgid ""
-"H = jax.np.array(np.random.rand(10,10)) x = "
-"jax.np.array(np.random.rand(10,10)) res = eigs(A, [H],x) #jitting is "
-"triggerd at every step `n`"
-msgstr ""
-
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:37
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:37
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:35
-msgid ""
-"A (sparse) implementation of a linear operator. Call signature of `A` is "
-"`res = A(vector, *args)`, where `vector` can be an arbitrary `Tensor`, "
-"and `res.shape` has to be `vector.shape`."
-msgstr ""
-
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigsh_lanczos:6
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:40
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:40
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:38
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigs:9
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh_lanczos:6
-#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.eigsh_lanczos:6
-msgid ""
-"A list of arguments to `A`. `A` will be called as `res = "
-"A(initial_state, *args)`."
-msgstr ""
-
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:42
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:42
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigs:9
+#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh:9
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigs:11
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh:9
msgid ""
"An initial vector for the algorithm. If `None`, a random initial `Tensor`"
-" is created using the `backend.randn` method"
+" is created using the `numpy.random.randn` method."
msgstr ""
#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigs:12
@@ -8165,12 +9312,16 @@ msgstr ""
msgid "The shape of the input-dimension of `A`."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:45
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:45
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:43
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigs:13
+#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh:13
+#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh_lanczos:11
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigs:15
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh:13
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh_lanczos:11
+#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.eigsh_lanczos:11
msgid ""
-"The dtype of the input `A`. If no `initial_state` is provided, a random "
-"initial state with shape `shape` and dtype `dtype` is created."
+"The dtype of the input `A`. If both no `initial_state` is provided, a "
+"random initial state with shape `shape` and dtype `dtype` is created."
msgstr ""
#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigs:15
@@ -8186,111 +9337,121 @@ msgstr ""
msgid "The number of iterations (number of krylov vectors)."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:48
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:48
-msgid "The number of eigenvector-eigenvalue pairs to be computed."
-msgstr ""
-
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:49
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:49
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigs:16
+#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh:16
+#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh_lanczos:14
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigs:18
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh:16
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh_lanczos:14
+#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.eigsh_lanczos:14
msgid ""
-"The desired precision of the eigenvalues. For the jax backend this has "
-"currently no effect, and precision of eigenvalues is not guaranteed. This"
-" feature may be added at a later point. To increase precision the caller "
-"can either increase `maxiter` or `num_krylov_vecs`."
+"The nummber of eigenvector-eigenvalue pairs to be computed. If `numeig > "
+"1`, `reorthogonalize` has to be `True`."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:53
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:53
-msgid ""
-"Flag for targetting different types of eigenvalues. Currently supported "
-"are `which = 'LR'` (larges real part) and `which = 'LM'` (larges "
-"magnitude)."
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigs:18
+#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh:18
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigs:20
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh:18
+msgid "The desired precision of the eigenvalus. Uses"
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:56
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:56
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigs:19
+#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh:19
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh:19
msgid ""
-"Maximum number of restarts. For `maxiter=0` the routine becomes "
-"equivalent to a simple Arnoldi method."
+"['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI'] Which `k` eigenvectors and "
+"eigenvalues to find: 'LM' : largest magnitude 'SM' : smallest "
+"magnitude 'LR' : largest real part 'SR' : smallest real part "
+"'LI' : largest imaginary part 'SI' : smallest imaginary part Note "
+"that not all of those might be supported by specialized backends."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:59
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:59
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigs:19
+#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh:19
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh:19
msgid ""
-"(eigvals, eigvecs) eigvals: A list of `numeig` eigenvalues eigvecs: A "
-"list of `numeig` eigenvectors"
+"['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI'] Which `k` eigenvectors and "
+"eigenvalues to find:"
msgstr ""
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigsh_lanczos:33
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:62
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:62
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:66
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh_lanczos:33
-#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.eigsh_lanczos:33
-msgid "(eigvals, eigvecs)"
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigs:21
+#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh:21
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh:21
+msgid ""
+"'LM' : largest magnitude 'SM' : smallest magnitude 'LR' : largest real "
+"part 'SR' : smallest real part 'LI' : largest imaginary part 'SI' : "
+"smallest imaginary part"
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:62
-#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:62
-msgid ""
-"eigvals: A list of `numeig` eigenvalues eigvecs: A list of `numeig` "
-"eigenvectors"
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigs:27
+#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh:27
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh:27
+msgid "Note that not all of those might be supported by specialized backends."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:1
-msgid ""
-"Implicitly restarted Lanczos method for finding the lowest eigenvector-"
-"eigenvalue pairs of a symmetric (hermitian) linear operator `A`. `A` is a"
-" function implementing the matrix-vector product."
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigs:28
+#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh:28
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigs:28
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh:28
+msgid "The maximum number of iterations."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:6
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigs:30
+#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh:30
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigs:30
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh:30
msgid ""
-"WARNING: This routine uses jax.jit to reduce runtimes. jitting is "
-"triggered at the first invocation of `eigsh`, and on any subsequent calls"
-" if the python `id` of `A` changes, even if the formal definition of `A` "
-"stays the same. Example: the following will jit once at the beginning, "
-"and then never again:"
+"An array of `numeig` lowest eigenvalues `list`: A list of `numeig` lowest"
+" eigenvectors"
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:18
-msgid ""
-"H = jax.np.array(np.random.rand(10,10)) x = "
-"jax.np.array(np.random.rand(10,10)) res = eigsh(A, [H],x) #jitting is "
-"triggerd only at `n=0`"
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigs:32
+#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh:32
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh:32
+#: tensornetwork.matrixproductstates.base_mps.BaseMPS.apply_transfer_operator:12
+#: tensornetwork.matrixproductstates.base_mps.BaseMPS.get_tensor:10
+#: tensornetwork.matrixproductstates.base_mps.BaseMPS.position:10
+#: tensornetwork.matrixproductstates.finite_mps.FiniteMPS.canonicalize:9
+msgid "`Tensor`"
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:32
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigsh:1
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh:1
msgid ""
-"H = jax.np.array(np.random.rand(10,10)) x = "
-"jax.np.array(np.random.rand(10,10)) res = eigsh(A, [H],x) #jitting is "
-"triggerd at every step `n`"
+"Lanczos method for finding the lowest eigenvector-eigenvalue pairs of a "
+"symmetric (hermitian) linear operator `A`. `A` is a callable implementing"
+" the matrix-vector product. If no `initial_state` is provided then "
+"`shape` and `dtype` have to be passed so that a suitable initial state "
+"can be randomly generated. :param A: A (sparse) implementation of a "
+"linear operator :param arsg: A list of arguments to `A`. `A` will be "
+"called as"
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:1
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigsh_lanczos:1
msgid ""
-"Lanczos method for finding the lowest eigenvector-eigenvalue pairs of a "
-"hermitian linear operator `A`. `A` is a function implementing the matrix-"
-"vector product. WARNING: This routine uses jax.jit to reduce runtimes. "
-"jitting is triggered at the first invocation of `eigsh_lanczos`, and on "
-"any subsequent calls if the python `id` of `A` changes, even if the "
-"formal definition of `A` stays the same. Example: the following will jit "
-"once at the beginning, and then never again:"
+"Lanczos method for finding the lowest eigenvector-eigenvalue pairs of "
+"`A`. :param A: A (sparse) implementation of a linear operator."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:16
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigsh_lanczos:4
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh_lanczos:4
+#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.eigsh_lanczos:4
msgid ""
-"H = jax.np.array(np.random.rand(10,10)) x = "
-"jax.np.array(np.random.rand(10,10)) res = eigsh_lanczos(A, [H],x) "
-"#jitting is triggerd only at `n=0`"
+"Call signature of `A` is `res = A(vector, *args)`, where `vector` can be "
+"an arbitrary `Tensor`, and `res.shape` has to be `vector.shape`."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:30
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigsh_lanczos:6
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:40
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:40
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:38
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigs:9
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh_lanczos:6
+#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.eigsh_lanczos:6
msgid ""
-"H = jax.np.array(np.random.rand(10,10)) x = "
-"jax.np.array(np.random.rand(10,10)) res = eigsh_lanczos(A, [H],x) "
-"#jitting is triggerd at every step `n`"
+"A list of arguments to `A`. `A` will be called as `res = "
+"A(initial_state, *args)`."
msgstr ""
#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigsh_lanczos:8
@@ -8301,18 +9462,12 @@ msgid ""
"`Tensor` is created using the `backend.randn` method"
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:46
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigsh_lanczos:16
msgid ""
-"The number of eigenvector-eigenvalue pairs to be computed. If `numeig > "
-"1`, `reorthogonalize` has to be `True`."
-msgstr ""
-
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:48
-msgid ""
-"The desired precision of the eigenvalues. For the jax backend this has "
-"currently no effect, and precision of eigenvalues is not guaranteed. This"
-" feature may be added at a later point. To increase precision the caller "
-"can increase `num_krylov_vecs`."
+"The desired precision of the eigenvalus. Uses "
+"`backend.norm(eigvalsnew[0:numeig] - eigvalsold[0:numeig]) < tol` as "
+"stopping criterion between two diagonalization steps of the tridiagonal "
+"operator."
msgstr ""
#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigsh_lanczos:20
@@ -8326,11 +9481,12 @@ msgid ""
"found."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:57
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigsh_lanczos:25
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh_lanczos:25
+#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.eigsh_lanczos:25
msgid ""
"The tridiagonal Operator is diagonalized every `ndiag` iterations to "
-"check convergence. This has currently no effect for the jax backend, but "
-"may be added at a later point."
+"check convergence."
msgstr ""
#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigsh_lanczos:27
@@ -8342,54 +9498,73 @@ msgid ""
"orthogonalization (more costly than `reorthogonalize=False`)"
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:63
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigsh_lanczos:30
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh_lanczos:30
+#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.eigsh_lanczos:30
msgid ""
-"(eigvals, eigvecs) eigvals: A jax-array containing `numeig` lowest "
-"eigenvalues eigvecs: A list of `numeig` lowest eigenvectors"
+"(eigvals, eigvecs) eigvals: A list of `numeig` lowest eigenvalues "
+"eigvecs: A list of `numeig` lowest eigenvectors"
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:66
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigsh_lanczos:33
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:62
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:62
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:66
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh_lanczos:33
+#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.eigsh_lanczos:33
+msgid "(eigvals, eigvecs)"
+msgstr ""
+
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigsh_lanczos:33
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh_lanczos:33
+#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.eigsh_lanczos:33
msgid ""
-"eigvals: A jax-array containing `numeig` lowest eigenvalues eigvecs: A "
-"list of `numeig` lowest eigenvectors"
+"eigvals: A list of `numeig` lowest eigenvalues eigvecs: A list of "
+"`numeig` lowest eigenvectors"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.eigvalsh:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.eigvalsh:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.eigvalsh:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.eigvalsh:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.eigvalsh:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.eigvalsh:1
msgid "Get the eigenvalues of matrix ``a``."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.eigvalsh:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.eigvalsh:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.eigvalsh:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.eigvalsh:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.eigvalsh:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.eigvalsh:5
msgid "eigenvalues of ``a``"
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.einsum:1
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.einsum:1
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.einsum:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.einsum:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.einsum:1
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.einsum:1
msgid "Calculate sum of products of tensors according to expression."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eps:1
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.eps:1
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eps:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eps:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.eps:1
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.eps:1
msgid "Return machine epsilon for given `dtype`"
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eps:3
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.eps:3
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eps:3
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eps:3
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.eps:3
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.eps:3
msgid "A dtype."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eps:5
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.eps:5
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eps:5
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eps:5
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.eps:5
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.eps:5
@@ -8403,21 +9578,24 @@ msgstr ""
msgid "Return elementwise exp of `tensor`. :param tensor: A tensor."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.expm:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.expm:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.expm:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.expm:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.expm:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.expm:1
msgid "Return expm log of `matrix`, matrix exponential. :param matrix: A tensor."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.eye:4
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.eye:4
+#: tensorcircuit.backends.jax_backend.JaxBackend.eye:4
#: tensorcircuit.backends.numpy_backend.NumpyBackend.eye:4
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.eye:4
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.eye:4
msgid "Return an identity matrix of dimension `dim`"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.eye:2
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.eye:2
+#: tensorcircuit.backends.jax_backend.JaxBackend.eye:2
#: tensorcircuit.backends.numpy_backend.NumpyBackend.eye:2
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.eye:2
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.eye:2
@@ -8427,7 +9605,9 @@ msgid ""
"Block-sparse behavior is currently not supported"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.eye:6
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.eye:6
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.eye:9
+#: tensorcircuit.backends.jax_backend.JaxBackend.eye:6
#: tensorcircuit.backends.jax_backend.JaxBackend.eye:9
#: tensorcircuit.backends.numpy_backend.NumpyBackend.eye:6
#: tensorcircuit.backends.numpy_backend.NumpyBackend.eye:9
@@ -8438,7 +9618,8 @@ msgstr ""
msgid "The dimension of the returned matrix."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.eye:8
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.eye:8
+#: tensorcircuit.backends.jax_backend.JaxBackend.eye:8
#: tensorcircuit.backends.numpy_backend.NumpyBackend.eye:8
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.eye:8
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.eye:8
@@ -8633,14 +9814,17 @@ msgid ""
"convergence was achieved, the number of restarts otherwise."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.grad:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.grad:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.grad:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.grad:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.grad:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.grad:1
msgid "Return the function which is the grad function of input ``f``."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.grad:13
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.grad:13
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.value_and_grad:13
+#: tensorcircuit.backends.jax_backend.JaxBackend.grad:13
#: tensorcircuit.backends.jax_backend.JaxBackend.value_and_grad:13
#: tensorcircuit.backends.numpy_backend.NumpyBackend.grad:13
#: tensorcircuit.backends.numpy_backend.NumpyBackend.value_and_grad:13
@@ -8651,7 +9835,9 @@ msgstr ""
msgid "the function to be differentiated"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.grad:15
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.grad:15
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.value_and_grad:15
+#: tensorcircuit.backends.jax_backend.JaxBackend.grad:15
#: tensorcircuit.backends.jax_backend.JaxBackend.value_and_grad:15
#: tensorcircuit.backends.numpy_backend.NumpyBackend.grad:15
#: tensorcircuit.backends.numpy_backend.NumpyBackend.value_and_grad:15
@@ -8664,42 +9850,48 @@ msgid ""
"be 0"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.grad:17
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.grad:17
+#: tensorcircuit.backends.jax_backend.JaxBackend.grad:17
#: tensorcircuit.backends.numpy_backend.NumpyBackend.grad:17
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.grad:17
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.grad:17
msgid "the grad function of ``f`` with the same set of arguments as ``f``"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.i:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.i:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.i:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.i:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.i:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.i:1
msgid "Return 1.j in as a tensor compatible with the backend."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.i:3
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.i:3
+#: tensorcircuit.backends.jax_backend.JaxBackend.i:3
#: tensorcircuit.backends.numpy_backend.NumpyBackend.i:3
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.i:3
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.i:3
msgid "\"complex64\" or \"complex128\""
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.i:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.i:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.i:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.i:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.i:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.i:5
msgid "1.j tensor"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.imag:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.imag:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.imag:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.imag:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.imag:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.imag:1
msgid "Return the elementwise imaginary value of a tensor ``a``."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.imag:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.imag:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.imag:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.imag:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.imag:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.imag:5
@@ -8709,6 +9901,8 @@ msgstr ""
#: of tensorcircuit.backends.abstract_backend.ExtendedBackend.implicit_randc:1
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.stateful_randc:1
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.stateful_randn:1
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stateful_randc:1
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stateful_randn:1
#: tensorcircuit.backends.jax_backend.JaxBackend.implicit_randc:1
#: tensorcircuit.backends.jax_backend.JaxBackend.stateful_randc:1
#: tensorcircuit.backends.jax_backend.JaxBackend.stateful_randn:1
@@ -8721,6 +9915,7 @@ msgstr ""
#: of tensorcircuit.backends.abstract_backend.ExtendedBackend.implicit_randc:5
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.stateful_randc:5
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stateful_randc:5
#: tensorcircuit.backends.jax_backend.JaxBackend.implicit_randc:5
#: tensorcircuit.backends.jax_backend.JaxBackend.stateful_randc:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.stateful_randc:5
@@ -8730,6 +9925,7 @@ msgstr ""
#: of tensorcircuit.backends.abstract_backend.ExtendedBackend.implicit_randc:7
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.stateful_randc:7
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stateful_randc:7
#: tensorcircuit.backends.jax_backend.JaxBackend.implicit_randc:7
#: tensorcircuit.backends.jax_backend.JaxBackend.stateful_randc:7
#: tensorcircuit.backends.numpy_backend.NumpyBackend.stateful_randc:7
@@ -8739,6 +9935,7 @@ msgstr ""
#: of tensorcircuit.backends.abstract_backend.ExtendedBackend.implicit_randc:9
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.stateful_randc:9
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stateful_randc:9
#: tensorcircuit.backends.jax_backend.JaxBackend.implicit_randc:9
#: tensorcircuit.backends.jax_backend.JaxBackend.stateful_randc:9
#: tensorcircuit.backends.numpy_backend.NumpyBackend.stateful_randc:9
@@ -8763,6 +9960,8 @@ msgstr ""
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.implicit_randu:7
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.stateful_randn:11
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.stateful_randu:9
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stateful_randn:11
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stateful_randu:9
#: tensorcircuit.backends.jax_backend.JaxBackend.implicit_randn:3
#: tensorcircuit.backends.jax_backend.JaxBackend.implicit_randn:7
#: tensorcircuit.backends.jax_backend.JaxBackend.implicit_randu:3
@@ -8783,28 +9982,32 @@ msgstr ""
msgid "[description], defaults to \"32\""
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.index_update:1
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.index_update:1
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.index_update:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.index_update:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.index_update:1
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.index_update:1
msgid "Update `tensor` at elements defined by `mask` with value `assignee`."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.index_update:3
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.index_update:3
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.index_update:3
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.index_update:3
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.index_update:3
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.index_update:3
msgid "A `Tensor` object."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.index_update:4
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.index_update:4
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.index_update:4
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.index_update:4
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.index_update:4
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.index_update:4
msgid "A boolean mask."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.index_update:5
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.index_update:5
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.index_update:5
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.index_update:5
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.index_update:5
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.index_update:5
@@ -8813,21 +10016,24 @@ msgid ""
"`mask` is `True`."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.inv:1
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.inv:1
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.inv:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.inv:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.inv:1
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.inv:1
msgid "Compute the matrix inverse of `matrix`."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.inv:3
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.inv:3
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.inv:3
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.inv:3
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.inv:3
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.inv:3
msgid "A matrix."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.inv:5
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.inv:5
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.inv:5
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.inv:5
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.inv:5
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.inv:5
@@ -8835,6 +10041,7 @@ msgid "The inverse of `matrix`"
msgstr ""
#: of tensorcircuit.backends.abstract_backend.ExtendedBackend.is_sparse:1
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.is_sparse:1
#: tensorcircuit.backends.jax_backend.JaxBackend.is_sparse:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.is_sparse:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.is_sparse:1
@@ -8842,6 +10049,7 @@ msgid "Determine whether the type of input ``a`` is ``sparse``."
msgstr ""
#: of tensorcircuit.backends.abstract_backend.ExtendedBackend.is_sparse:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.is_sparse:3
#: tensorcircuit.backends.jax_backend.JaxBackend.is_sparse:3
#: tensorcircuit.backends.numpy_backend.NumpyBackend.is_sparse:3
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.is_sparse:3
@@ -8849,48 +10057,55 @@ msgid "input matrix ``a``"
msgstr ""
#: of tensorcircuit.backends.abstract_backend.ExtendedBackend.is_sparse:5
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.is_sparse:5
#: tensorcircuit.backends.jax_backend.JaxBackend.is_sparse:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.is_sparse:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.is_sparse:5
msgid "a bool indicating whether the matrix ``a`` is sparse"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.is_tensor:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.is_tensor:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.is_tensor:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.is_tensor:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.is_tensor:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.is_tensor:1
msgid "Return a boolean on whether ``a`` is a tensor in backend package."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.is_tensor:3
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.is_tensor:3
+#: tensorcircuit.backends.jax_backend.JaxBackend.is_tensor:3
#: tensorcircuit.backends.numpy_backend.NumpyBackend.is_tensor:3
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.is_tensor:3
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.is_tensor:3
msgid "a tensor to be determined"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.is_tensor:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.is_tensor:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.is_tensor:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.is_tensor:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.is_tensor:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.is_tensor:5
msgid "whether ``a`` is a tensor"
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.item:1
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.item:1
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.item:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.item:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.item:1
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.item:1
msgid "Return the item of a 1-element tensor."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.item:3
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.item:3
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.item:3
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.item:3
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.item:3
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.item:3
msgid "A 1-element tensor"
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.item:5
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.item:5
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.item:5
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.item:5
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.item:5
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.item:5
@@ -8926,7 +10141,8 @@ msgstr ""
msgid "outer tuple for input args, inner tuple for outputs"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.jit:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.jit:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.jit:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.jit:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.jit:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.jit:1
@@ -8936,14 +10152,16 @@ msgid ""
"Arguments to `fun`. :param kwargs: Keyword arguments to `fun`."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.jit:7
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.jit:7
+#: tensorcircuit.backends.jax_backend.JaxBackend.jit:7
#: tensorcircuit.backends.numpy_backend.NumpyBackend.jit:7
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.jit:7
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.jit:7
msgid "jitted/graph-compiled version of `fun`, or just `fun`."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.jvp:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.jvp:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.jvp:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.jvp:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.jvp:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.jvp:1
@@ -8952,14 +10170,17 @@ msgid ""
" Strictly speaking, this function is value_and_jvp."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.jvp:4
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.jvp:4
+#: tensorcircuit.backends.jax_backend.JaxBackend.jvp:4
#: tensorcircuit.backends.numpy_backend.NumpyBackend.jvp:4
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.jvp:4
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.jvp:4
msgid "The function to compute jvp"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.jvp:6
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.jvp:6
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.vjp:7
+#: tensorcircuit.backends.jax_backend.JaxBackend.jvp:6
#: tensorcircuit.backends.jax_backend.JaxBackend.vjp:7
#: tensorcircuit.backends.numpy_backend.NumpyBackend.jvp:6
#: tensorcircuit.backends.numpy_backend.NumpyBackend.vjp:7
@@ -8970,14 +10191,16 @@ msgstr ""
msgid "input for ``f``"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.jvp:8
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.jvp:8
+#: tensorcircuit.backends.jax_backend.JaxBackend.jvp:8
#: tensorcircuit.backends.numpy_backend.NumpyBackend.jvp:8
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.jvp:8
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.jvp:8
msgid "tangents"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.jvp:10
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.jvp:10
+#: tensorcircuit.backends.jax_backend.JaxBackend.jvp:10
#: tensorcircuit.backends.numpy_backend.NumpyBackend.jvp:10
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.jvp:10
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.jvp:10
@@ -8986,28 +10209,34 @@ msgid ""
"output of ``f``"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.kron:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.kron:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.kron:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.kron:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.kron:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.kron:1
msgid "Return the kronecker product of two matrices ``a`` and ``b``."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.kron:7
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.kron:7
+#: tensorcircuit.backends.jax_backend.JaxBackend.kron:7
#: tensorcircuit.backends.numpy_backend.NumpyBackend.kron:7
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.kron:7
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.kron:7
msgid "kronecker product of ``a`` and ``b``"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.left_shift:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.left_shift:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.left_shift:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.left_shift:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.left_shift:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.left_shift:1
msgid "Shift the bits of an integer x to the left y bits."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.left_shift:3
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.left_shift:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.mod:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.right_shift:3
+#: tensorcircuit.backends.jax_backend.JaxBackend.left_shift:3
#: tensorcircuit.backends.jax_backend.JaxBackend.mod:3
#: tensorcircuit.backends.jax_backend.JaxBackend.right_shift:3
#: tensorcircuit.backends.numpy_backend.NumpyBackend.left_shift:3
@@ -9022,7 +10251,9 @@ msgstr ""
msgid "input values"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.left_shift:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.left_shift:5
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.right_shift:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.left_shift:5
#: tensorcircuit.backends.jax_backend.JaxBackend.right_shift:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.left_shift:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.right_shift:5
@@ -9033,7 +10264,9 @@ msgstr ""
msgid "Number of bits shift to ``x``"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.left_shift:7
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.left_shift:7
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.right_shift:7
+#: tensorcircuit.backends.jax_backend.JaxBackend.left_shift:7
#: tensorcircuit.backends.jax_backend.JaxBackend.right_shift:7
#: tensorcircuit.backends.numpy_backend.NumpyBackend.left_shift:7
#: tensorcircuit.backends.numpy_backend.NumpyBackend.right_shift:7
@@ -9052,6 +10285,7 @@ msgid "Return elementwise natural logarithm of `tensor`. :param tensor: A tensor
msgstr ""
#: of tensorcircuit.backends.tensorflow_backend._matmul_tf:1
+#: tensornetwork.backends.abstract_backend.AbstractBackend.matmul:1
#: tensornetwork.backends.jax.jax_backend.JaxBackend.matmul:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.matmul:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.matmul:1
@@ -9062,6 +10296,7 @@ msgid ""
msgstr ""
#: of tensorcircuit.backends.tensorflow_backend._matmul_tf:5
+#: tensornetwork.backends.abstract_backend.AbstractBackend.matmul:5
#: tensornetwork.backends.jax.jax_backend.JaxBackend.matmul:5
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.matmul:5
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.matmul:5
@@ -9069,6 +10304,7 @@ msgid "matrices."
msgstr ""
#: of tensorcircuit.backends.tensorflow_backend._matmul_tf:6
+#: tensornetwork.backends.abstract_backend.AbstractBackend.matmul:6
#: tensornetwork.backends.jax.jax_backend.JaxBackend.matmul:6
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.matmul:6
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.matmul:6
@@ -9078,6 +10314,7 @@ msgid ""
msgstr ""
#: of tensorcircuit.backends.tensorflow_backend._matmul_tf:8
+#: tensornetwork.backends.abstract_backend.AbstractBackend.matmul:8
#: tensornetwork.backends.jax.jax_backend.JaxBackend.matmul:8
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.matmul:8
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.matmul:8
@@ -9087,20 +10324,24 @@ msgid ""
msgstr ""
#: of tensorcircuit.backends.tensorflow_backend._matmul_tf:12
+#: tensornetwork.backends.abstract_backend.AbstractBackend.matmul:12
#: tensornetwork.backends.jax.jax_backend.JaxBackend.matmul:12
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.matmul:12
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.matmul:12
msgid "The result of performing the matmul."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.max:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.max:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.max:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.max:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.max:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.max:1
msgid "Return the maximum of an array or maximum along an axis."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.max:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.max:5
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.min:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.max:5
#: tensorcircuit.backends.jax_backend.JaxBackend.min:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.max:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.min:5
@@ -9112,42 +10353,49 @@ msgstr ""
msgid "[description], defaults to None"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.mean:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.mean:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.mean:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.mean:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.mean:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.mean:1
msgid "Compute the arithmetic mean for ``a`` along the specified ``axis``."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.mean:3
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.mean:3
+#: tensorcircuit.backends.jax_backend.JaxBackend.mean:3
#: tensorcircuit.backends.numpy_backend.NumpyBackend.mean:3
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.mean:3
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.mean:3
msgid "tensor to take average"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.mean:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.mean:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.mean:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.mean:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.mean:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.mean:5
msgid "the axis to take mean, defaults to None indicating sum over flatten array"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.mean:7
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.mean:7
+#: tensorcircuit.backends.jax_backend.JaxBackend.mean:7
#: tensorcircuit.backends.numpy_backend.NumpyBackend.mean:7
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.mean:7
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.mean:7
+#: tensorcircuit.interfaces.torch.torch_interface_kws:26
msgid "_description_, defaults to False"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.min:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.min:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.min:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.min:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.min:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.min:1
msgid "Return the minimum of an array or minimum along an axis."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.mod:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.mod:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.mod:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.mod:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.mod:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.mod:1
@@ -9156,28 +10404,32 @@ msgid ""
"consistent)"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.mod:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.mod:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.mod:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.mod:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.mod:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.mod:5
msgid "mod ``y``"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.mod:7
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.mod:7
+#: tensorcircuit.backends.jax_backend.JaxBackend.mod:7
#: tensorcircuit.backends.numpy_backend.NumpyBackend.mod:7
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.mod:7
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.mod:7
msgid "results"
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.multiply:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.multiply:1
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.multiply:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.multiply:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.multiply:1
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.multiply:1
msgid "Return the default multiplication of `tensor`."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.multiply:3
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.multiply:3
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.multiply:3
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.multiply:3
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.multiply:3
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.multiply:3
@@ -9186,14 +10438,16 @@ msgid ""
":param tensor2: A tensor."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.norm:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.norm:1
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.norm:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.norm:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.norm:1
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.norm:1
msgid "Calculate the L2-norm of the elements of `tensor`"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.numpy:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.numpy:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.numpy:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.numpy:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.numpy:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.numpy:1
@@ -9202,7 +10456,8 @@ msgid ""
"function."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.numpy:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.numpy:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.numpy:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.numpy:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.numpy:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.numpy:5
@@ -9213,7 +10468,8 @@ msgstr ""
msgid "See doc for :py:meth:`onehot`"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.onehot:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.onehot:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.onehot:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.onehot:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.onehot:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.onehot:1
@@ -9223,28 +10479,32 @@ msgid ""
"one:"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.onehot:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.onehot:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.onehot:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.onehot:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.onehot:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.onehot:5
msgid "input tensor"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.onehot:7
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.onehot:7
+#: tensorcircuit.backends.jax_backend.JaxBackend.onehot:7
#: tensorcircuit.backends.numpy_backend.NumpyBackend.onehot:7
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.onehot:7
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.onehot:7
msgid "number of features in onehot dimension"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.onehot:9
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.onehot:9
+#: tensorcircuit.backends.jax_backend.JaxBackend.onehot:9
#: tensorcircuit.backends.numpy_backend.NumpyBackend.onehot:9
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.onehot:9
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.onehot:9
msgid "onehot tensor with the last extra dimension"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.ones:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.ones:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.ones:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.ones:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.ones:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.ones:1
@@ -9256,7 +10516,8 @@ msgid ""
"shape: int :param dtype: The dtype of the returned matrix."
msgstr ""
-#: of tensorcircuit.backends.tensorflow_backend._outer_product_tf:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.outer_product:1
+#: tensorcircuit.backends.tensorflow_backend._outer_product_tf:1
#: tensornetwork.backends.jax.jax_backend.JaxBackend.outer_product:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.outer_product:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.outer_product:1
@@ -9287,30 +10548,71 @@ msgstr ""
msgid "The pivoted tensor."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.power:1
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.power:7
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.power:7
+#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.power:7
+msgid "Returns the exponentiation of tensor a raised to b."
+msgstr ""
+
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.power:4
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.power:4
+#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.power:4
+msgid "If b is a tensor, then the exponentiation is element-wise"
+msgstr ""
+
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.power:3
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.power:3
+#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.power:3
msgid ""
-"Returns the power of tensor a to the value of b. In the case b is a "
-"tensor, then the power is by element"
+"between the two tensors, with a as the base and b as the power. Note that"
+" a and b must be broadcastable to the same shape if b is a tensor."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.power:3
-msgid "with a as the base and b as the exponent."
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.power:7
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.power:7
+#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.power:7
+msgid "If b is a scalar, then the exponentiation is each value in a"
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.power:5
-msgid "In the case b is a scalar, then the power of each value in a"
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.power:7
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.power:7
+#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.power:7
+msgid "raised to the power of b."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.power:5
-msgid "is raised to the exponent of b."
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.power:9
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.power:9
+#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.power:9
+msgid "The tensor containing the bases."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.power:7
-msgid "The tensor that contains the base."
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.power:10
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.power:10
+#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.power:10
+msgid "The tensor containing the powers; or a single scalar as the power."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.power:8
-msgid "The tensor that contains the exponent or a single scalar."
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.power:12
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.power:12
+#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.power:12
+msgid ""
+"The tensor that is each element of a raised to the power of b. Note "
+"that the shape of the returned tensor is that produced by the broadcast"
+" of a and b."
+msgstr ""
+
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.power:15
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.power:15
+#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.power:15
+msgid "The tensor that is each element of a raised to the"
+msgstr ""
+
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.power:15
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.power:15
+#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.power:15
+msgid ""
+"power of b. Note that the shape of the returned tensor is that produced "
+"by the broadcast of a and b."
msgstr ""
#: of
@@ -9350,13 +10652,13 @@ msgstr ""
msgid "The drawn sample as an int tensor"
msgstr ""
-#: of tensorcircuit.backends.jax_backend._qr_jax:1
-msgid ""
-"Computes the QR decomposition of a tensor. See "
-"tensornetwork.backends.tensorflow.decompositions for details."
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.qr:1
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.qr:1
+msgid "Computes the QR decomposition of a tensor."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.randn:1
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.randn:1
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.randn:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.randn:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.randn:1
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.randn:1
@@ -9366,7 +10668,8 @@ msgid ""
"`ShapeType` object (for block-sparse backends)."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.randn:5
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.randn:5
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.randn:5
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.randn:5
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.randn:5
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.randn:5
@@ -9386,14 +10689,16 @@ msgid ""
"utility to write backend agnostic code instead of doing magic things."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.random_uniform:1
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.random_uniform:1
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.random_uniform:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.random_uniform:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.random_uniform:1
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.random_uniform:1
msgid "Return a random uniform matrix of dimension `dim`."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.random_uniform:3
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.random_uniform:3
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.random_uniform:3
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.random_uniform:3
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.random_uniform:3
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.random_uniform:3
@@ -9407,28 +10712,32 @@ msgid ""
"random number generator"
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.random_uniform:14
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.random_uniform:14
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.random_uniform:14
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.random_uniform:14
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.random_uniform:14
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.random_uniform:14
msgid "random uniform initialized tensor."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.real:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.real:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.real:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.real:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.real:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.real:1
msgid "Return the elementwise real value of a tensor ``a``."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.real:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.real:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.real:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.real:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.real:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.real:5
msgid "real value of ``a``"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.relu:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.relu:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.relu:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.relu:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.relu:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.relu:1
@@ -9437,28 +10746,32 @@ msgid ""
"function:"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.relu:4
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.relu:4
+#: tensorcircuit.backends.jax_backend.JaxBackend.relu:4
#: tensorcircuit.backends.numpy_backend.NumpyBackend.relu:4
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.relu:4
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.relu:4
msgid "\\mathrm{relu}(x)=\\max(x,0)"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.relu:11
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.relu:11
+#: tensorcircuit.backends.jax_backend.JaxBackend.relu:11
#: tensorcircuit.backends.numpy_backend.NumpyBackend.relu:11
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.relu:11
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.relu:11
msgid "Tensor after relu"
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.reshape:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.reshape:1
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.reshape:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.reshape:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.reshape:1
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.reshape:1
msgid "Reshape tensor to the given shape."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.reshape:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.reshape:5
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.reshape:5
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.reshape:5
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.reshape:5
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.reshape:5
@@ -9493,20 +10806,27 @@ msgstr ""
msgid "1D tensor in reverse order"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.right_shift:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.right_shift:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.right_shift:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.right_shift:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.right_shift:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.right_shift:1
msgid "Shift the bits of an integer x to the right y bits."
msgstr ""
-#: of tensorcircuit.backends.jax_backend._rq_jax:1
-msgid ""
-"Computes the RQ (reversed QR) decomposition of a tensor. See "
-"tensornetwork.backends.tensorflow.decompositions for details."
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.rq:1
+#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.rq:1
+msgid "Computes the RQ (reversed QR) decomposition of a tensor."
+msgstr ""
+
+#: of tensorcircuit.backends.abstract_backend.ExtendedBackend.scan:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.scan:1
+#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.scan:1
+msgid "This API follows ``tf.scan`` covention, i.e. no ys supported as jax"
msgstr ""
#: of tensorcircuit.backends.abstract_backend.ExtendedBackend.scatter:1
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.scatter:1
#: tensorcircuit.backends.jax_backend.JaxBackend.scatter:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.scatter:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.scatter:1
@@ -9515,28 +10835,32 @@ msgid ""
"shape with rank 2 for now."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.searchsorted:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.searchsorted:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.searchsorted:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.searchsorted:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.searchsorted:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.searchsorted:1
msgid "Find indices where elements should be inserted to maintain order."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.searchsorted:3
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.searchsorted:3
+#: tensorcircuit.backends.jax_backend.JaxBackend.searchsorted:3
#: tensorcircuit.backends.numpy_backend.NumpyBackend.searchsorted:3
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.searchsorted:3
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.searchsorted:3
msgid "input array sorted in ascending order"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.searchsorted:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.searchsorted:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.searchsorted:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.searchsorted:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.searchsorted:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.searchsorted:5
msgid "value to inserted"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.searchsorted:7
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.searchsorted:7
+#: tensorcircuit.backends.jax_backend.JaxBackend.searchsorted:7
#: tensorcircuit.backends.numpy_backend.NumpyBackend.searchsorted:7
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.searchsorted:7
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.searchsorted:7
@@ -9546,7 +10870,8 @@ msgid ""
"return either 0 or N (where N is the length of a), defaults to \"left\""
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.searchsorted:12
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.searchsorted:12
+#: tensorcircuit.backends.jax_backend.JaxBackend.searchsorted:12
#: tensorcircuit.backends.numpy_backend.NumpyBackend.searchsorted:12
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.searchsorted:12
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.searchsorted:12
@@ -9561,7 +10886,7 @@ msgstr ""
msgid "Return a string that serializes the given tensor."
msgstr ""
-#: of
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.sign:4
#: tensornetwork.backends.abstract_backend.AbstractBackend.serialize_tensor:3
#: tensornetwork.backends.jax.jax_backend.JaxBackend.sign:7
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.serialize_tensor:3
@@ -9579,6 +10904,7 @@ msgstr ""
#: of
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.set_random_state:1
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.set_random_state:1
#: tensorcircuit.backends.jax_backend.JaxBackend.set_random_state:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.set_random_state:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.set_random_state:1
@@ -9587,6 +10913,7 @@ msgstr ""
#: of
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.set_random_state:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.set_random_state:3
#: tensorcircuit.backends.jax_backend.JaxBackend.set_random_state:3
#: tensorcircuit.backends.numpy_backend.NumpyBackend.set_random_state:3
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.set_random_state:3
@@ -9595,6 +10922,7 @@ msgstr ""
#: of
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.set_random_state:5
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.set_random_state:5
#: tensorcircuit.backends.jax_backend.JaxBackend.set_random_state:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.set_random_state:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.set_random_state:5
@@ -9603,56 +10931,64 @@ msgid ""
" the state on the backend"
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.shape_concat:1
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.shape_concat:1
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.shape_concat:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.shape_concat:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.shape_concat:1
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.shape_concat:1
msgid "Concatenate a sequence of tensors together about the given axis."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.shape_prod:1
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.shape_prod:1
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.shape_prod:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.shape_prod:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.shape_prod:1
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.shape_prod:1
msgid "Take the product of all of the elements in values"
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.shape_tensor:1
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.shape_tensor:1
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.shape_tensor:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.shape_tensor:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.shape_tensor:1
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.shape_tensor:1
msgid "Get the shape of a tensor."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.shape_tensor:5
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.shape_tensor:5
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.shape_tensor:5
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.shape_tensor:5
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.shape_tensor:5
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.shape_tensor:5
msgid "The shape of the input tensor returned as another tensor."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.shape_tuple:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.shape_tuple:1
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.shape_tuple:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.shape_tuple:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.shape_tuple:1
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.shape_tuple:1
msgid "Get the shape of a tensor as a tuple of integers."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.shape_tuple:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.shape_tuple:5
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.shape_tuple:5
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.shape_tuple:5
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.shape_tuple:5
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.shape_tuple:5
msgid "The shape of the input tensor returned as a tuple of ints."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.sigmoid:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.sigmoid:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.sigmoid:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.sigmoid:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.sigmoid:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.sigmoid:1
msgid "Compute sigmoid of input ``a``"
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.sign:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.sign:1
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.sign:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.sign:1
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.sign:1
msgid ""
@@ -9660,41 +10996,40 @@ msgid ""
"tensor[i] > 0, == 0, and < 0 respectively."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.sign:4
-msgid ""
-"For complex input the behaviour of this function may depend on the "
-"backend. The Jax backend version returns y[i] = x[i]/sqrt(x[i]^2)."
-msgstr ""
-
-#: of tensorcircuit.backends.jax_backend.JaxBackend.sin:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.sin:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.sin:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.sin:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.sin:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.sin:1
msgid "Return sin of `tensor`. :param tensor: A tensor."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.sinh:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.sinh:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.sinh:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.sinh:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.sinh:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.sinh:1
msgid "Return the sinh of a tensor ``a``."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.sinh:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.sinh:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.sinh:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.sinh:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.sinh:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.sinh:5
msgid "sinh of ``a``"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.size:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.size:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.size:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.size:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.size:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.size:1
msgid "Return the total number of elements in ``a`` in tensor form."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.size:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.size:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.size:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.size:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.size:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.size:5
@@ -9709,28 +11044,32 @@ msgstr ""
msgid "the total number of elements in tensor ``a``"
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.slice:1
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.slice:1
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.slice:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.slice:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.slice:1
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.slice:1
msgid "Obtains a slice of a tensor based on start_indices and slice_sizes."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.slice:4
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.slice:4
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.slice:4
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.slice:4
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.slice:4
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.slice:4
msgid "Tuple of integers denoting start indices of slice."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.slice:5
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.slice:5
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.slice:5
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.slice:5
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.slice:5
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.slice:5
msgid "Tuple of integers denoting size of slice along each axis."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.softmax:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.softmax:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.softmax:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.softmax:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.softmax:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.softmax:1
@@ -9739,14 +11078,16 @@ msgid ""
"range [0,1] such that the elements along axis sum to 1."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.softmax:4
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.softmax:4
+#: tensorcircuit.backends.jax_backend.JaxBackend.softmax:4
#: tensorcircuit.backends.numpy_backend.NumpyBackend.softmax:4
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.softmax:4
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.softmax:4
msgid "\\mathrm{softmax}(x) = \\frac{\\exp(x_i)}{\\sum_j \\exp(x_j)}"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.softmax:11
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.softmax:11
+#: tensorcircuit.backends.jax_backend.JaxBackend.softmax:11
#: tensorcircuit.backends.numpy_backend.NumpyBackend.softmax:11
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.softmax:11
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.softmax:11
@@ -9755,7 +11096,9 @@ msgid ""
"all axis sum."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.softmax:13
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.softmax:13
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stack:7
+#: tensorcircuit.backends.jax_backend.JaxBackend.softmax:13
#: tensorcircuit.backends.jax_backend.JaxBackend.stack:7
#: tensorcircuit.backends.numpy_backend.NumpyBackend.softmax:13
#: tensorcircuit.backends.numpy_backend.NumpyBackend.stack:7
@@ -9766,28 +11109,32 @@ msgstr ""
msgid "concatenated tensor"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.solve:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.solve:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.solve:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.solve:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.solve:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.solve:1
msgid "Solve the linear system Ax=b and return the solution x."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.solve:3
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.solve:3
+#: tensorcircuit.backends.jax_backend.JaxBackend.solve:3
#: tensorcircuit.backends.numpy_backend.NumpyBackend.solve:3
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.solve:3
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.solve:3
msgid "The multiplied matrix."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.solve:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.solve:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.solve:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.solve:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.solve:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.solve:5
msgid "The resulted matrix."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.solve:7
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.solve:7
+#: tensorcircuit.backends.jax_backend.JaxBackend.solve:7
#: tensorcircuit.backends.numpy_backend.NumpyBackend.solve:7
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.solve:7
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.solve:7
@@ -9796,6 +11143,7 @@ msgstr ""
#: of
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.sparse_dense_matmul:1
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.sparse_dense_matmul:1
#: tensorcircuit.backends.jax_backend.JaxBackend.sparse_dense_matmul:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.sparse_dense_matmul:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.sparse_dense_matmul:1
@@ -9805,6 +11153,8 @@ msgstr ""
#: of
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.sparse_dense_matmul:3
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.to_dense:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.sparse_dense_matmul:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.to_dense:3
#: tensorcircuit.backends.jax_backend.JaxBackend.sparse_dense_matmul:3
#: tensorcircuit.backends.jax_backend.JaxBackend.to_dense:3
#: tensorcircuit.backends.numpy_backend.NumpyBackend.sparse_dense_matmul:3
@@ -9816,6 +11166,7 @@ msgstr ""
#: of
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.sparse_dense_matmul:5
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.sparse_dense_matmul:5
#: tensorcircuit.backends.jax_backend.JaxBackend.sparse_dense_matmul:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.sparse_dense_matmul:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.sparse_dense_matmul:5
@@ -9824,13 +11175,15 @@ msgstr ""
#: of
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.sparse_dense_matmul:7
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.sparse_dense_matmul:7
#: tensorcircuit.backends.jax_backend.JaxBackend.sparse_dense_matmul:7
#: tensorcircuit.backends.numpy_backend.NumpyBackend.sparse_dense_matmul:7
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.sparse_dense_matmul:7
msgid "dense matrix"
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.sqrt:1
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.sqrt:1
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.sqrt:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.sqrt:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.sqrt:1
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.sqrt:1
@@ -9845,21 +11198,24 @@ msgstr ""
msgid "sqrtm of ``a``"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.stack:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.stack:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.stack:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.stack:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.stack:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.stack:1
msgid "Concatenates a sequence of tensors ``a`` along a new dimension ``axis``."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.stack:3
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.stack:3
+#: tensorcircuit.backends.jax_backend.JaxBackend.stack:3
#: tensorcircuit.backends.numpy_backend.NumpyBackend.stack:3
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.stack:3
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.stack:3
msgid "List of tensors in the same shape"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.stack:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.stack:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.stack:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.stack:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.stack:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.stack:5
@@ -9868,6 +11224,8 @@ msgstr ""
#: of tensorcircuit.backends.abstract_backend.ExtendedBackend.stateful_randn:5
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.stateful_randu:3
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stateful_randn:5
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stateful_randu:3
#: tensorcircuit.backends.jax_backend.JaxBackend.stateful_randn:5
#: tensorcircuit.backends.jax_backend.JaxBackend.stateful_randu:3
#: tensorcircuit.backends.numpy_backend.NumpyBackend.stateful_randn:5
@@ -9878,6 +11236,7 @@ msgid "stateful register for each package"
msgstr ""
#: of tensorcircuit.backends.abstract_backend.ExtendedBackend.stateful_randn:7
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stateful_randn:7
#: tensorcircuit.backends.jax_backend.JaxBackend.stateful_randn:7
#: tensorcircuit.backends.numpy_backend.NumpyBackend.stateful_randn:7
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.stateful_randn:7
@@ -9886,6 +11245,8 @@ msgstr ""
#: of tensorcircuit.backends.abstract_backend.ExtendedBackend.stateful_randn:13
#: tensorcircuit.backends.abstract_backend.ExtendedBackend.stateful_randu:11
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stateful_randn:13
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stateful_randu:11
#: tensorcircuit.backends.jax_backend.JaxBackend.stateful_randn:13
#: tensorcircuit.backends.jax_backend.JaxBackend.stateful_randu:11
#: tensorcircuit.backends.numpy_backend.NumpyBackend.stateful_randn:13
@@ -9896,6 +11257,7 @@ msgid "only real data type is supported, \"32\" or \"64\", defaults to \"32\""
msgstr ""
#: of tensorcircuit.backends.abstract_backend.ExtendedBackend.stateful_randu:1
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stateful_randu:1
#: tensorcircuit.backends.jax_backend.JaxBackend.stateful_randu:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.stateful_randu:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.stateful_randu:1
@@ -9903,20 +11265,23 @@ msgid "Uniform random sampler from ``low`` to ``high``."
msgstr ""
#: of tensorcircuit.backends.abstract_backend.ExtendedBackend.stateful_randu:5
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.stateful_randu:5
#: tensorcircuit.backends.jax_backend.JaxBackend.stateful_randu:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.stateful_randu:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.stateful_randu:5
msgid "shape of output sampling tensor, defaults to 1"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.std:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.std:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.std:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.std:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.std:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.std:1
msgid "Compute the standard deviation along the specified axis."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.std:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.std:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.std:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.std:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.std:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.std:5
@@ -9925,7 +11290,8 @@ msgid ""
"None, implying all axis"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.std:8
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.std:8
+#: tensorcircuit.backends.jax_backend.JaxBackend.std:8
#: tensorcircuit.backends.numpy_backend.NumpyBackend.std:8
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.std:8
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.std:8
@@ -9934,14 +11300,16 @@ msgid ""
" as dimensions with size one, defaults to False"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.stop_gradient:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.stop_gradient:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.stop_gradient:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.stop_gradient:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.stop_gradient:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.stop_gradient:1
msgid "Stop backpropagation from ``a``."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.subtraction:1
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.subtraction:1
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.subtraction:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.subtraction:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.subtraction:1
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.subtraction:1
@@ -9950,7 +11318,8 @@ msgid ""
"implementation. :param tensor1: A tensor. :param tensor2: A tensor."
msgstr ""
-#: of tensorcircuit.backends.numpy_backend._sum_numpy:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.sum:1
+#: tensorcircuit.backends.numpy_backend._sum_numpy:1
#: tensorcircuit.backends.pytorch_backend._sum_torch:1
#: tensornetwork.backends.jax.jax_backend.JaxBackend.sum:1
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.sum:1
@@ -9959,7 +11328,8 @@ msgid ""
"Tensor with the summed axis removed. :param tensor: An input tensor."
msgstr ""
-#: of tensorcircuit.backends.numpy_backend._sum_numpy:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.sum:5
+#: tensorcircuit.backends.numpy_backend._sum_numpy:5
#: tensorcircuit.backends.pytorch_backend._sum_torch:5
#: tensornetwork.backends.jax.jax_backend.JaxBackend.sum:5
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.sum:5
@@ -9968,14 +11338,16 @@ msgid ""
" reduced by 1."
msgstr ""
-#: of tensorcircuit.backends.numpy_backend._sum_numpy:7
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.sum:7
+#: tensorcircuit.backends.numpy_backend._sum_numpy:7
#: tensorcircuit.backends.pytorch_backend._sum_torch:7
#: tensornetwork.backends.jax.jax_backend.JaxBackend.sum:7
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.sum:7
msgid "The result of performing the summation. The order of the tensor"
msgstr ""
-#: of tensorcircuit.backends.numpy_backend._sum_numpy:8
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.sum:8
+#: tensorcircuit.backends.numpy_backend._sum_numpy:8
#: tensorcircuit.backends.pytorch_backend._sum_torch:8
#: tensornetwork.backends.jax.jax_backend.JaxBackend.sum:8
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.sum:8
@@ -9983,16 +11355,18 @@ msgid "will be reduced by 1."
msgstr ""
#: of tensorcircuit.backends.jax_backend._svd_jax:1
+#: tensorcircuit.backends.tensorflow_backend._svd_tf:1
+#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:1
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:1
msgid "Computes the singular value decomposition (SVD) of a tensor."
msgstr ""
#: of tensorcircuit.backends.jax_backend._svd_jax:3
+#: tensorcircuit.backends.tensorflow_backend._svd_tf:3
+#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:3
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:3
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:3
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:3
msgid ""
"The SVD is performed by treating the tensor as a matrix, with an "
"effective left (row) index resulting from combining the axes "
@@ -10001,9 +11375,10 @@ msgid ""
msgstr ""
#: of tensorcircuit.backends.jax_backend._svd_jax:8
+#: tensorcircuit.backends.tensorflow_backend._svd_tf:8
+#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:8
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:8
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:8
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:8
msgid ""
"For example, if `tensor` had a shape (2, 3, 4, 5) and `pivot_axis` was 2,"
" then `u` would have shape (2, 3, 6), `s` would have shape (6), and `vh` "
@@ -10011,18 +11386,20 @@ msgid ""
msgstr ""
#: of tensorcircuit.backends.jax_backend._svd_jax:12
+#: tensorcircuit.backends.tensorflow_backend._svd_tf:12
+#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:12
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:12
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:12
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:12
msgid ""
"If `max_singular_values` is set to an integer, the SVD is truncated to "
"keep at most this many singular values."
msgstr ""
#: of tensorcircuit.backends.jax_backend._svd_jax:15
+#: tensorcircuit.backends.tensorflow_backend._svd_tf:15
+#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:15
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:15
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:15
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:15
msgid ""
"If `max_truncation_error > 0`, as many singular values will be truncated "
"as possible, so that the truncation error (the norm of discarded singular"
@@ -10032,9 +11409,9 @@ msgid ""
msgstr ""
#: of tensorcircuit.backends.jax_backend._svd_jax:21
+#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:21
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:21
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:21
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:21
msgid ""
"If both `max_singular_values` and `max_truncation_error` are specified, "
"the number of retained singular values will be `min(max_singular_values, "
@@ -10044,23 +11421,23 @@ msgid ""
msgstr ""
#: of tensorcircuit.backends.jax_backend._svd_jax:27
+#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:27
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:27
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:27
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:27
msgid "The output consists of three tensors `u, s, vh` such that: ```python"
msgstr ""
#: of tensorcircuit.backends.jax_backend._svd_jax:29
+#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:29
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:29
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:29
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:29
msgid "u[i1,...,iN, j] * s[j] * vh[j, k1,...,kM] == tensor[i1,...,iN, k1,...,kM]"
msgstr ""
#: of tensorcircuit.backends.jax_backend._svd_jax:30
+#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:30
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:30
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:30
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:30
msgid ""
"``` Note that the output ordering matches numpy.linalg.svd rather than "
"tf.svd."
@@ -10071,9 +11448,9 @@ msgstr ""
#: tensorcircuit.backends.pytorch_backend._rq_torch:18
#: tensorcircuit.backends.tensorflow_backend._qr_tf:18
#: tensorcircuit.backends.tensorflow_backend._rq_tf:18
+#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:33
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:33
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:33
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:33
msgid "A tensor to be decomposed."
msgstr ""
@@ -10082,39 +11459,39 @@ msgstr ""
#: tensorcircuit.backends.pytorch_backend._rq_torch:20
#: tensorcircuit.backends.tensorflow_backend._qr_tf:20
#: tensorcircuit.backends.tensorflow_backend._rq_tf:20
+#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:34
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:34
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:34
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:34
msgid "Where to split the tensor's axes before flattening into a matrix."
msgstr ""
#: of tensorcircuit.backends.jax_backend._svd_jax:36
+#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:36
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:36
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:36
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:36
msgid "The number of singular values to keep, or `None` to keep them all."
msgstr ""
#: of tensorcircuit.backends.jax_backend._svd_jax:38
+#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:38
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:38
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:38
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:38
msgid "The maximum allowed truncation error or `None` to not do any truncation."
msgstr ""
#: of tensorcircuit.backends.jax_backend._svd_jax:40
#: tensorcircuit.cons.split_rules:7
#: tensorcircuit.mps_base.FiniteMPS.apply_two_site_gate:24
+#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:40
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:40
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:40
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:40
msgid "Multiply `max_truncation_err` with the largest singular value."
msgstr ""
#: of tensorcircuit.backends.jax_backend._svd_jax:42
+#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:42
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:42
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:42
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:42
msgid ""
"Left tensor factor. s: Vector of ordered singular values from largest to "
"smallest. vh: Right tensor factor. s_rest: Vector of discarded singular "
@@ -10122,9 +11499,9 @@ msgid ""
msgstr ""
#: of tensorcircuit.backends.jax_backend._svd_jax:42
+#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:42
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:42
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:42
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:42
msgid ""
"Left tensor factor. s: Vector of ordered singular values from largest to "
"smallest. vh: Right tensor factor. s_rest: Vector of discarded singular "
@@ -10132,76 +11509,87 @@ msgid ""
msgstr ""
#: of tensorcircuit.backends.jax_backend._svd_jax:46
+#: tensorcircuit.backends.tensorflow_backend._svd_tf:49
+#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:46
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:46
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:46
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:46
msgid "truncation)."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.switch:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.switch:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.switch:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.switch:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.switch:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.switch:1
msgid "``branches[index]()``"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.tan:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.tan:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.tan:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.tan:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.tan:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.tan:1
msgid "Return the tan of a tensor ``a``."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.tan:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.tan:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.tan:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.tan:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.tan:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.tan:5
msgid "tan of ``a``"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.tanh:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.tanh:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.tanh:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.tanh:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.tanh:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.tanh:1
msgid "Return the tanh of a tensor ``a``."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.tanh:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.tanh:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.tanh:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.tanh:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.tanh:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.tanh:5
msgid "tanh of ``a``"
msgstr ""
-#: of tensorcircuit.backends.tensorflow_backend._tensordot_tf:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.tensordot:1
+#: tensorcircuit.backends.tensorflow_backend._tensordot_tf:1
#: tensornetwork.backends.jax.jax_backend.JaxBackend.tensordot:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.tensordot:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.tensordot:1
msgid "Do a tensordot of tensors `a` and `b` over the given axes."
msgstr ""
-#: of tensorcircuit.backends.tensorflow_backend._tensordot_tf:4
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.tensordot:4
+#: tensorcircuit.backends.tensorflow_backend._tensordot_tf:4
#: tensornetwork.backends.jax.jax_backend.JaxBackend.tensordot:4
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.tensordot:4
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.tensordot:4
msgid "Another tensor."
msgstr ""
-#: of tensorcircuit.backends.tensorflow_backend._tensordot_tf:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.tensordot:5
+#: tensorcircuit.backends.tensorflow_backend._tensordot_tf:5
#: tensornetwork.backends.jax.jax_backend.JaxBackend.tensordot:5
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.tensordot:5
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.tensordot:5
msgid "Two lists of integers. These values are the contraction axes."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.tile:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.tile:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.tile:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.tile:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.tile:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.tile:1
msgid "Constructs a tensor by tiling a given tensor."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.tile:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.tile:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.tile:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.tile:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.tile:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.tile:5
@@ -10209,6 +11597,7 @@ msgid "1d tensor with length the same as the rank of ``a``"
msgstr ""
#: of tensorcircuit.backends.abstract_backend.ExtendedBackend.to_dense:1
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.to_dense:1
#: tensorcircuit.backends.jax_backend.JaxBackend.to_dense:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.to_dense:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.to_dense:1
@@ -10216,6 +11605,7 @@ msgid "Convert a sparse matrix to dense tensor."
msgstr ""
#: of tensorcircuit.backends.abstract_backend.ExtendedBackend.to_dense:5
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.to_dense:5
#: tensorcircuit.backends.jax_backend.JaxBackend.to_dense:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.to_dense:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.to_dense:5
@@ -10229,14 +11619,16 @@ msgstr ""
msgid "Transform the tensor ``a`` as a dlpack capsule"
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.trace:1
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.trace:1
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.trace:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.trace:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.trace:1
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.trace:1
msgid "Return summed entries along diagonals."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.trace:3
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.trace:3
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.trace:3
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.trace:3
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.trace:3
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.trace:3
@@ -10247,14 +11639,16 @@ msgid ""
" are used to determine the 2-D sub-array whose diagonal is summed."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.trace:19
+#: of tensornetwork.backends.abstract_backend.AbstractBackend.trace:19
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.trace:19
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.trace:19
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.trace:31
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.trace:28
msgid "The batched summed diagonals."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.transpose:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.transpose:1
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.transpose:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.transpose:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.transpose:1
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.transpose:1
@@ -10264,7 +11658,8 @@ msgid ""
"the axes."
msgstr ""
-#: of tensornetwork.backends.jax.jax_backend.JaxBackend.transpose:6
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.transpose:6
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.transpose:6
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.transpose:6
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.transpose:6
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.transpose:6
@@ -10345,7 +11740,8 @@ msgstr ""
msgid "Packed pytree"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.unique_with_counts:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.unique_with_counts:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.unique_with_counts:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.unique_with_counts:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.unique_with_counts:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.unique_with_counts:1
@@ -10354,21 +11750,24 @@ msgid ""
"tensor ``a``."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.unique_with_counts:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.unique_with_counts:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.unique_with_counts:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.unique_with_counts:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.unique_with_counts:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.unique_with_counts:5
msgid "Unique elements, corresponding counts"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.value_and_grad:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.value_and_grad:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.value_and_grad:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.value_and_grad:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.value_and_grad:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.value_and_grad:1
msgid "Return the function which returns the value and grad of ``f``."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.value_and_grad:17
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.value_and_grad:17
+#: tensorcircuit.backends.jax_backend.JaxBackend.value_and_grad:17
#: tensorcircuit.backends.numpy_backend.NumpyBackend.value_and_grad:17
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.value_and_grad:17
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.value_and_grad:17
@@ -10377,7 +11776,9 @@ msgid ""
"``f``"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.vectorized_value_and_grad:1
+#: of
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.vectorized_value_and_grad:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.vectorized_value_and_grad:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.vectorized_value_and_grad:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.vectorized_value_and_grad:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.vectorized_value_and_grad:1
@@ -10394,7 +11795,9 @@ msgid ""
"for argnum in argnums). The gradient for argnums=k is defined as"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.vectorized_value_and_grad:9
+#: of
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.vectorized_value_and_grad:9
+#: tensorcircuit.backends.jax_backend.JaxBackend.vectorized_value_and_grad:9
#: tensorcircuit.backends.numpy_backend.NumpyBackend.vectorized_value_and_grad:9
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.vectorized_value_and_grad:9
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.vectorized_value_and_grad:9
@@ -10404,6 +11807,7 @@ msgid ""
msgstr ""
#: of
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.vectorized_value_and_grad:13
#: tensorcircuit.backends.jax_backend.JaxBackend.vectorized_value_and_grad:13
#: tensorcircuit.backends.numpy_backend.NumpyBackend.vectorized_value_and_grad:13
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.vectorized_value_and_grad:13
@@ -10412,6 +11816,7 @@ msgid "Therefore, if argnums=0, the gradient is reduced to"
msgstr ""
#: of
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.vectorized_value_and_grad:15
#: tensorcircuit.backends.jax_backend.JaxBackend.vectorized_value_and_grad:15
#: tensorcircuit.backends.numpy_backend.NumpyBackend.vectorized_value_and_grad:15
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.vectorized_value_and_grad:15
@@ -10420,6 +11825,7 @@ msgid "g^0_i = \\frac{\\partial f(vargs[0][i])}{\\partial vargs[0][i]}"
msgstr ""
#: of
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.vectorized_value_and_grad:19
#: tensorcircuit.backends.jax_backend.JaxBackend.vectorized_value_and_grad:19
#: tensorcircuit.backends.numpy_backend.NumpyBackend.vectorized_value_and_grad:19
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.vectorized_value_and_grad:19
@@ -10430,6 +11836,7 @@ msgid ""
msgstr ""
#: of
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.vectorized_value_and_grad:21
#: tensorcircuit.backends.jax_backend.JaxBackend.vectorized_value_and_grad:21
#: tensorcircuit.backends.numpy_backend.NumpyBackend.vectorized_value_and_grad:21
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.vectorized_value_and_grad:21
@@ -10438,6 +11845,7 @@ msgid "And if argnums=1, the gradient is like"
msgstr ""
#: of
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.vectorized_value_and_grad:23
#: tensorcircuit.backends.jax_backend.JaxBackend.vectorized_value_and_grad:23
#: tensorcircuit.backends.numpy_backend.NumpyBackend.vectorized_value_and_grad:23
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.vectorized_value_and_grad:23
@@ -10449,6 +11857,7 @@ msgid ""
msgstr ""
#: of
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.vectorized_value_and_grad:26
#: tensorcircuit.backends.jax_backend.JaxBackend.vectorized_value_and_grad:26
#: tensorcircuit.backends.numpy_backend.NumpyBackend.vectorized_value_and_grad:26
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.vectorized_value_and_grad:26
@@ -10460,6 +11869,8 @@ msgid ""
msgstr ""
#: of
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.vectorized_value_and_grad:33
+#: tensorcircuit.backends.cupy_backend.CuPyBackend.vmap:6
#: tensorcircuit.backends.jax_backend.JaxBackend.vectorized_value_and_grad:33
#: tensorcircuit.backends.jax_backend.JaxBackend.vmap:6
#: tensorcircuit.backends.numpy_backend.NumpyBackend.vectorized_value_and_grad:33
@@ -10473,7 +11884,8 @@ msgid ""
"shape in the fist dimension"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.vjp:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.vjp:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.vjp:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.vjp:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.vjp:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.vjp:1
@@ -10483,14 +11895,16 @@ msgid ""
" mode AD relevant) Strictly speaking, this function is value_and_vjp."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.vjp:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.vjp:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.vjp:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.vjp:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.vjp:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.vjp:5
msgid "the function to carry out vjp calculation"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.vjp:9
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.vjp:9
+#: tensorcircuit.backends.jax_backend.JaxBackend.vjp:9
#: tensorcircuit.backends.numpy_backend.NumpyBackend.vjp:9
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.vjp:9
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.vjp:9
@@ -10499,14 +11913,16 @@ msgid ""
"shape as return of function ``f``"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.vjp:12
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.vjp:12
+#: tensorcircuit.backends.jax_backend.JaxBackend.vjp:12
#: tensorcircuit.backends.numpy_backend.NumpyBackend.vjp:12
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.vjp:12
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.vjp:12
msgid "(``f(*inputs)``, vjp_tensor), where vjp_tensor is the same shape as inputs"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.vmap:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.vmap:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.vmap:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.vmap:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.vmap:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.vmap:1
@@ -10516,21 +11932,24 @@ msgid ""
"broadcast in the fist dimension."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.vmap:4
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.vmap:4
+#: tensorcircuit.backends.jax_backend.JaxBackend.vmap:4
#: tensorcircuit.backends.numpy_backend.NumpyBackend.vmap:4
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.vmap:4
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.vmap:4
msgid "function to be broadcasted."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.vmap:9
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.vmap:9
+#: tensorcircuit.backends.jax_backend.JaxBackend.vmap:9
#: tensorcircuit.backends.numpy_backend.NumpyBackend.vmap:9
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.vmap:9
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.vmap:9
msgid "vmap version of ``f``"
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.zeros:1
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.zeros:1
+#: tensorcircuit.backends.jax_backend.JaxBackend.zeros:1
#: tensorcircuit.backends.numpy_backend.NumpyBackend.zeros:1
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.zeros:1
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.zeros:1
@@ -10540,7 +11959,8 @@ msgid ""
" object (for block-sparse backends)."
msgstr ""
-#: of tensorcircuit.backends.jax_backend.JaxBackend.zeros:5
+#: of tensorcircuit.backends.cupy_backend.CuPyBackend.zeros:5
+#: tensorcircuit.backends.jax_backend.JaxBackend.zeros:5
#: tensorcircuit.backends.numpy_backend.NumpyBackend.zeros:5
#: tensorcircuit.backends.pytorch_backend.PyTorchBackend.zeros:5
#: tensorcircuit.backends.tensorflow_backend.TensorFlowBackend.zeros:5
@@ -10550,317 +11970,382 @@ msgid ""
"dtype of the returned matrix."
msgstr ""
-#: ../../source/api/backends/numpy_backend.rst:2
-msgid "tensorcircuit.backends.numpy_backend"
+#: ../../source/api/backends/jax_backend.rst:2
+msgid "tensorcircuit.backends.jax_backend"
msgstr ""
-#: of tensorcircuit.backends.numpy_backend:1
-msgid "Backend magic inherited from tensornetwork: numpy backend"
+#: of tensorcircuit.backends.jax_backend:1
+msgid "Backend magic inherited from tensornetwork: jax backend"
msgstr ""
-#: of tensorcircuit.backends.numpy_backend.NumpyBackend:1
+#: of tensorcircuit.backends.jax_backend.JaxBackend:1
msgid ""
-"Bases: "
-":py:class:`tensornetwork.backends.numpy.numpy_backend.NumPyBackend`, "
-":py:class:`tensorcircuit.backends.abstract_backend.ExtendedBackend`"
+"Bases: :py:class:`~tensornetwork.backends.jax.jax_backend.JaxBackend`, "
+":py:class:`~tensorcircuit.backends.abstract_backend.ExtendedBackend`"
msgstr ""
-#: of tensorcircuit.backends.numpy_backend.NumpyBackend:1
+#: of tensorcircuit.backends.jax_backend.JaxBackend:1
msgid ""
-"see the original backend API at `numpy backend "
-"`_"
+"See the original backend API at `jax backend "
+"`_"
msgstr ""
-#: of tensornetwork.backends.numpy.numpy_backend.NumPyBackend.diagonal:16
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.diagonal:19
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.trace:12
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.trace:15
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.diagonal:16
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.diagonal:19
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.trace:12
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.trace:15
msgid ""
"Axis to be used as the first/second axis of the 2D sub-arrays from which "
-"the diagonals should be taken. Defaults to second-last/last axis."
+"the diagonals should be taken. Defaults to second last/last axis."
msgstr ""
-#: of tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigs:1
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:1
msgid ""
-"Arnoldi method for finding the lowest eigenvector-eigenvalue pairs of a "
-"linear operator `A`. If no `initial_state` is provided then `shape` and "
-"`dtype` are required so that a suitable initial state can be randomly "
-"generated. This is a wrapper for scipy.sparse.linalg.eigs which only "
-"supports a subset of the arguments of scipy.sparse.linalg.eigs."
-msgstr ""
-
-#: of tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigs:8
-msgid "A (sparse) implementation of a linear operator"
+"Implicitly restarted Arnoldi method for finding the lowest eigenvector-"
+"eigenvalue pairs of a linear operator `A`. `A` is a function implementing"
+" the matrix-vector product."
msgstr ""
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigs:9
-#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh:9
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigs:11
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh:9
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:6
msgid ""
-"An initial vector for the algorithm. If `None`, a random initial `Tensor`"
-" is created using the `numpy.random.randn` method."
+"WARNING: This routine uses jax.jit to reduce runtimes. jitting is "
+"triggered at the first invocation of `eigs`, and on any subsequent calls "
+"if the python `id` of `A` changes, even if the formal definition of `A` "
+"stays the same. Example: the following will jit once at the beginning, "
+"and then never again:"
msgstr ""
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigs:13
-#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh:13
-#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh_lanczos:11
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigs:15
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh:13
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh_lanczos:11
-#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.eigsh_lanczos:11
-msgid ""
-"The dtype of the input `A`. If both no `initial_state` is provided, a "
-"random initial state with shape `shape` and dtype `dtype` is created."
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:12
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:12
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:10
+msgid "```python import jax import numpy as np def A(H,x):"
msgstr ""
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigs:16
-#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh:16
-#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh_lanczos:14
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigs:18
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh:16
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh_lanczos:14
-#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.eigsh_lanczos:14
-msgid ""
-"The nummber of eigenvector-eigenvalue pairs to be computed. If `numeig > "
-"1`, `reorthogonalize` has to be `True`."
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:16
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:31
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:16
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:31
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:14
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:29
+msgid "return jax.np.dot(H,x)"
msgstr ""
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigs:18
-#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh:18
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigs:20
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh:18
-msgid "The desired precision of the eigenvalus. Uses"
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:19
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:19
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:17
+msgid "for n in range(100):"
msgstr ""
-#: of tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigs:21
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:18
msgid ""
-"['LM' | 'SM' | 'LR' | 'SR' | 'LI'] Which `k` eigenvectors and eigenvalues"
-" to find: 'LM' : largest magnitude 'SM' : smallest magnitude "
-"'LR' : largest real part 'SR' : smallest real part 'LI' : largest"
-" imaginary part"
+"H = jax.np.array(np.random.rand(10,10)) x = "
+"jax.np.array(np.random.rand(10,10)) res = eigs(A, [H],x) #jitting is "
+"triggerd only at `n=0`"
msgstr ""
-#: of tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigs:21
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:23
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:23
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:21
msgid ""
-"['LM' | 'SM' | 'LR' | 'SR' | 'LI'] Which `k` eigenvectors and eigenvalues"
-" to find:"
+"The following code triggers jitting at every iteration, which results in "
+"considerably reduced performance"
msgstr ""
-#: of tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigs:23
-msgid ""
-"'LM' : largest magnitude 'SM' : smallest magnitude 'LR' : largest real "
-"part 'SR' : smallest real part 'LI' : largest imaginary part"
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:26
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:26
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:24
+msgid "```python import jax import numpy as np for n in range(100):"
msgstr ""
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigs:28
-#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh:28
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigs:28
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh:28
-msgid "The maximum number of iterations."
-msgstr ""
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:30
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:30
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:28
+msgid "def A(H,x):"
+msgstr ""
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigs:30
-#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh:30
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigs:30
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh:30
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:32
msgid ""
-"An array of `numeig` lowest eigenvalues `list`: A list of `numeig` lowest"
-" eigenvectors"
+"H = jax.np.array(np.random.rand(10,10)) x = "
+"jax.np.array(np.random.rand(10,10)) res = eigs(A, [H],x) #jitting is "
+"triggerd at every step `n`"
msgstr ""
-#: of tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigs:32
-msgid "`np.ndarray`"
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:37
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:37
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:35
+msgid ""
+"A (sparse) implementation of a linear operator. Call signature of `A` is "
+"`res = A(vector, *args)`, where `vector` can be an arbitrary `Tensor`, "
+"and `res.shape` has to be `vector.shape`."
msgstr ""
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigsh:1
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh:1
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:42
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:42
msgid ""
-"Lanczos method for finding the lowest eigenvector-eigenvalue pairs of a "
-"symmetric (hermitian) linear operator `A`. `A` is a callable implementing"
-" the matrix-vector product. If no `initial_state` is provided then "
-"`shape` and `dtype` have to be passed so that a suitable initial state "
-"can be randomly generated. :param A: A (sparse) implementation of a "
-"linear operator :param arsg: A list of arguments to `A`. `A` will be "
-"called as"
+"An initial vector for the algorithm. If `None`, a random initial `Tensor`"
+" is created using the `backend.randn` method"
msgstr ""
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigs:8
-#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh:8
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh:8
-msgid "`res = A(initial_state, *args)`."
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:45
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:45
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:43
+msgid ""
+"The dtype of the input `A`. If no `initial_state` is provided, a random "
+"initial state with shape `shape` and dtype `dtype` is created."
msgstr ""
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigs:19
-#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh:19
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh:19
-msgid ""
-"['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI'] Which `k` eigenvectors and "
-"eigenvalues to find: 'LM' : largest magnitude 'SM' : smallest "
-"magnitude 'LR' : largest real part 'SR' : smallest real part "
-"'LI' : largest imaginary part 'SI' : smallest imaginary part Note "
-"that not all of those might be supported by specialized backends."
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:48
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:48
+msgid "The number of eigenvector-eigenvalue pairs to be computed."
msgstr ""
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigs:19
-#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh:19
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh:19
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:49
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:49
msgid ""
-"['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI'] Which `k` eigenvectors and "
-"eigenvalues to find:"
+"The desired precision of the eigenvalues. For the jax backend this has "
+"currently no effect, and precision of eigenvalues is not guaranteed. This"
+" feature may be added at a later point. To increase precision the caller "
+"can either increase `maxiter` or `num_krylov_vecs`."
msgstr ""
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigs:21
-#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh:21
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh:21
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:53
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:53
msgid ""
-"'LM' : largest magnitude 'SM' : smallest magnitude 'LR' : largest real "
-"part 'SR' : smallest real part 'LI' : largest imaginary part 'SI' : "
-"smallest imaginary part"
+"Flag for targetting different types of eigenvalues. Currently supported "
+"are `which = 'LR'` (larges real part) and `which = 'LM'` (larges "
+"magnitude)."
msgstr ""
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigs:27
-#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh:27
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh:27
-msgid "Note that not all of those might be supported by specialized backends."
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:56
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:56
+msgid ""
+"Maximum number of restarts. For `maxiter=0` the routine becomes "
+"equivalent to a simple Arnoldi method."
msgstr ""
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigs:32
-#: tensornetwork.backends.abstract_backend.AbstractBackend.eigsh:32
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh:32
-#: tensornetwork.matrixproductstates.base_mps.BaseMPS.apply_transfer_operator:12
-#: tensornetwork.matrixproductstates.base_mps.BaseMPS.get_tensor:10
-#: tensornetwork.matrixproductstates.base_mps.BaseMPS.position:10
-#: tensornetwork.matrixproductstates.finite_mps.FiniteMPS.canonicalize:9
-msgid "`Tensor`"
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:59
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:59
+msgid ""
+"(eigvals, eigvecs) eigvals: A list of `numeig` eigenvalues eigvecs: A "
+"list of `numeig` eigenvectors"
msgstr ""
-#: of tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh_lanczos:1
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:62
+#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:62
msgid ""
-"Lanczos method for finding the lowest eigenvector-eigenvalue pairs of a "
-"linear operator `A`. :param A: A (sparse) implementation of a linear "
-"operator."
+"eigvals: A list of `numeig` eigenvalues eigvecs: A list of `numeig` "
+"eigenvectors"
msgstr ""
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigsh_lanczos:4
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh_lanczos:4
-#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.eigsh_lanczos:4
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:1
msgid ""
-"Call signature of `A` is `res = A(vector, *args)`, where `vector` can be "
-"an arbitrary `Tensor`, and `res.shape` has to be `vector.shape`."
+"Implicitly restarted Lanczos method for finding the lowest eigenvector-"
+"eigenvalue pairs of a symmetric (hermitian) linear operator `A`. `A` is a"
+" function implementing the matrix-vector product."
msgstr ""
-#: of tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh_lanczos:16
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:6
msgid ""
-"The desired precision of the eigenvalus. Uses "
-"`np.linalg.norm(eigvalsnew[0:numeig] - eigvalsold[0:numeig]) < tol` as "
-"stopping criterion between two diagonalization steps of the tridiagonal "
-"operator."
+"WARNING: This routine uses jax.jit to reduce runtimes. jitting is "
+"triggered at the first invocation of `eigsh`, and on any subsequent calls"
+" if the python `id` of `A` changes, even if the formal definition of `A` "
+"stays the same. Example: the following will jit once at the beginning, "
+"and then never again:"
msgstr ""
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigsh_lanczos:25
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh_lanczos:25
-#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.eigsh_lanczos:25
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:18
msgid ""
-"The tridiagonal Operator is diagonalized every `ndiag` iterations to "
-"check convergence."
+"H = jax.np.array(np.random.rand(10,10)) x = "
+"jax.np.array(np.random.rand(10,10)) res = eigsh(A, [H],x) #jitting is "
+"triggerd only at `n=0`"
msgstr ""
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigsh_lanczos:30
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh_lanczos:30
-#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.eigsh_lanczos:30
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:32
msgid ""
-"(eigvals, eigvecs) eigvals: A list of `numeig` lowest eigenvalues "
-"eigvecs: A list of `numeig` lowest eigenvectors"
+"H = jax.np.array(np.random.rand(10,10)) x = "
+"jax.np.array(np.random.rand(10,10)) res = eigsh(A, [H],x) #jitting is "
+"triggerd at every step `n`"
msgstr ""
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigsh_lanczos:33
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh_lanczos:33
-#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.eigsh_lanczos:33
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:1
msgid ""
-"eigvals: A list of `numeig` lowest eigenvalues eigvecs: A list of "
-"`numeig` lowest eigenvectors"
+"Lanczos method for finding the lowest eigenvector-eigenvalue pairs of a "
+"hermitian linear operator `A`. `A` is a function implementing the matrix-"
+"vector product. WARNING: This routine uses jax.jit to reduce runtimes. "
+"jitting is triggered at the first invocation of `eigsh_lanczos`, and on "
+"any subsequent calls if the python `id` of `A` changes, even if the "
+"formal definition of `A` stays the same. Example: the following will jit "
+"once at the beginning, and then never again:"
msgstr ""
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.power:7
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.power:7
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.power:7
-msgid "Returns the exponentiation of tensor a raised to b."
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:16
+msgid ""
+"H = jax.np.array(np.random.rand(10,10)) x = "
+"jax.np.array(np.random.rand(10,10)) res = eigsh_lanczos(A, [H],x) "
+"#jitting is triggerd only at `n=0`"
msgstr ""
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.power:4
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.power:4
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.power:4
-msgid "If b is a tensor, then the exponentiation is element-wise"
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:30
+msgid ""
+"H = jax.np.array(np.random.rand(10,10)) x = "
+"jax.np.array(np.random.rand(10,10)) res = eigsh_lanczos(A, [H],x) "
+"#jitting is triggerd at every step `n`"
msgstr ""
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.power:3
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.power:3
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.power:3
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:46
msgid ""
-"between the two tensors, with a as the base and b as the power. Note that"
-" a and b must be broadcastable to the same shape if b is a tensor."
+"The number of eigenvector-eigenvalue pairs to be computed. If `numeig > "
+"1`, `reorthogonalize` has to be `True`."
msgstr ""
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.power:7
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.power:7
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.power:7
-msgid "If b is a scalar, then the exponentiation is each value in a"
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:48
+msgid ""
+"The desired precision of the eigenvalues. For the jax backend this has "
+"currently no effect, and precision of eigenvalues is not guaranteed. This"
+" feature may be added at a later point. To increase precision the caller "
+"can increase `num_krylov_vecs`."
msgstr ""
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.power:7
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.power:7
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.power:7
-msgid "raised to the power of b."
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:57
+msgid ""
+"The tridiagonal Operator is diagonalized every `ndiag` iterations to "
+"check convergence. This has currently no effect for the jax backend, but "
+"may be added at a later point."
msgstr ""
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.power:9
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.power:9
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.power:9
-msgid "The tensor containing the bases."
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:63
+msgid ""
+"(eigvals, eigvecs) eigvals: A jax-array containing `numeig` lowest "
+"eigenvalues eigvecs: A list of `numeig` lowest eigenvectors"
msgstr ""
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.power:10
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.power:10
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.power:10
-msgid "The tensor containing the powers; or a single scalar as the power."
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh_lanczos:66
+msgid ""
+"eigvals: A jax-array containing `numeig` lowest eigenvalues eigvecs: A "
+"list of `numeig` lowest eigenvectors"
msgstr ""
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.power:12
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.power:12
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.power:12
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.power:1
msgid ""
-"The tensor that is each element of a raised to the power of b. Note "
-"that the shape of the returned tensor is that produced by the broadcast"
-" of a and b."
+"Returns the power of tensor a to the value of b. In the case b is a "
+"tensor, then the power is by element"
msgstr ""
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.power:15
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.power:15
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.power:15
-msgid "The tensor that is each element of a raised to the"
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.power:3
+msgid "with a as the base and b as the exponent."
msgstr ""
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.power:15
-#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.power:15
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.power:15
-msgid ""
-"power of b. Note that the shape of the returned tensor is that produced "
-"by the broadcast of a and b."
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.power:5
+msgid "In the case b is a scalar, then the power of each value in a"
msgstr ""
-#: of tensornetwork.backends.numpy.numpy_backend.NumPyBackend.qr:1
-msgid "Computes the QR decomposition of a tensor."
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.power:5
+msgid "is raised to the exponent of b."
msgstr ""
-#: of tensornetwork.backends.numpy.numpy_backend.NumPyBackend.rq:1
-msgid "Computes the RQ (reversed QR) decomposition of a tensor."
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.power:7
+msgid "The tensor that contains the base."
msgstr ""
-#: of tensornetwork.backends.numpy.numpy_backend.NumPyBackend.sign:1
-msgid ""
-"Returns an elementwise tensor with entries y[i] = 1, 0, -1 tensor[i] > 0,"
-" == 0, and < 0 respectively."
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.power:8
+msgid "The tensor that contains the exponent or a single scalar."
+msgstr ""
+
+#: of tensorcircuit.backends.jax_backend._qr_jax:1
+msgid ""
+"Computes the QR decomposition of a tensor. See "
+"tensornetwork.backends.tensorflow.decompositions for details."
+msgstr ""
+
+#: of tensorcircuit.backends.jax_backend._rq_jax:1
+msgid ""
+"Computes the RQ (reversed QR) decomposition of a tensor. See "
+"tensornetwork.backends.tensorflow.decompositions for details."
+msgstr ""
+
+#: of tensornetwork.backends.jax.jax_backend.JaxBackend.sign:4
+msgid ""
+"For complex input the behaviour of this function may depend on the "
+"backend. The Jax backend version returns y[i] = x[i]/sqrt(x[i]^2)."
+msgstr ""
+
+#: ../../source/api/backends/numpy_backend.rst:2
+msgid "tensorcircuit.backends.numpy_backend"
+msgstr ""
+
+#: of tensorcircuit.backends.numpy_backend:1
+msgid "Backend magic inherited from tensornetwork: numpy backend"
+msgstr ""
+
+#: of tensorcircuit.backends.numpy_backend.NumpyBackend:1
+msgid ""
+"Bases: "
+":py:class:`~tensornetwork.backends.numpy.numpy_backend.NumPyBackend`, "
+":py:class:`~tensorcircuit.backends.abstract_backend.ExtendedBackend`"
+msgstr ""
+
+#: of tensorcircuit.backends.numpy_backend.NumpyBackend:1
+msgid ""
+"see the original backend API at `numpy backend "
+"`_"
+msgstr ""
+
+#: of tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigs:1
+msgid ""
+"Arnoldi method for finding the lowest eigenvector-eigenvalue pairs of a "
+"linear operator `A`. If no `initial_state` is provided then `shape` and "
+"`dtype` are required so that a suitable initial state can be randomly "
+"generated. This is a wrapper for scipy.sparse.linalg.eigs which only "
+"supports a subset of the arguments of scipy.sparse.linalg.eigs."
+msgstr ""
+
+#: of tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigs:8
+msgid "A (sparse) implementation of a linear operator"
+msgstr ""
+
+#: of tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigs:21
+msgid ""
+"['LM' | 'SM' | 'LR' | 'SR' | 'LI'] Which `k` eigenvectors and eigenvalues"
+" to find: 'LM' : largest magnitude 'SM' : smallest magnitude "
+"'LR' : largest real part 'SR' : smallest real part 'LI' : largest"
+" imaginary part"
+msgstr ""
+
+#: of tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigs:21
+msgid ""
+"['LM' | 'SM' | 'LR' | 'SR' | 'LI'] Which `k` eigenvectors and eigenvalues"
+" to find:"
+msgstr ""
+
+#: of tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigs:23
+msgid ""
+"'LM' : largest magnitude 'SM' : smallest magnitude 'LR' : largest real "
+"part 'SR' : smallest real part 'LI' : largest imaginary part"
+msgstr ""
+
+#: of tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigs:32
+msgid "`np.ndarray`"
+msgstr ""
+
+#: of tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh_lanczos:1
+msgid ""
+"Lanczos method for finding the lowest eigenvector-eigenvalue pairs of a "
+"linear operator `A`. :param A: A (sparse) implementation of a linear "
+"operator."
+msgstr ""
+
+#: of tensornetwork.backends.numpy.numpy_backend.NumPyBackend.eigsh_lanczos:16
+msgid ""
+"The desired precision of the eigenvalus. Uses "
+"`np.linalg.norm(eigvalsnew[0:numeig] - eigvalsold[0:numeig]) < tol` as "
+"stopping criterion between two diagonalization steps of the tridiagonal "
+"operator."
+msgstr ""
+
+#: of tensornetwork.backends.numpy.numpy_backend.NumPyBackend.sign:1
+msgid ""
+"Returns an elementwise tensor with entries y[i] = 1, 0, -1 tensor[i] > 0,"
+" == 0, and < 0 respectively."
msgstr ""
#: of tensornetwork.backends.numpy.numpy_backend.NumPyBackend.sign:4
@@ -10880,8 +12365,8 @@ msgstr ""
#: of tensorcircuit.backends.pytorch_backend.PyTorchBackend:1
msgid ""
"Bases: "
-":py:class:`tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend`,"
-" :py:class:`tensorcircuit.backends.abstract_backend.ExtendedBackend`"
+":py:class:`~tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend`,"
+" :py:class:`~tensorcircuit.backends.abstract_backend.ExtendedBackend`"
msgstr ""
#: of tensorcircuit.backends.pytorch_backend.PyTorchBackend:1
@@ -10906,16 +12391,6 @@ msgid ""
"(note this differs from the NumPy defaults)."
msgstr ""
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigs:1
-msgid ""
-"Arnoldi method for finding the lowest eigenvector-eigenvalue pairs of a "
-"linear operator `A`. `A` is a callable implementing the matrix-vector "
-"product. If no `initial_state` is provided then `shape` and `dtype` have "
-"to be passed so that a suitable initial state can be randomly generated."
-" :param A: A (sparse) implementation of a linear operator :param arsg: A "
-"list of arguments to `A`. `A` will be called as"
-msgstr ""
-
#: of
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.eigsh_lanczos:1
msgid ""
@@ -11032,6 +12507,12 @@ msgid ""
"raised if they are specified."
msgstr ""
+#: of tensorcircuit.backends.pytorch_backend.torch_jit_func:1
+msgid ""
+"Delay the tracing of torch jit to the first run time: consistent with tf "
+"and jax mechanism"
+msgstr ""
+
#: ../../source/api/backends/tensorflow_backend.rst:2
msgid "tensorcircuit.backends.tensorflow_backend"
msgstr ""
@@ -11043,8 +12524,8 @@ msgstr ""
#: of tensorcircuit.backends.tensorflow_backend.TensorFlowBackend:1
msgid ""
"Bases: "
-":py:class:`tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend`,"
-" :py:class:`tensorcircuit.backends.abstract_backend.ExtendedBackend`"
+":py:class:`~tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend`,"
+" :py:class:`~tensorcircuit.backends.abstract_backend.ExtendedBackend`"
msgstr ""
#: of tensorcircuit.backends.tensorflow_backend.TensorFlowBackend:1
@@ -11072,25 +12553,67 @@ msgid ""
"will be raised if they are specified."
msgstr ""
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigsh_lanczos:1
+#: of
+#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.sign:4
msgid ""
-"Lanczos method for finding the lowest eigenvector-eigenvalue pairs of "
-"`A`. :param A: A (sparse) implementation of a linear operator."
+"For complex input the behaviour of this function may depend on the "
+"backend. The TensorFlow version returns y[i] = x[i] / abs(x[i])."
msgstr ""
-#: of tensornetwork.backends.abstract_backend.AbstractBackend.eigsh_lanczos:16
+#: of tensorcircuit.backends.tensorflow_backend._svd_tf:21
msgid ""
-"The desired precision of the eigenvalus. Uses "
-"`backend.norm(eigvalsnew[0:numeig] - eigvalsold[0:numeig]) < tol` as "
-"stopping criterion between two diagonalization steps of the tridiagonal "
-"operator."
+"If both `max_singular_values` snd `max_truncation_error` are specified, "
+"the number of retained singular values will be `min(max_singular_values, "
+"nsv_auto_trunc)`, where `nsv_auto_trunc` is the number of singular values"
+" that must be kept to maintain a truncation error smaller than "
+"`max_truncation_error`."
msgstr ""
-#: of
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.sign:4
+#: of tensorcircuit.backends.tensorflow_backend._svd_tf:27
msgid ""
-"For complex input the behaviour of this function may depend on the "
-"backend. The TensorFlow version returns y[i] = x[i] / abs(x[i])."
+"The output consists of three tensors `u, s, vh` such that: ```python "
+"u[i1,...,iN, j] * s[j] * vh[j, k1,...,kM] == tensor[i1,...,iN, k1,...,kM]"
+" ``` Note that the output ordering matches numpy.linalg.svd rather than "
+"tf.svd."
+msgstr ""
+
+#: of tensorcircuit.backends.tensorflow_backend._svd_tf:33
+msgid ""
+"Args: tf: The tensorflow module. tensor: A tensor to be decomposed. "
+"pivot_axis: Where to split the tensor's axes before flattening into a"
+msgstr ""
+
+#: of tensorcircuit.backends.tensorflow_backend._svd_tf:37
+msgid "matrix."
+msgstr ""
+
+#: of tensorcircuit.backends.tensorflow_backend._svd_tf:38
+msgid "max_singular_values: The number of singular values to keep, or `None` to"
+msgstr ""
+
+#: of tensorcircuit.backends.tensorflow_backend._svd_tf:39
+msgid "keep them all."
+msgstr ""
+
+#: of tensorcircuit.backends.tensorflow_backend._svd_tf:40
+msgid ""
+"max_truncation_error: The maximum allowed truncation error or `None` to "
+"not"
+msgstr ""
+
+#: of tensorcircuit.backends.tensorflow_backend._svd_tf:41
+msgid "do any truncation."
+msgstr ""
+
+#: of tensorcircuit.backends.tensorflow_backend._svd_tf:42
+msgid "relative: Multiply `max_truncation_err` with the largest singular value."
+msgstr ""
+
+#: of tensorcircuit.backends.tensorflow_backend._svd_tf:44
+msgid ""
+"Returns: u: Left tensor factor. s: Vector of ordered singular values from"
+" largest to smallest. vh: Right tensor factor. s_rest: Vector of "
+"discarded singular values (length zero if no"
msgstr ""
#: of
@@ -11121,7 +12644,7 @@ msgstr ""
#: of tensorcircuit.basecircuit.BaseCircuit:1
#: tensorcircuit.mpscircuit.MPSCircuit:1
-msgid "Bases: :py:class:`tensorcircuit.abstractcircuit.AbstractCircuit`"
+msgid "Bases: :py:class:`~tensorcircuit.abstractcircuit.AbstractCircuit`"
msgstr ""
#: of tensorcircuit.basecircuit.BaseCircuit.amplitude:1
@@ -11152,6 +12675,7 @@ msgid ""
msgstr ""
#: of tensorcircuit.basecircuit.BaseCircuit.expectation_before:4
+#: tensorcircuit.interfaces.torch.torch_interface_kws:24
#: tensorcircuit.quantum.sample2count:7 tensorcircuit.utils.benchmark:7
msgid "_description_, defaults to True"
msgstr ""
@@ -11265,6 +12789,7 @@ msgid ""
msgstr ""
#: of tensorcircuit.basecircuit.BaseCircuit.sample:13
+#: tensorcircuit.cloud.abstraction.Task.results:5
#: tensorcircuit.quantum.measurement_counts:45
#: tensorcircuit.quantum.sample2all:10
msgid "alias for the argument ``format``"
@@ -11599,7 +13124,8 @@ msgid ":math:`p_z`"
msgstr ""
#: of tensorcircuit.channels.depolarizingchannel:36
-#: tensorcircuit.channels.generaldepolarizingchannel:15
+#: tensorcircuit.channels.generaldepolarizingchannel:39
+#: tensorcircuit.channels.isotropicdepolarizingchannel:28
msgid "Sequences of Gates"
msgstr ""
@@ -11628,15 +13154,50 @@ msgid "The dynamic evolution according to Superoperator."
msgstr ""
#: of tensorcircuit.channels.generaldepolarizingchannel:1
-msgid "Return a Depolarizing Channel for 1 qubit or 2 qubits"
+msgid ""
+"Return a depolarizing channel. If :math:`p` is a float number, the one "
+"qubit channel is"
+msgstr ""
+
+#: of tensorcircuit.channels.generaldepolarizingchannel:4
+msgid "\\mathcal{E}(\\rho) = (1 - 3p)\\rho + p(X\\rho X + Y\\rho Y + Z\\rho Z)"
+msgstr ""
+
+#: of tensorcircuit.channels.generaldepolarizingchannel:8
+msgid "Or alternatively"
+msgstr ""
+
+#: of tensorcircuit.channels.generaldepolarizingchannel:10
+msgid "\\mathcal{E}(\\rho) = 4p \\frac{I}{2} + (1 - 4p) \\rho"
+msgstr ""
+
+#: of tensorcircuit.channels.generaldepolarizingchannel:16
+msgid ""
+"The definition of ``p`` in this method is different from "
+":func:`isotropicdepolarizingchannel`."
msgstr ""
-#: of tensorcircuit.channels.generaldepolarizingchannel:11
+#: of tensorcircuit.channels.generaldepolarizingchannel:19
+msgid "And if :math:`p` is a sequence, the one qubit channel is"
+msgstr ""
+
+#: of tensorcircuit.channels.generaldepolarizingchannel:21
+msgid ""
+"\\mathcal{E}(\\rho) = (1 - \\sum_i p_i) \\rho + p_1 X\\rho X + p_2 Y\\rho"
+" Y + p_3 \\rho Z"
+msgstr ""
+
+#: of tensorcircuit.channels.generaldepolarizingchannel:25
+msgid "The logic for two-qubit or more-qubit channel follows similarly."
+msgstr ""
+
+#: of tensorcircuit.channels.generaldepolarizingchannel:35
msgid "parameter for each Pauli channel"
msgstr ""
-#: of tensorcircuit.channels.generaldepolarizingchannel:13
-msgid "number of qubits, 1 and 2 are avaliable, defaults 1"
+#: of tensorcircuit.channels.generaldepolarizingchannel:37
+#: tensorcircuit.channels.isotropicdepolarizingchannel:26
+msgid "number of qubits, defaults 1"
msgstr ""
#: of tensorcircuit.channels.is_hermitian_matrix:1
@@ -11655,6 +13216,36 @@ msgstr ""
msgid "_description_, defaults to 1e-5"
msgstr ""
+#: of tensorcircuit.channels.isotropicdepolarizingchannel:1
+msgid "Return an isotropic depolarizing channel."
+msgstr ""
+
+#: of tensorcircuit.channels.isotropicdepolarizingchannel:3
+msgid "\\mathcal{E}(\\rho) = (1 - p)\\rho + p/(4^n-1)\\sum_j P_j \\rho P_j"
+msgstr ""
+
+#: of tensorcircuit.channels.isotropicdepolarizingchannel:7
+msgid ""
+"where $n$ is the number of qubits and $P_j$ are $n$-qubit Pauli strings "
+"except $I$. Or alternatively"
+msgstr ""
+
+#: of tensorcircuit.channels.isotropicdepolarizingchannel:10
+msgid ""
+"\\mathcal{E}(\\rho) = \\frac{4^n}{4^n-1}p \\frac{I}{2} + (1 - "
+"\\frac{4^n}{4^n-1}p) \\rho"
+msgstr ""
+
+#: of tensorcircuit.channels.isotropicdepolarizingchannel:16
+msgid ""
+"The definition of ``p`` in this method is different from "
+":func:`generaldepolarizingchannel`."
+msgstr ""
+
+#: of tensorcircuit.channels.isotropicdepolarizingchannel:24
+msgid "error probability"
+msgstr ""
+
#: of tensorcircuit.channels.kraus_identity_check:1
msgid "Check identity of Kraus operators."
msgstr ""
@@ -11885,7 +13476,7 @@ msgid "Quantum circuit: the state simulator"
msgstr ""
#: of tensorcircuit.circuit.Circuit:1 tensorcircuit.densitymatrix.DMCircuit:1
-msgid "Bases: :py:class:`tensorcircuit.basecircuit.BaseCircuit`"
+msgid "Bases: :py:class:`~tensorcircuit.basecircuit.BaseCircuit`"
msgstr ""
#: of tensorcircuit.circuit.Circuit:1
@@ -11893,15 +13484,15 @@ msgid "``Circuit`` class. Simple usage demo below."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **ANY** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.any_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:4
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:4
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:5
#: tensorcircuit.circuit.Circuit.apply_general_kraus_delayed..apply:4
#: tensorcircuit.densitymatrix.DMCircuit.apply_general_kraus_delayed..apply:4
#: tensorcircuit.densitymatrix.DMCircuit2.apply_general_kraus_delayed..apply:4
@@ -11909,20 +13500,20 @@ msgid "Qubit number that the gate applies on."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:6
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:7
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:6
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:7
msgid "Parameters for the gate."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **CNOT** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.cnot_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j\\\\"
@@ -11931,12 +13522,12 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid "Qubit number that the gate applies on. The matrix for the gate is"
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid ""
"\\begin{bmatrix} 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j & "
"1.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j & 0.+0.j & 0.+0.j & 1.+0.j\\\\ "
@@ -11944,56 +13535,56 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **CPHASE** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.cphase_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **CR** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.cr_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **CRX** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.crx_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **CRY** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.cry_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **CRZ** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.crz_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **CU** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.cu_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **CY** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.cy_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j\\\\"
@@ -12002,7 +13593,7 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid ""
"\\begin{bmatrix} 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j & "
"1.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j & 0.+0.j & 0.+0.j & 0.-1.j\\\\ "
@@ -12010,14 +13601,14 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **CZ** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.cz_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j\\\\"
@@ -12026,7 +13617,7 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid ""
"\\begin{bmatrix} 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j & "
"1.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j & 0.+0.j & 1.+0.j & 0.+0.j\\\\ "
@@ -12034,28 +13625,28 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **EXP** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.exp_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **EXP1** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.exp1_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **FREDKIN** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.fredkin_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j & "
@@ -12071,7 +13662,7 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid ""
"\\begin{bmatrix} 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j & 0.+0.j & 0.+0.j &"
" 0.+0.j & 0.+0.j\\\\ 0.+0.j & 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j & "
@@ -12086,14 +13677,14 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **H** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.h_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 0.70710677+0.j & 0.70710677+0.j\\\\ "
@@ -12101,21 +13692,21 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid ""
"\\begin{bmatrix} 0.70710677+0.j & 0.70710677+0.j\\\\ 0.70710677+0.j"
" & -0.70710677+0.j \\end{bmatrix}"
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **I** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.i_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 1.+0.j & 0.+0.j\\\\ 0.+0.j & 1.+0.j "
@@ -12123,61 +13714,61 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid "\\begin{bmatrix} 1.+0.j & 0.+0.j\\\\ 0.+0.j & 1.+0.j \\end{bmatrix}"
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **ISWAP** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.iswap_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply mpo gate in MPO format on the circuit. See "
":py:meth:`tensorcircuit.gates.mpo_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply multicontrol gate in MPO format on the circuit. See "
":py:meth:`tensorcircuit.gates.multicontrol_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **ORX** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.orx_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **ORY** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.ory_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **ORZ** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.orz_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **OX** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.ox_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 0.+0.j & 1.+0.j & 0.+0.j & 0.+0.j\\\\"
@@ -12186,7 +13777,7 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid ""
"\\begin{bmatrix} 0.+0.j & 1.+0.j & 0.+0.j & 0.+0.j\\\\ 1.+0.j & "
"0.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j & 0.+0.j & 1.+0.j & 0.+0.j\\\\ "
@@ -12194,14 +13785,14 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **OY** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.oy_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 0.+0.j & 0.-1.j & 0.+0.j & 0.+0.j\\\\"
@@ -12210,7 +13801,7 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid ""
"\\begin{bmatrix} 0.+0.j & 0.-1.j & 0.+0.j & 0.+0.j\\\\ 0.+1.j & "
"0.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j & 0.+0.j & 1.+0.j & 0.+0.j\\\\ "
@@ -12218,14 +13809,14 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **OZ** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.oz_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j\\\\"
@@ -12234,7 +13825,7 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid ""
"\\begin{bmatrix} 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j & "
"-1.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j & 0.+0.j & 1.+0.j & 0.+0.j\\\\"
@@ -12242,70 +13833,70 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **PHASE** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.phase_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **R** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.r_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **RX** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.rx_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **RXX** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.rxx_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **RY** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.ry_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **RYY** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.ryy_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **RZ** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.rz_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **RZZ** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.rzz_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **S** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.s_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 1.+0.j & 0.+0.j\\\\ 0.+0.j & 0.+1.j "
@@ -12313,19 +13904,19 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid "\\begin{bmatrix} 1.+0.j & 0.+0.j\\\\ 0.+0.j & 0.+1.j \\end{bmatrix}"
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **SD** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.sd_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 1.+0.j & 0.+0.j\\\\ 0.+0.j & 0.-1.j "
@@ -12333,19 +13924,19 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid "\\begin{bmatrix} 1.+0.j & 0.+0.j\\\\ 0.+0.j & 0.-1.j \\end{bmatrix}"
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **SWAP** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.swap_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j\\\\"
@@ -12354,7 +13945,7 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid ""
"\\begin{bmatrix} 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j & "
"0.+0.j & 1.+0.j & 0.+0.j\\\\ 0.+0.j & 1.+0.j & 0.+0.j & 0.+0.j\\\\ "
@@ -12362,14 +13953,14 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **T** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.t_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 1. & +0.j & 0. & +0.j\\\\ 0. & +0.j "
@@ -12377,21 +13968,21 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid ""
"\\begin{bmatrix} 1. & +0.j & 0. & +0.j\\\\ 0. & +0.j & "
"0.70710677+0.70710677j \\end{bmatrix}"
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **TD** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.td_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 1. & +0.j & 0. & +0.j\\\\ 0. & +0.j "
@@ -12399,21 +13990,21 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid ""
"\\begin{bmatrix} 1. & +0.j & 0. & +0.j\\\\ 0. & +0.j & "
"0.70710677-0.70710677j \\end{bmatrix}"
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **TOFFOLI** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.toffoli_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j & "
@@ -12429,7 +14020,7 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid ""
"\\begin{bmatrix} 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j & 0.+0.j & 0.+0.j &"
" 0.+0.j & 0.+0.j\\\\ 0.+0.j & 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j & "
@@ -12444,21 +14035,21 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **U** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.u_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **WROOT** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.wroot_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 0.70710677+0.j & -0.5 & -0.5j\\\\ "
@@ -12466,21 +14057,21 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid ""
"\\begin{bmatrix} 0.70710677+0.j & -0.5 & -0.5j\\\\ 0.5 & -0.5j & "
"0.70710677+0.j \\end{bmatrix}"
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **X** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.x_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 0.+0.j & 1.+0.j\\\\ 1.+0.j & 0.+0.j "
@@ -12488,19 +14079,19 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid "\\begin{bmatrix} 0.+0.j & 1.+0.j\\\\ 1.+0.j & 0.+0.j \\end{bmatrix}"
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **Y** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.y_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 0.+0.j & 0.-1.j\\\\ 0.+1.j & 0.+0.j "
@@ -12508,19 +14099,19 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid "\\begin{bmatrix} 0.+0.j & 0.-1.j\\\\ 0.+1.j & 0.+0.j \\end{bmatrix}"
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **Z** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.z_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 1.+0.j & 0.+0.j\\\\ 0.+0.j & -1.+0.j"
@@ -12528,7 +14119,7 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid "\\begin{bmatrix} 1.+0.j & 0.+0.j\\\\ 0.+0.j & -1.+0.j \\end{bmatrix}"
msgstr ""
@@ -12710,6 +14301,15 @@ msgstr ""
msgid "The bool indicating whether the circuit is legal"
msgstr ""
+#: of
+#: tensorcircuit.circuit.Circuit.apply_general_kraus_delayed..apply:1
+#: tensorcircuit.densitymatrix.DMCircuit.apply_general_kraus_delayed..apply:1
+#: tensorcircuit.densitymatrix.DMCircuit2.apply_general_kraus_delayed..apply:1
+msgid ""
+"Apply isotropicdepolarizing quantum channel on the circuit. See "
+":py:meth:`tensorcircuit.channels.isotropicdepolarizingchannel`"
+msgstr ""
+
#: of tensorcircuit.circuit.Circuit.matrix:1
msgid ""
"Get the unitary matrix for the circuit irrespective with the circuit "
@@ -12849,9257 +14449,11448 @@ msgstr ""
msgid "The result of :math:`\\langle bra\\vert ops \\vert ket\\rangle`."
msgstr ""
-#: ../../source/api/compiler.rst:2
-msgid "tensorcircuit.compiler"
+#: ../../source/api/cloud.rst:2
+msgid "tensorcircuit.cloud"
msgstr ""
-#: ../../source/api/compiler/qiskit_compiler.rst:2
-msgid "tensorcircuit.compiler.qiskit_compiler"
+#: ../../source/api/cloud/abstraction.rst:2
+msgid "tensorcircuit.cloud.abstraction"
msgstr ""
-#: of tensorcircuit.compiler.qiskit_compiler:1
-msgid "compiler interface via qiskit"
+#: of tensorcircuit.cloud.abstraction:1
+msgid "Abstraction for Provider, Device and Task"
msgstr ""
-#: ../../source/api/cons.rst:2
-msgid "tensorcircuit.cons"
+#: of tensorcircuit.cloud.abstraction.Device:1
+msgid "Device abstraction for cloud connection, eg. quantum chips"
msgstr ""
-#: of tensorcircuit.cons:1
-msgid "Constants and setups"
+#: of tensorcircuit.cloud.abstraction.Device.list_properties:1
+msgid "List all device properties in as dict"
msgstr ""
-#: of tensorcircuit.cons.get_contractor:1 tensorcircuit.cons.set_contractor:1
-msgid ""
-"To set runtime contractor of the tensornetwork for a better contraction "
-"path. For more information on the usage of contractor, please refer to "
-"independent tutorial."
+#: of tensorcircuit.cloud.abstraction.Device.native_gates:1
+msgid "List native gates supported for the device, str conforms qiskit convention"
msgstr ""
-#: of tensorcircuit.cons.get_contractor:4 tensorcircuit.cons.set_contractor:4
-msgid ""
-"\"auto\", \"greedy\", \"branch\", \"plain\", \"tng\", \"custom\", "
-"\"custom_stateful\". defaults to None (\"auto\")"
+#: of tensorcircuit.cloud.abstraction.Device.topology:1
+msgid "Get the bidirectional topology link list of the device"
msgstr ""
-#: of tensorcircuit.cons.get_contractor:6 tensorcircuit.cons.set_contractor:6
-msgid "Valid for \"custom\" or \"custom_stateful\" as method, defaults to None"
+#: of tensorcircuit.cloud.abstraction.Device.topology_graph:1
+msgid "Get the qubit topology in ``nx.Graph`` or directly visualize it"
msgstr ""
-#: of tensorcircuit.cons.get_contractor:8 tensorcircuit.cons.set_contractor:8
-msgid ""
-"It is not very useful, as ``memory_limit`` leads to ``branch`` "
-"contraction instead of ``greedy`` which is rather slow, defaults to None"
+#: of tensorcircuit.cloud.abstraction.Device.topology_graph:3
+#: tensorcircuit.vis.render_pdf:23
+msgid "[description], defaults to False"
msgstr ""
-#: of tensorcircuit.cons.get_contractor:11 tensorcircuit.cons.set_contractor:11
-msgid "Tensornetwork version is too low to support some of the contractors."
+#: of tensorcircuit.cloud.abstraction.Provider:1
+msgid "Provider abstraction for cloud connection, eg. \"tencent\", \"local\""
msgstr ""
-#: of tensorcircuit.cons.get_contractor:12 tensorcircuit.cons.set_contractor:12
-msgid "Unknown method options."
+#: of tensorcircuit.cloud.abstraction.TCException:1
+msgid "Bases: :py:class:`BaseException`"
msgstr ""
-#: of tensorcircuit.cons.get_contractor:13 tensorcircuit.cons.set_contractor:13
-msgid "The new tensornetwork with its contractor set."
+#: of tensorcircuit.cloud.abstraction.TCException.with_traceback:1
+#: tensorcircuit.cloud.abstraction.TaskException.with_traceback:1
+#: tensorcircuit.cloud.abstraction.TaskFailed.with_traceback:1
+#: tensorcircuit.cloud.abstraction.TaskUnfinished.with_traceback:1
+#: tensorcircuit.cloud.utils.HttpStatusError.with_traceback:1
+msgid ""
+"Exception.with_traceback(tb) -- set self.__traceback__ to tb and return "
+"self."
msgstr ""
-#: of tensorcircuit.cons.get_dtype:1 tensorcircuit.cons.set_dtype:1
-msgid "Set the global runtime numerical dtype of tensors."
+#: of tensorcircuit.cloud.abstraction.Task:1
+msgid "Task abstraction for quantum jobs on the cloud"
msgstr ""
-#: of tensorcircuit.cons.get_dtype:3 tensorcircuit.cons.set_dtype:3
-msgid ""
-"\"complex64\"/\"float32\" or \"complex128\"/\"float64\", defaults to "
-"None, which is equivalent to \"complex64\"."
+#: of tensorcircuit.cloud.abstraction.Task.details:1
+msgid "Get the current task details"
msgstr ""
-#: of tensorcircuit.cons.get_dtype:6 tensorcircuit.cons.set_dtype:6
-msgid "complex dtype str and the corresponding real dtype str"
+#: of tensorcircuit.cloud.abstraction.Task.details:4
+msgid "whether return until task is finished, defaults to False"
msgstr ""
-#: of tensorcircuit.cons.plain_contractor:1
-msgid "The naive state-vector simulator contraction path."
+#: of tensorcircuit.cloud.abstraction.Task.details:6
+#: tensorcircuit.cloud.abstraction.Task.results:10
+msgid "alias for the argument ``blocked``"
msgstr ""
-#: of tensorcircuit.cons.plain_contractor:3
-msgid "The list of ``tn.Node``."
+#: of tensorcircuit.cloud.abstraction.Task.get_device:1
+msgid "Query which device the task is run on"
msgstr ""
-#: of tensorcircuit.cons.plain_contractor:5
-msgid "The list of dangling node edges, defaults to be None."
+#: of tensorcircuit.cloud.abstraction.Task.resubmit:1
+msgid "resubmit the task"
msgstr ""
-#: of tensorcircuit.cons.plain_contractor:7
-msgid "The ``tn.Node`` after contraction"
+#: of tensorcircuit.cloud.abstraction.Task.resubmit:3
+msgid "the resubmitted task"
msgstr ""
-#: of tensorcircuit.cons.runtime_backend:1
-msgid "Context manager to set with-level runtime backend"
+#: of tensorcircuit.cloud.abstraction.Task.results:1
+msgid "get task results of the qjob"
msgstr ""
-#: of tensorcircuit.cons.runtime_backend:3
-#: tensorcircuit.cons.set_function_backend:3
-msgid "\"numpy\", \"tensorflow\", \"jax\", \"pytorch\", defaults to None"
+#: of tensorcircuit.cloud.abstraction.Task.results:3
+msgid "unsupported now, defaults to None, which is \"count_dict_bin\""
msgstr ""
-#: of tensorcircuit.cons.runtime_backend tensorcircuit.cons.runtime_contractor
-#: tensorcircuit.cons.runtime_dtype
-msgid "yield"
+#: of tensorcircuit.cloud.abstraction.Task.results:7
+msgid ""
+"whether blocked to wait until the result is returned, defaults to False, "
+"which raise error when the task is unfinished"
msgstr ""
-#: of tensorcircuit.cons.runtime_backend:5
-msgid "the backend object"
+#: of tensorcircuit.cloud.abstraction.Task.results:12
+msgid "whether enable readout error mitigation, defaults to False"
msgstr ""
-#: of tensorcircuit.cons.runtime_contractor:1
-msgid "Context manager to change with-levek contractor"
+#: of tensorcircuit.cloud.abstraction.Task.results:14
+msgid "option dict for ``ReadoutMit.cals_from_system``, defaults to None"
msgstr ""
-#: of tensorcircuit.cons.runtime_dtype:1
-msgid "Context manager to set with-level runtime dtype"
+#: of tensorcircuit.cloud.abstraction.Task.results:17
+msgid ""
+"if given, directly use the calibriation info on ``readout_mit``, defaults"
+" to None"
msgstr ""
-#: of tensorcircuit.cons.runtime_dtype:3
-msgid "\"complex64\" or \"complex128\", defaults to None (\"complex64\")"
+#: of tensorcircuit.cloud.abstraction.Task.results:20
+msgid "option dict for ``ReadoutMit.apply_correction``, defaults to None"
msgstr ""
-#: of tensorcircuit.cons.runtime_dtype:5
-msgid "complex dtype str and real dtype str"
-msgstr ""
-
-#: of tensorcircuit.cons.set_tensornetwork_backend:1
-msgid "To set the runtime backend of tensorcircuit."
+#: of tensorcircuit.cloud.abstraction.Task.results:22
+msgid "count dict results"
msgstr ""
-#: of tensorcircuit.cons.set_tensornetwork_backend:3
-msgid ""
-"Note: ``tc.set_backend`` and ``tc.cons.set_tensornetwork_backend`` are "
-"the same."
+#: of tensorcircuit.cloud.abstraction.Task.state:1
+msgid "Query the current task status"
msgstr ""
-#: of tensorcircuit.cons.set_tensornetwork_backend:27
-msgid ""
-"\"numpy\", \"tensorflow\", \"jax\", \"pytorch\". defaults to None, which "
-"gives the same behavior as "
-"``tensornetwork.backend_contextmanager.get_default_backend()``."
+#: of tensorcircuit.cloud.abstraction.TaskException:1
+msgid "Bases: :py:class:`~tensorcircuit.cloud.abstraction.TCException`"
msgstr ""
-#: of tensorcircuit.cons.set_tensornetwork_backend:30
-msgid "Whether the object should be set as global."
+#: of tensorcircuit.cloud.abstraction.TaskFailed:1
+#: tensorcircuit.cloud.abstraction.TaskUnfinished:1
+msgid "Bases: :py:class:`~tensorcircuit.cloud.abstraction.TaskException`"
msgstr ""
-#: of tensorcircuit.cons.set_function_backend:1
-msgid "Function decorator to set function-level runtime backend"
+#: ../../source/api/cloud/apis.rst:2
+msgid "tensorcircuit.cloud.apis"
msgstr ""
-#: of tensorcircuit.cons.set_function_backend:5
-msgid "Decorated function"
+#: of tensorcircuit.cloud.apis:1
+msgid "main entrypoints of cloud module"
msgstr ""
-#: of tensorcircuit.cons.set_function_contractor:1
-msgid "Function decorate to change function-level contractor"
+#: of tensorcircuit.cloud.apis.get_device:1
+#: tensorcircuit.cloud.apis.set_device:1
+msgid "set the default device"
msgstr ""
-#: of tensorcircuit.cons.set_function_dtype:1
-msgid "Function decorator to set function-level numerical dtype"
+#: of tensorcircuit.cloud.apis.get_device:3
+#: tensorcircuit.cloud.apis.set_device:3
+msgid "provider of the device, defaults to None"
msgstr ""
-#: of tensorcircuit.cons.set_function_dtype:3
-msgid "\"complex64\" or \"complex128\", defaults to None"
+#: of tensorcircuit.cloud.apis.get_device:5
+#: tensorcircuit.cloud.apis.set_device:5
+msgid "the device, defaults to None"
msgstr ""
-#: of tensorcircuit.cons.set_function_dtype:5
-msgid "The decorated function"
+#: of tensorcircuit.cloud.apis.get_device:7
+#: tensorcircuit.cloud.apis.set_device:7
+msgid ""
+"whether set, defaults to True, if False, equivalent to ``get_device``, "
+"defaults to True"
msgstr ""
-#: of tensorcircuit.cons.split_rules:1
-msgid "Obtain the direcionary of truncation rules"
+#: of tensorcircuit.cloud.apis.get_provider:1
+#: tensorcircuit.cloud.apis.set_provider:1
+msgid "set default provider for the program"
msgstr ""
-#: of tensorcircuit.cons.split_rules:3
-#: tensorcircuit.mps_base.FiniteMPS.apply_two_site_gate:11
-msgid "The maximum number of singular values to keep."
+#: of tensorcircuit.cloud.apis.get_provider:5
+#: tensorcircuit.cloud.apis.set_provider:5
+msgid "whether set, defaults to True, if False, equivalent to ``get_provider``"
msgstr ""
-#: of tensorcircuit.cons.split_rules:5
-#: tensorcircuit.mps_base.FiniteMPS.apply_two_site_gate:13
-msgid "The maximum allowed truncation error."
+#: of tensorcircuit.cloud.apis.get_task:1
+msgid ""
+"Get ``Task`` object from task string, the binding device can also be "
+"provided"
msgstr ""
-#: ../../source/api/densitymatrix.rst:2
-msgid "tensorcircuit.densitymatrix"
+#: of tensorcircuit.cloud.apis.get_task_details:1
+msgid "Get task details dict given task id"
msgstr ""
-#: of tensorcircuit.densitymatrix:1
-msgid "Quantum circuit class but with density matrix simulator"
+#: of tensorcircuit.cloud.apis.get_task_details:7
+msgid ""
+"whether make the returned dict more readable and more phythonic, defaults"
+" to False"
msgstr ""
-#: of tensorcircuit.densitymatrix.DMCircuit.__init__:1
-msgid "The density matrix simulator based on tensornetwork engine."
+#: of tensorcircuit.cloud.apis.get_token:1
+msgid ""
+"Get API token setted for given provider or device, if no device token "
+"saved, the corresponding provider tken is returned"
msgstr ""
-#: of tensorcircuit.densitymatrix.DMCircuit.__init__:3
-msgid "Number of qubits"
+#: of tensorcircuit.cloud.apis.list_devices:1
+msgid "List all devices under a provider"
msgstr ""
-#: of tensorcircuit.densitymatrix.DMCircuit.__init__:5
-msgid "if True, nothing initialized, only for internal use, defaults to False"
+#: of tensorcircuit.cloud.apis.list_properties:1
+msgid "List properties of a given device"
msgstr ""
-#: of tensorcircuit.densitymatrix.DMCircuit.__init__:7
-msgid "the state input for the circuit, defaults to None"
+#: of tensorcircuit.cloud.apis.list_properties:9
+msgid "Propeties dict"
msgstr ""
-#: of tensorcircuit.densitymatrix.DMCircuit.__init__:9
-msgid "QuVector for a MPS like initial pure state."
+#: of tensorcircuit.cloud.apis.list_providers:1
+msgid "list all cloud providers that tensorcircuit supports"
msgstr ""
-#: of tensorcircuit.densitymatrix.DMCircuit.__init__:11
-msgid "the density matrix input for the circuit, defaults to None"
+#: of tensorcircuit.cloud.apis.list_tasks:1
+msgid "List tasks based on given filters"
msgstr ""
-#: of tensorcircuit.densitymatrix.DMCircuit.__init__:13
-msgid "QuOperator for a MPO like initial density matrix."
+#: of tensorcircuit.cloud.apis.list_tasks:9
+msgid "list of task object that satisfy these filter criteria"
msgstr ""
-#: of tensorcircuit.densitymatrix.DMCircuit.densitymatrix:1
-msgid "Return the output density matrix of the circuit."
+#: of tensorcircuit.cloud.apis.resubmit_task:1
+msgid "Rerun the given task"
msgstr ""
-#: of tensorcircuit.densitymatrix.DMCircuit.densitymatrix:3
-msgid ""
-"check whether the final return is a legal density matrix, defaults to "
-"False"
+#: of tensorcircuit.cloud.apis.set_token:1
+msgid "Set API token for given provider or specifically to given device"
msgstr ""
-#: of tensorcircuit.densitymatrix.DMCircuit.densitymatrix:5
-msgid "whether to reuse previous results, defaults to True"
+#: of tensorcircuit.cloud.apis.set_token:3
+msgid "the API token, defaults to None"
msgstr ""
-#: of tensorcircuit.densitymatrix.DMCircuit.densitymatrix:7
-msgid "The output densitymatrix in 2D shape tensor form"
+#: of tensorcircuit.cloud.apis.set_token:9
+msgid "whether save on the disk, defaults to True"
msgstr ""
-#: of tensorcircuit.densitymatrix.DMCircuit.expectation:6
-msgid "whether contract the density matrix in advance, defaults to True"
+#: of tensorcircuit.cloud.apis.set_token:11
+msgid "if True, clear all token saved, defaults to False"
msgstr ""
-#: of tensorcircuit.densitymatrix.DMCircuit.get_dm_as_quoperator:1
-msgid ""
-"Get the representation of the output state in the form of ``QuOperator`` "
-"while maintaining the circuit uncomputed"
+#: of tensorcircuit.cloud.apis.submit_task:1
+msgid "submit task to the cloud platform, batch submission default enabled"
msgstr ""
-#: of tensorcircuit.densitymatrix.DMCircuit.get_dm_as_quoperator:4
-msgid "``QuOperator`` representation of the output state from the circuit"
+#: of tensorcircuit.cloud.apis.submit_task:5
+msgid ":py:meth:`tensorcircuit.cloud.tencent.submit_task`"
msgstr ""
-#: of tensorcircuit.densitymatrix.DMCircuit.to_circuit:1
+#: of tensorcircuit.cloud.apis.submit_task:13
msgid ""
-"convert into state simulator (current implementation ignores all noise "
-"channels)"
+"all necessary keywords arguments for task submission, see detailed API in"
+" each provider backend: 1. tencent - "
+":py:meth:`tensorcircuit.cloud.tencent.submit_task`"
msgstr ""
-#: of tensorcircuit.densitymatrix.DMCircuit.to_circuit:4
-msgid "kws to initialize circuit object, defaults to None"
+#: of tensorcircuit.cloud.apis.submit_task:17
+msgid "The task object"
msgstr ""
-#: of tensorcircuit.densitymatrix.DMCircuit.to_circuit:7
-msgid "Circuit with no noise"
+#: ../../source/api/cloud/config.rst:2
+msgid "tensorcircuit.cloud.config"
msgstr ""
-#: of tensorcircuit.densitymatrix.DMCircuit.wavefunction:1
-msgid ""
-"get the wavefunction of outputs, raise error if the final state is not "
-"purified [Experimental: the phase factor is not fixed for different "
-"backend]"
+#: ../../source/api/cloud/local.rst:2
+msgid "tensorcircuit.cloud.local"
msgstr ""
-#: of tensorcircuit.densitymatrix.DMCircuit.wavefunction:5
-msgid "wavefunction vector"
+#: of tensorcircuit.cloud.local:1
+msgid "Cloud provider from local machine"
msgstr ""
-#: of tensorcircuit.densitymatrix.DMCircuit2:1
-msgid "Bases: :py:class:`tensorcircuit.densitymatrix.DMCircuit`"
+#: ../../source/api/cloud/quafu_provider.rst:2
+msgid "tensorcircuit.cloud.quafu_provider"
msgstr ""
-#: ../../source/api/experimental.rst:2
-msgid "tensorcircuit.experimental"
+#: ../../source/api/cloud/tencent.rst:2
+msgid "tensorcircuit.cloud.tencent"
msgstr ""
-#: of tensorcircuit.experimental:1
-msgid "Experimental features"
+#: of tensorcircuit.cloud.tencent:1
+msgid "Cloud provider from Tencent"
msgstr ""
-#: of tensorcircuit.experimental.hamiltonian_evol:1
+#: of tensorcircuit.cloud.tencent.submit_task:1
msgid ""
-"Fast implementation of static full Hamiltonian evolution (default as "
-"imaginary time)"
+"Submit task via tencent provider, we suggest to enable one of the "
+"compiling functionality: either in tc: frontend or in qos: backend. If "
+"both are enabled, try on your own risk, some qubit mapping may fail "
+"silently. If the user directly provide ``source`` or qiskit Circuit in "
+"``circuit``, the qubit mapping should be taken care of by the users."
msgstr ""
-#: of tensorcircuit.experimental.hamiltonian_evol:13
-msgid "result dynamics on ``tlist``"
+#: of tensorcircuit.cloud.tencent.submit_task:10
+msgid "language choice for ``source``, defaults to \"OPENQASM\""
msgstr ""
-#: of tensorcircuit.experimental.parameter_shift_grad:1
-msgid ""
-"similar to `grad` function but using parameter shift internally instead "
-"of AD, vmap is utilized for evaluation, so the speed is still ok"
+#: of tensorcircuit.cloud.tencent.submit_task:12
+msgid "number of measurement shots, defaults to 1024"
msgstr ""
-#: of tensorcircuit.experimental.parameter_shift_grad:4
-#: tensorcircuit.experimental.parameter_shift_grad_v2:6
-msgid "quantum function with weights in and expectation out"
+#: of tensorcircuit.cloud.tencent.submit_task:14
+msgid "submit task protocol version, defaults to \"1\""
msgstr ""
-#: of tensorcircuit.experimental.parameter_shift_grad:6
-#: tensorcircuit.experimental.parameter_shift_grad_v2:8
-msgid "label which args should be differentiated, defaults to 0"
+#: of tensorcircuit.cloud.tencent.submit_task:16
+msgid "priority for the task queue, defaults to 1"
msgstr ""
-#: of tensorcircuit.experimental.parameter_shift_grad:9
-#: tensorcircuit.experimental.parameter_shift_grad_v2:11
-msgid "whether jit the original function `f` at the beginning, defaults to False"
+#: of tensorcircuit.cloud.tencent.submit_task:18
+msgid "tensorcircuit or qiskit circuit object, defaults to None"
msgstr ""
-#: of tensorcircuit.experimental.parameter_shift_grad:12
-#: tensorcircuit.experimental.parameter_shift_grad_v2:14
-msgid ""
-"two floats for the delta shift on the numerator and dominator, defaults "
-"to (pi/2, 2) for parameter shift"
+#: of tensorcircuit.cloud.tencent.submit_task:20
+msgid "directly given circuit representation, defaults to None"
msgstr ""
-#: of tensorcircuit.experimental.parameter_shift_grad:15
-#: tensorcircuit.experimental.parameter_shift_grad_v2:17
-msgid "the grad function"
+#: of tensorcircuit.cloud.tencent.submit_task:22
+msgid "remarks on the task, defaults to None"
msgstr ""
-#: of tensorcircuit.experimental.parameter_shift_grad_v2:1
-msgid ""
-"similar to `grad` function but using parameter shift internally instead "
-"of AD, vmap is utilized for evaluation, v2 also supports random generator"
-" for finite measurememt shot, only jax backend is supported, since no "
-"vmap randomness is available in tensorflow"
+#: of tensorcircuit.cloud.tencent.submit_task:24
+msgid "whether compiling in tc via qiskit compiling system, defaults to False"
msgstr ""
-#: ../../source/api/gates.rst:2
-msgid "tensorcircuit.gates"
+#: of tensorcircuit.cloud.tencent.submit_task:27
+msgid "alias for the argument ``compiling``"
msgstr ""
-#: of tensorcircuit.gates:1
-msgid ""
-"Declarations of single-qubit and two-qubit gates and their corresponding "
-"matrix."
+#: of tensorcircuit.cloud.tencent.submit_task:29
+msgid "compiling options for qiskit ``transpile`` method, defaults to None"
msgstr ""
-#: of tensorcircuit.gates.Gate:1
-msgid "Bases: :py:class:`tensornetwork.network_components.Node`"
+#: of tensorcircuit.cloud.tencent.submit_task:32
+msgid "alias for the argument ``compiled_options``"
msgstr ""
-#: of tensorcircuit.gates.Gate:1
-msgid "Wrapper of tn.Node, quantum gate"
+#: of tensorcircuit.cloud.tencent.submit_task:34
+msgid "whether to insert swap if necessary in qos, defaults to True"
msgstr ""
-#: of tensornetwork.network_components.Node.__init__:1
-msgid "Create a node."
+#: of tensorcircuit.cloud.tencent.submit_task:36
+msgid "whether to compile the gate in qos, defaults to True"
msgstr ""
-#: of tensornetwork.network_components.Node.__init__:3
-msgid ""
-"The concrete that is represented by this node, or a `AbstractNode` "
-"object. If a tensor is passed, it can be be either a numpy array or the "
-"tensor-type of the used backend. If a `AbstractNode` is passed, the "
-"passed node has to have the same backend as given by `backend`."
+#: of tensorcircuit.cloud.tencent.submit_task:38
+msgid "whether to run an initial qubit mapping in qos, defaults to False"
msgstr ""
-#: of tensornetwork.network_components.Node.__init__:7
-msgid "Name of the node. Used primarily for debugging."
+#: of tensorcircuit.cloud.tencent.submit_task:41
+msgid ""
+"when dry run, only compiled circuit is returned (no real circuit "
+"execution), defaults to False"
msgstr ""
-#: of tensornetwork.network_components.AbstractNode.add_axis_names:3
-#: tensornetwork.network_components.Node.__init__:8
-msgid "List of names for each of the tensor's axes."
+#: of tensorcircuit.cloud.tencent.submit_task:44
+msgid "Task object or List of Task for batch submission"
msgstr ""
-#: of tensornetwork.network_components.Node.__init__:9
-msgid "The name of the backend or an instance of a `AbstractBackend`."
+#: ../../source/api/cloud/utils.rst:2
+msgid "tensorcircuit.cloud.utils"
msgstr ""
-#: of tensornetwork.network_components.AbstractNode.add_axis_names:5
-#: tensornetwork.network_components.Node.__init__:11
-msgid ""
-"If there is a repeated name in `axis_names` or if the length doesn't "
-"match the shape of the tensor."
+#: of tensorcircuit.cloud.utils:1
+msgid "utility functions for cloud connection"
msgstr ""
-#: of tensornetwork.network_components.AbstractNode.add_axis_names:1
-msgid "Add axis names to a Node."
+#: of tensorcircuit.cloud.utils.HttpStatusError:1
+msgid "Bases: :py:class:`Exception`"
msgstr ""
-#: of tensornetwork.network_components.AbstractNode.add_edge:1
-msgid "Add an edge to the node on the given axis."
+#: of tensorcircuit.cloud.utils.HttpStatusError:1
+msgid "Used when the return request has http code beyond 200"
msgstr ""
-#: of tensornetwork.network_components.AbstractNode.add_edge:3
-msgid "The edge to add."
+#: of tensorcircuit.cloud.utils.set_proxy:1
+msgid ""
+"str. format as \"http://user:passwd@host:port\" user passwd part can be "
+"omitted if not set. None for turning off the proxy."
msgstr ""
-#: of tensornetwork.network_components.AbstractNode.add_edge:4
-msgid "The axis the edge points to."
+#: ../../source/api/cloud/wrapper.rst:2
+msgid "tensorcircuit.cloud.wrapper"
msgstr ""
-#: of tensornetwork.network_components.AbstractNode.add_edge:5
-msgid "If true, replace the existing edge with the new one."
+#: of tensorcircuit.cloud.wrapper:1
+msgid "higher level wrapper shortcut for submit_task"
msgstr ""
-#: of tensornetwork.network_components.AbstractNode.add_edge:7
-msgid "If the edge on axis is not dangling."
+#: of tensorcircuit.cloud.wrapper.batch_expectation_ps:1
+msgid ""
+"Unified interface to compute the Pauli string expectation lists or sums "
+"via simulation or on real qpu. Error mitigation, circuit compilation and "
+"Pauli string grouping are all built-in."
msgstr ""
-#: of tensornetwork.network_components.Node.from_serial_dict:1
-msgid "Return a node given a serialized dict representing it."
+#: of tensorcircuit.cloud.wrapper.batch_expectation_ps:4
+msgid ""
+"One line access to unlock the whole power or real quantum hardware on "
+"quantum cloud."
msgstr ""
-#: of tensornetwork.network_components.Node.from_serial_dict:3
-msgid "A python dict representing a serialized node."
+#: of tensorcircuit.cloud.wrapper.batch_expectation_ps:18
+msgid "The target circuit to compute expectation"
msgstr ""
-#: of tensornetwork.network_components.Node.from_serial_dict:5
-msgid "A node."
+#: of tensorcircuit.cloud.wrapper.batch_expectation_ps:20
+msgid ""
+"List of Pauli string list, eg. [[0, 1, 0], [2, 3, 3]] represents [X1, "
+"Y0Z1Z2]."
msgstr ""
-#: of tensornetwork.network_components.AbstractNode.get_all_dangling:1
-msgid "Return the set of dangling edges connected to this node."
+#: of tensorcircuit.cloud.wrapper.batch_expectation_ps:22
+msgid ""
+"The device str or object for quantum cloud module, defaults to None, None"
+" is for analytical exact simulation"
msgstr ""
-#: of tensornetwork.network_components.AbstractNode.get_all_nondangling:1
-msgid "Return the set of nondangling edges connected to this node."
+#: of tensorcircuit.cloud.wrapper.batch_expectation_ps:25
+msgid ""
+"List of float to indicate the final return is the weighted sum of Pauli "
+"string expectations, e.g. [2., -0.3] represents the final results is 2* "
+"``pss`` [0]-0.3* ``pss`` [1] defaults to None, None indicate the list of "
+"expectations for ``pss`` are all returned"
msgstr ""
-#: of tensornetwork.network_components.AbstractNode.get_axis_number:1
-msgid "Get the axis number for a given axis name or value."
+#: of tensorcircuit.cloud.wrapper.batch_expectation_ps:29
+msgid "measurement shots for each expectation estimation, defaults to 8192"
msgstr ""
-#: of tensornetwork.network_components.AbstractNode.get_dimension:1
-msgid "Get the dimension of the given axis."
+#: of tensorcircuit.cloud.wrapper.batch_expectation_ps:31
+msgid "whether enable readout error mitigation for the result, defaults to True"
msgstr ""
-#: of tensornetwork.network_components.AbstractNode.get_dimension:3
-msgid "The axis of the underlying tensor."
+#: of tensorcircuit.cloud.wrapper.batch_expectation_ps:33
+msgid ""
+"List of Pauli string expectation or a weighted sum float for Pauli "
+"strings, depending on ``ws``"
msgstr ""
-#: of tensornetwork.network_components.AbstractNode.get_dimension:5
-msgid "The dimension of the given axis."
+#: of tensorcircuit.cloud.wrapper.sample_expectation_ps:1
+msgid ""
+"Deprecated, please use "
+":py:meth:`tensorcircuit.cloud.wrapper.batch_expectation_ps`."
msgstr ""
-#: of tensornetwork.network_components.AbstractNode.get_dimension:7
-msgid "if axis isn't an int or if axis is too large or small."
+#: ../../source/api/compiler.rst:2
+msgid "tensorcircuit.compiler"
msgstr ""
-#: of tensornetwork.network_components.AbstractNode.get_rank:1
-msgid "Return rank of tensor represented by self."
+#: ../../source/api/compiler/composed_compiler.rst:2
+msgid "tensorcircuit.compiler.composed_compiler"
msgstr ""
-#: of tensornetwork.network_components.AbstractNode.reorder_axes:1
-msgid "Reorder axes of the node's tensor."
+#: of tensorcircuit.compiler.composed_compiler:1
+msgid "object oriented compiler pipeline"
msgstr ""
-#: of tensornetwork.network_components.AbstractNode.reorder_axes:3
-msgid "This will also update all of the node's edges."
+#: of tensorcircuit.compiler.composed_compiler.DefaultCompiler:1
+msgid "Bases: :py:class:`~tensorcircuit.compiler.composed_compiler.Compiler`"
msgstr ""
-#: of tensornetwork.network_components.AbstractNode.reorder_axes:5
-msgid "Permutation of the dimensions of the node's tensor."
+#: of tensorcircuit.compiler.composed_compiler.DefaultCompiler.__init__:1
+msgid ""
+"A fallback choice to compile circuit running on tencent quantum cloud "
+"with rz as native gate"
msgstr ""
-#: of tensornetwork.network_components.AbstractNode.reorder_axes:7
-#: tensornetwork.network_components.AbstractNode.reorder_edges:9
-msgid "This node post reordering."
+#: of tensorcircuit.compiler.composed_compiler.DefaultCompiler.__init__:3
+msgid ""
+"qiskit compiled options to be added options documented in "
+"`qiskit.transpile` method, to use tencent quantum cloud, "
+"`{\"coupling_map\": d.topology()}` is in general enough, where d is a "
+"device object, defaults to None, i.e. no qubit mapping is applied"
msgstr ""
-#: of tensornetwork.network_components.AbstractNode.reorder_axes:9
-#: tensornetwork.network_components.AbstractNode.reorder_edges:12
-msgid "If the Node has no tensor."
+#: ../../source/api/compiler/qiskit_compiler.rst:2
+msgid "tensorcircuit.compiler.qiskit_compiler"
msgstr ""
-#: of tensornetwork.network_components.AbstractNode.reorder_edges:1
-msgid "Reorder the edges for this given Node."
+#: of tensorcircuit.compiler.qiskit_compiler:1
+msgid "compiler interface via qiskit"
msgstr ""
-#: of tensornetwork.network_components.AbstractNode.reorder_edges:3
+#: of tensorcircuit.compiler.qiskit_compiler.qiskit_compile:1
msgid ""
-"This will reorder the node's edges and transpose the underlying tensor "
-"accordingly."
+"compile the circuit using ``qiskit.transpile`` method with some tricks "
+"and hacks"
msgstr ""
-#: of tensornetwork.network_components.AbstractNode.reorder_edges:6
-msgid "List of edges. The order in the list determines the new edge ordering."
+#: of tensorcircuit.compiler.qiskit_compiler.qiskit_compile:3
+msgid "circuit in ``tc.Circuit`` or ``qiskit.QuantumCircuit`` form"
msgstr ""
-#: of tensornetwork.network_components.AbstractNode.reorder_edges:11
-msgid ""
-"If either the list of edges is not the same as expected or if you try"
-" to reorder with a trace edge."
+#: of tensorcircuit.compiler.qiskit_compiler.qiskit_compile:5
+msgid "info for qubit mappings, defaults to None"
msgstr ""
-#: of tensornetwork.network_components.Node.to_serial_dict:1
-msgid "Return a serializable dict representing the node."
+#: of tensorcircuit.compiler.qiskit_compiler.qiskit_compile:7
+msgid "output circuit format, defaults to \"tc\""
msgstr ""
-#: of tensornetwork.network_components.Node.to_serial_dict:3
-msgid "Returns: A dict object."
+#: of tensorcircuit.compiler.qiskit_compiler.qiskit_compile:9
+msgid "``qiskit.transpile`` options in a dict, defaults to None"
msgstr ""
-#: of tensorcircuit.gates.GateVF:1
-msgid "Bases: :py:class:`tensorcircuit.gates.GateF`"
+#: of tensorcircuit.compiler.qiskit_compiler.qiskit_compile:11
+msgid "Tuple containing the output circuit and the qubit mapping info dict"
msgstr ""
-#: of tensorcircuit.gates.any_gate:1
-msgid "Note one should provide the gate with properly reshaped."
+#: ../../source/api/compiler/simple_compiler.rst:2
+msgid "tensorcircuit.compiler.simple_compiler"
msgstr ""
-#: of tensorcircuit.gates.any_gate:3
-msgid "corresponding gate"
+#: of tensorcircuit.compiler.simple_compiler:1
+msgid "Very simple transformations that qiskit may even fail or hard to control"
msgstr ""
-#: of tensorcircuit.gates.any_gate:5
-msgid "The name of the gate."
+#: ../../source/api/cons.rst:2
+msgid "tensorcircuit.cons"
msgstr ""
-#: of tensorcircuit.gates.any_gate:7
-msgid "the resulted gate"
+#: of tensorcircuit.cons:1
+msgid "Constants and setups"
msgstr ""
-#: of tensorcircuit.gates.num_to_tensor:1
-msgid "Convert the inputs to Tensor with specified dtype."
+#: of tensorcircuit.cons.get_contractor:1 tensorcircuit.cons.set_contractor:1
+msgid ""
+"To set runtime contractor of the tensornetwork for a better contraction "
+"path. For more information on the usage of contractor, please refer to "
+"independent tutorial."
msgstr ""
-#: of tensorcircuit.gates.num_to_tensor:35
-msgid "inputs"
+#: of tensorcircuit.cons.get_contractor:4 tensorcircuit.cons.set_contractor:4
+msgid ""
+"\"auto\", \"greedy\", \"branch\", \"plain\", \"tng\", \"custom\", "
+"\"custom_stateful\". defaults to None (\"auto\")"
msgstr ""
-#: of tensorcircuit.gates.num_to_tensor:37
-msgid "dtype of the output Tensors"
+#: of tensorcircuit.cons.get_contractor:6 tensorcircuit.cons.set_contractor:6
+msgid "Valid for \"custom\" or \"custom_stateful\" as method, defaults to None"
msgstr ""
-#: of tensorcircuit.gates.num_to_tensor:39
-msgid "List of Tensors"
+#: of tensorcircuit.cons.get_contractor:8 tensorcircuit.cons.set_contractor:8
+msgid ""
+"It is not very useful, as ``memory_limit`` leads to ``branch`` "
+"contraction instead of ``greedy`` which is rather slow, defaults to None"
msgstr ""
-#: of tensorcircuit.gates.bmatrix:1
-msgid "Returns a :math:`\\LaTeX` bmatrix."
+#: of tensorcircuit.cons.get_contractor:11 tensorcircuit.cons.set_contractor:11
+msgid "Tensornetwork version is too low to support some of the contractors."
msgstr ""
-#: of tensorcircuit.gates.bmatrix:13
-msgid "Formatted Display:"
+#: of tensorcircuit.cons.get_contractor:12 tensorcircuit.cons.set_contractor:12
+msgid "Unknown method options."
msgstr ""
-#: of tensorcircuit.gates.bmatrix:15
+#: of tensorcircuit.cons.get_contractor:13 tensorcircuit.cons.set_contractor:13
+msgid "The new tensornetwork with its contractor set."
+msgstr ""
+
+#: of tensorcircuit.cons.get_dtype:1 tensorcircuit.cons.set_dtype:1
+msgid "Set the global runtime numerical dtype of tensors."
+msgstr ""
+
+#: of tensorcircuit.cons.get_dtype:3 tensorcircuit.cons.set_dtype:3
msgid ""
-"\\begin{bmatrix} 1.+0.j & 0.+0.j\\\\ 0.+0.j & 1.+0.j \\end{bmatrix}"
-"\n"
-"\n"
+"\"complex64\"/\"float32\" or \"complex128\"/\"float64\", defaults to "
+"None, which is equivalent to \"complex64\"."
msgstr ""
-#: of tensorcircuit.gates.bmatrix:18
-msgid "2D numpy array"
+#: of tensorcircuit.cons.get_dtype:6 tensorcircuit.cons.set_dtype:6
+msgid "complex dtype str and the corresponding real dtype str"
msgstr ""
-#: of tensorcircuit.gates.bmatrix:20
-msgid "ValueError(\"bmatrix can at most display two dimensions\")"
+#: of tensorcircuit.cons.plain_contractor:1
+msgid "The naive state-vector simulator contraction path."
msgstr ""
-#: of tensorcircuit.gates.bmatrix:21
-msgid ":math:`\\LaTeX`-formatted string for bmatrix of the array a"
+#: of tensorcircuit.cons.plain_contractor:3
+msgid "The list of ``tn.Node``."
msgstr ""
-#: of tensorcircuit.gates.cr_gate:1
-msgid ""
-"Controlled rotation gate. When the control qubit is 1, `rgate` is applied"
-" to the target qubit."
+#: of tensorcircuit.cons.plain_contractor:5
+msgid "The list of dangling node edges, defaults to be None."
msgstr ""
-#: of tensorcircuit.gates.cr_gate:3 tensorcircuit.gates.cr_gate:5
-#: tensorcircuit.gates.cr_gate:7 tensorcircuit.gates.exponential_gate:12
-#: tensorcircuit.gates.exponential_gate_unity:13
-#: tensorcircuit.gates.iswap_gate:12 tensorcircuit.gates.r_gate:9
-#: tensorcircuit.gates.r_gate:11 tensorcircuit.gates.r_gate:13
-#: tensorcircuit.gates.rgate_theoretical:8
-#: tensorcircuit.gates.rgate_theoretical:10
-#: tensorcircuit.gates.rgate_theoretical:12 tensorcircuit.gates.rx_gate:6
-#: tensorcircuit.gates.rxx_gate:13 tensorcircuit.gates.ry_gate:6
-#: tensorcircuit.gates.ryy_gate:13 tensorcircuit.gates.rz_gate:6
-#: tensorcircuit.gates.rzz_gate:13
-msgid "angle in radians"
+#: of tensorcircuit.cons.plain_contractor:7
+msgid "The ``tn.Node`` after contraction"
msgstr ""
-#: of tensorcircuit.gates.cr_gate:10
-msgid "CR Gate"
+#: of tensorcircuit.cons.runtime_backend:1
+msgid "Context manager to set with-level runtime backend"
msgstr ""
-#: of tensorcircuit.gates.exponential_gate_unity:1
-#: tensorcircuit.gates.rxx_gate:1 tensorcircuit.gates.ryy_gate:1
-#: tensorcircuit.gates.rzz_gate:1
-msgid ""
-"Faster exponential gate directly implemented based on RHS. Only works "
-"when :math:`U^2 = I` is an identity matrix."
+#: of tensorcircuit.cons.runtime_backend:3
+#: tensorcircuit.cons.set_function_backend:3
+msgid "\"numpy\", \"tensorflow\", \"jax\", \"pytorch\", defaults to None"
msgstr ""
-#: of tensorcircuit.gates.exponential_gate_unity:3
-#: tensorcircuit.gates.rxx_gate:3 tensorcircuit.gates.ryy_gate:3
-#: tensorcircuit.gates.rzz_gate:3
-msgid ""
-"\\textrm{exp}(U) &= e^{-j \\theta U} \\\\\n"
-" &= \\cos(\\theta) I - j \\sin(\\theta) U \\\\\n"
-"\n"
+#: of tensorcircuit.cons.runtime_backend tensorcircuit.cons.runtime_contractor
+#: tensorcircuit.cons.runtime_dtype
+msgid "yield"
msgstr ""
-#: of tensorcircuit.gates.exponential_gate:6
-#: tensorcircuit.gates.exponential_gate_unity:7
-#: tensorcircuit.gates.multicontrol_gate:7 tensorcircuit.gates.rxx_gate:7
-#: tensorcircuit.gates.ryy_gate:7 tensorcircuit.gates.rzz_gate:7
-msgid "input unitary :math:`U`"
+#: of tensorcircuit.cons.runtime_backend:5
+msgid "the backend object"
msgstr ""
-#: of tensorcircuit.gates.exponential_gate:8
-#: tensorcircuit.gates.exponential_gate:10
-#: tensorcircuit.gates.exponential_gate_unity:9
-#: tensorcircuit.gates.exponential_gate_unity:11 tensorcircuit.gates.rxx_gate:9
-#: tensorcircuit.gates.rxx_gate:11 tensorcircuit.gates.ryy_gate:9
-#: tensorcircuit.gates.ryy_gate:11 tensorcircuit.gates.rzz_gate:9
-#: tensorcircuit.gates.rzz_gate:11
-msgid "alias for the argument ``unitary``"
+#: of tensorcircuit.cons.runtime_contractor:1
+msgid "Context manager to change with-levek contractor"
msgstr ""
-#: of tensorcircuit.gates.exponential_gate_unity:15
-#: tensorcircuit.gates.rxx_gate:15 tensorcircuit.gates.ryy_gate:15
-#: tensorcircuit.gates.rzz_gate:15
-msgid "if True, the angel theta is mutiplied by 1/2, defaults to False"
+#: of tensorcircuit.cons.runtime_dtype:1
+msgid "Context manager to set with-level runtime dtype"
msgstr ""
-#: of tensorcircuit.gates.exponential_gate:14
-#: tensorcircuit.gates.exponential_gate_unity:18
-#: tensorcircuit.gates.rxx_gate:18 tensorcircuit.gates.ryy_gate:18
-#: tensorcircuit.gates.rzz_gate:18
-msgid "suffix of Gate name"
+#: of tensorcircuit.cons.runtime_dtype:3
+msgid "\"complex64\" or \"complex128\", defaults to None (\"complex64\")"
msgstr ""
-#: of tensorcircuit.gates.exponential_gate:15
-#: tensorcircuit.gates.exponential_gate_unity:20
-#: tensorcircuit.gates.rxx_gate:20 tensorcircuit.gates.ryy_gate:20
-#: tensorcircuit.gates.rzz_gate:20
-msgid "Exponential Gate"
+#: of tensorcircuit.cons.runtime_dtype:5
+msgid "complex dtype str and real dtype str"
msgstr ""
-#: of tensorcircuit.gates.exponential_gate:1
-msgid "Exponential gate."
+#: of tensorcircuit.cons.set_tensornetwork_backend:1
+msgid "To set the runtime backend of tensorcircuit."
msgstr ""
-#: of tensorcircuit.gates.exponential_gate:3
+#: of tensorcircuit.cons.set_tensornetwork_backend:3
msgid ""
-"\\textrm{exp}(U) = e^{-j \\theta U}\n"
-"\n"
+"Note: ``tc.set_backend`` and ``tc.cons.set_tensornetwork_backend`` are "
+"the same."
msgstr ""
-#: of tensorcircuit.gates.get_u_parameter:1
-msgid "From the single qubit unitary to infer three angles of IBMUgate,"
+#: of tensorcircuit.cons.set_tensornetwork_backend:27
+msgid ""
+"\"numpy\", \"tensorflow\", \"jax\", \"pytorch\". defaults to None, which "
+"gives the same behavior as "
+"``tensornetwork.backend_contextmanager.get_default_backend()``."
msgstr ""
-#: of tensorcircuit.gates.get_u_parameter:3
-msgid "numpy array, no backend agnostic version for now"
+#: of tensorcircuit.cons.set_tensornetwork_backend:30
+msgid "Whether the object should be set as global."
msgstr ""
-#: of tensorcircuit.gates.get_u_parameter:5
-msgid "theta, phi, lbd"
+#: of tensorcircuit.cons.set_function_backend:1
+msgid "Function decorator to set function-level runtime backend"
msgstr ""
-#: of tensorcircuit.gates.iswap_gate:1
-msgid "iSwap gate."
+#: of tensorcircuit.cons.set_function_backend:5
+msgid "Decorated function"
msgstr ""
-#: of tensorcircuit.gates.iswap_gate:3
-msgid ""
-"\\textrm{iSwap}(\\theta) =\n"
-"\\begin{pmatrix}\n"
-" 1 & 0 & 0 & 0\\\\\n"
-" 0 & \\cos(\\frac{\\pi}{2} \\theta ) & j \\sin(\\frac{\\pi}{2} \\theta"
-" ) & 0\\\\\n"
-" 0 & j \\sin(\\frac{\\pi}{2} \\theta ) & \\cos(\\frac{\\pi}{2} \\theta"
-" ) & 0\\\\\n"
-" 0 & 0 & 0 & 1\\\\\n"
-"\\end{pmatrix}\n"
-"\n"
+#: of tensorcircuit.cons.set_function_contractor:1
+msgid "Function decorate to change function-level contractor"
msgstr ""
-#: of tensorcircuit.gates.iswap_gate:14
-msgid "iSwap Gate"
+#: of tensorcircuit.cons.set_function_dtype:1
+msgid "Function decorator to set function-level numerical dtype"
msgstr ""
-#: of tensorcircuit.gates.matrix_for_gate:1
-msgid "Convert Gate to numpy array."
+#: of tensorcircuit.cons.set_function_dtype:3
+msgid "\"complex64\" or \"complex128\", defaults to None"
msgstr ""
-#: of tensorcircuit.gates.matrix_for_gate:10
-msgid "input Gate"
+#: of tensorcircuit.cons.set_function_dtype:5
+msgid "The decorated function"
msgstr ""
-#: of tensorcircuit.gates.matrix_for_gate:12
-msgid "Corresponding Tensor"
+#: of tensorcircuit.cons.split_rules:1
+msgid "Obtain the direcionary of truncation rules"
msgstr ""
-#: of tensorcircuit.gates.meta_gate:1
-msgid ""
-"Inner helper function to generate gate functions, such as ``z()`` from "
-"``_z_matrix``"
+#: of tensorcircuit.cons.split_rules:3
+#: tensorcircuit.mps_base.FiniteMPS.apply_two_site_gate:11
+msgid "The maximum number of singular values to keep."
msgstr ""
-#: of tensorcircuit.gates.multicontrol_gate:1
-msgid ""
-"Multicontrol gate. If the control qubits equal to ``ctrl``, :math:`U` is "
-"applied to the target qubits."
+#: of tensorcircuit.cons.split_rules:5
+#: tensorcircuit.mps_base.FiniteMPS.apply_two_site_gate:13
+msgid "The maximum allowed truncation error."
msgstr ""
-#: of tensorcircuit.gates.multicontrol_gate:5
-msgid ""
-"E.g., ``multicontrol_gate(tc.gates._zz_matrix, [1, 0, 1])`` returns a "
-"gate of 5 qubits,"
+#: ../../source/api/densitymatrix.rst:2
+msgid "tensorcircuit.densitymatrix"
msgstr ""
-#: of tensorcircuit.gates.multicontrol_gate:4
-msgid ""
-"where the last 2 qubits are applied :math:`ZZ` gate, if the first 3 "
-"qubits are :math:`\\ket{101}`."
+#: of tensorcircuit.densitymatrix:1
+msgid "Quantum circuit class but with density matrix simulator"
msgstr ""
-#: of tensorcircuit.gates.multicontrol_gate:9
-msgid "control bit sequence"
+#: of tensorcircuit.densitymatrix.DMCircuit.__init__:1
+msgid "The density matrix simulator based on tensornetwork engine."
msgstr ""
-#: of tensorcircuit.gates.multicontrol_gate:11
-msgid "Multicontrol Gate"
+#: of tensorcircuit.densitymatrix.DMCircuit.__init__:3
+msgid "Number of qubits"
msgstr ""
-#: of tensorcircuit.gates.phase_gate:1
-msgid "The phase gate"
+#: of tensorcircuit.densitymatrix.DMCircuit.__init__:5
+msgid "if True, nothing initialized, only for internal use, defaults to False"
msgstr ""
-#: of tensorcircuit.gates.phase_gate:3
-msgid ""
-"\\textrm{phase}(\\theta) =\n"
-"\\begin{pmatrix}\n"
-" 1 & 0 \\\\\n"
-" 0 & e^{i\\theta} \\\\\n"
-"\\end{pmatrix}\n"
-"\n"
+#: of tensorcircuit.densitymatrix.DMCircuit.__init__:7
+msgid "the state input for the circuit, defaults to None"
msgstr ""
-#: of tensorcircuit.gates.phase_gate:10
-msgid "angle in radians, defaults to 0"
+#: of tensorcircuit.densitymatrix.DMCircuit.__init__:9
+msgid "QuVector for a MPS like initial pure state."
msgstr ""
-#: of tensorcircuit.gates.phase_gate:12
-msgid "phase gate"
+#: of tensorcircuit.densitymatrix.DMCircuit.__init__:11
+msgid "the density matrix input for the circuit, defaults to None"
msgstr ""
-#: of tensorcircuit.gates.r_gate:1
-msgid "General single qubit rotation gate"
+#: of tensorcircuit.densitymatrix.DMCircuit.__init__:13
+msgid "QuOperator for a MPO like initial density matrix."
msgstr ""
-#: of tensorcircuit.gates.r_gate:3
+#: of tensorcircuit.densitymatrix.DMCircuit.densitymatrix:1
+msgid "Return the output density matrix of the circuit."
+msgstr ""
+
+#: of tensorcircuit.densitymatrix.DMCircuit.densitymatrix:3
msgid ""
-"R(\\theta, \\alpha, \\phi) = j \\cos(\\theta) I\n"
-"- j \\cos(\\phi) \\sin(\\alpha) \\sin(\\theta) X\n"
-"- j \\sin(\\phi) \\sin(\\alpha) \\sin(\\theta) Y\n"
-"- j \\sin(\\theta) \\cos(\\alpha) Z\n"
-"\n"
-msgstr ""
-
-#: of tensorcircuit.gates.r_gate:16
-msgid "R Gate"
+"check whether the final return is a legal density matrix, defaults to "
+"False"
msgstr ""
-#: of tensorcircuit.gates.random_single_qubit_gate:1
-msgid "Random single qubit gate described in https://arxiv.org/abs/2002.07730."
+#: of tensorcircuit.densitymatrix.DMCircuit.densitymatrix:5
+msgid "whether to reuse previous results, defaults to True"
msgstr ""
-#: of tensorcircuit.gates.random_single_qubit_gate:3
-msgid "A random single-qubit gate"
+#: of tensorcircuit.densitymatrix.DMCircuit.densitymatrix:7
+msgid "The output densitymatrix in 2D shape tensor form"
msgstr ""
-#: of tensorcircuit.gates.random_two_qubit_gate:1
-msgid "Returns a random two-qubit gate."
+#: of tensorcircuit.densitymatrix.DMCircuit.expectation:6
+msgid "whether contract the density matrix in advance, defaults to True"
msgstr ""
-#: of tensorcircuit.gates.random_two_qubit_gate:3
-msgid "A random two-qubit gate"
+#: of tensorcircuit.densitymatrix.DMCircuit.get_dm_as_quoperator:1
+msgid ""
+"Get the representation of the output state in the form of ``QuOperator`` "
+"while maintaining the circuit uncomputed"
msgstr ""
-#: of tensorcircuit.gates.rgate_theoretical:1
-msgid ""
-"Rotation gate implemented by matrix exponential. The output is the same "
-"as `rgate`."
+#: of tensorcircuit.densitymatrix.DMCircuit.get_dm_as_quoperator:4
+msgid "``QuOperator`` representation of the output state from the circuit"
msgstr ""
-#: of tensorcircuit.gates.rgate_theoretical:3
+#: of tensorcircuit.densitymatrix.DMCircuit.to_circuit:1
msgid ""
-"R(\\theta, \\alpha, \\phi) = e^{-j \\theta \\left[\\sin(\\alpha) "
-"\\cos(\\phi) X\n"
-" + \\sin(\\alpha) \\sin(\\phi) "
-"Y\n"
-" + \\cos(\\alpha) Z\\right]}\n"
-"\n"
+"convert into state simulator (current implementation ignores all noise "
+"channels)"
msgstr ""
-#: of tensorcircuit.gates.rgate_theoretical:14
-msgid "Rotation Gate"
+#: of tensorcircuit.densitymatrix.DMCircuit.to_circuit:4
+msgid "kws to initialize circuit object, defaults to None"
msgstr ""
-#: of tensorcircuit.gates.rx_gate:1
-msgid "Rotation gate along :math:`x` axis."
+#: of tensorcircuit.densitymatrix.DMCircuit.to_circuit:7
+msgid "Circuit with no noise"
msgstr ""
-#: of tensorcircuit.gates.rx_gate:3
+#: of tensorcircuit.densitymatrix.DMCircuit.wavefunction:1
msgid ""
-"RX(\\theta) = e^{-j\\frac{\\theta}{2}X}\n"
-"\n"
+"get the wavefunction of outputs, raise error if the final state is not "
+"purified [Experimental: the phase factor is not fixed for different "
+"backend]"
msgstr ""
-#: of tensorcircuit.gates.rx_gate:8
-msgid "RX Gate"
+#: of tensorcircuit.densitymatrix.DMCircuit.wavefunction:5
+msgid "wavefunction vector"
msgstr ""
-#: of tensorcircuit.gates.ry_gate:1
-msgid "Rotation gate along :math:`y` axis."
+#: of tensorcircuit.densitymatrix.DMCircuit2:1
+msgid "Bases: :py:class:`~tensorcircuit.densitymatrix.DMCircuit`"
msgstr ""
-#: of tensorcircuit.gates.ry_gate:3
-msgid ""
-"RY(\\theta) = e^{-j\\frac{\\theta}{2}Y}\n"
-"\n"
+#: ../../source/api/experimental.rst:2
+msgid "tensorcircuit.experimental"
msgstr ""
-#: of tensorcircuit.gates.ry_gate:8
-msgid "RY Gate"
+#: of tensorcircuit.experimental:1
+msgid "Experimental features"
msgstr ""
-#: of tensorcircuit.gates.rz_gate:1
-msgid "Rotation gate along :math:`z` axis."
+#: of tensorcircuit.experimental.evol_global:1
+msgid ""
+"ode evolution of time dependent Hamiltonian on circuit of all qubits "
+"[only jax backend support for now]"
msgstr ""
-#: of tensorcircuit.gates.rz_gate:3
+#: of tensorcircuit.experimental.evol_global:6
msgid ""
-"RZ(\\theta) = e^{-j\\frac{\\theta}{2}Z}\n"
-"\n"
+"h_fun should return a **SPARSE** Hamiltonian matrix with input arguments "
+"time and *args"
msgstr ""
-#: of tensorcircuit.gates.rz_gate:8
-msgid "RZ Gate"
+#: of tensorcircuit.experimental.evol_local:1
+msgid ""
+"ode evolution of time dependent Hamiltonian on circuit of given indices "
+"[only jax backend support for now]"
msgstr ""
-#: of tensorcircuit.gates.u_gate:1
+#: of tensorcircuit.experimental.evol_local:8
msgid ""
-"IBMQ U gate following the converntion of OpenQASM3.0. See `OpenQASM doc "
-"`_"
+"h_fun should return a dense Hamiltonian matrix with input arguments time "
+"and *args"
msgstr ""
-#: of tensorcircuit.gates.u_gate:4
+#: of tensorcircuit.experimental.evol_local:11
+msgid "evolution time"
+msgstr ""
+
+#: of tensorcircuit.experimental.hamiltonian_evol:1
msgid ""
-"\\begin{split}U(\\theta,\\phi,\\lambda) := \\left(\\begin{array}{cc}\n"
-"\\cos(\\theta/2) & -e^{i\\lambda}\\sin(\\theta/2) \\\\\n"
-"e^{i\\phi}\\sin(\\theta/2) & e^{i(\\phi+\\lambda)}\\cos(\\theta/2) "
-"\\end{array}\\right).\\end{split}"
+"Fast implementation of static full Hamiltonian evolution (default as "
+"imaginary time)"
msgstr ""
-#: of tensorcircuit.gates.u_gate:10 tensorcircuit.gates.u_gate:12
-#: tensorcircuit.gates.u_gate:14
-msgid "_description_, defaults to 0"
+#: of tensorcircuit.experimental.hamiltonian_evol:13
+msgid "result dynamics on ``tlist``"
msgstr ""
-#: ../../source/api/interfaces.rst:2
-msgid "tensorcircuit.interfaces"
+#: of tensorcircuit.experimental.parameter_shift_grad:1
+msgid ""
+"similar to `grad` function but using parameter shift internally instead "
+"of AD, vmap is utilized for evaluation, so the speed is still ok"
msgstr ""
-#: ../../source/api/interfaces/numpy.rst:2
-msgid "tensorcircuit.interfaces.numpy"
+#: of tensorcircuit.experimental.parameter_shift_grad:4
+#: tensorcircuit.experimental.parameter_shift_grad_v2:6
+msgid "quantum function with weights in and expectation out"
msgstr ""
-#: of tensorcircuit.interfaces.numpy:1
-msgid "Interface wraps quantum function as a numpy function"
+#: of tensorcircuit.experimental.parameter_shift_grad:6
+#: tensorcircuit.experimental.parameter_shift_grad_v2:8
+msgid "label which args should be differentiated, defaults to 0"
msgstr ""
-#: of tensorcircuit.interfaces.numpy.numpy_interface:1
-msgid "Convert ``fun`` on ML backend into a numpy function"
+#: of tensorcircuit.experimental.parameter_shift_grad:9
+#: tensorcircuit.experimental.parameter_shift_grad_v2:11
+msgid "whether jit the original function `f` at the beginning, defaults to False"
msgstr ""
-#: of tensorcircuit.interfaces.numpy.numpy_interface:23
-msgid "The quantum function"
+#: of tensorcircuit.experimental.parameter_shift_grad:12
+#: tensorcircuit.experimental.parameter_shift_grad_v2:14
+msgid ""
+"two floats for the delta shift on the numerator and dominator, defaults "
+"to (pi/2, 2) for parameter shift"
msgstr ""
-#: of tensorcircuit.interfaces.numpy.numpy_interface:25
-#: tensorcircuit.interfaces.scipy.scipy_optimize_interface:39
-msgid "whether to jit ``fun``, defaults to True"
+#: of tensorcircuit.experimental.parameter_shift_grad:15
+#: tensorcircuit.experimental.parameter_shift_grad_v2:17
+msgid "the grad function"
msgstr ""
-#: of tensorcircuit.interfaces.numpy.numpy_interface:27
-msgid "The numpy interface compatible version of ``fun``"
+#: of tensorcircuit.experimental.parameter_shift_grad_v2:1
+msgid ""
+"similar to `grad` function but using parameter shift internally instead "
+"of AD, vmap is utilized for evaluation, v2 also supports random generator"
+" for finite measurement shot, only jax backend is supported, since no "
+"vmap randomness is available in tensorflow"
msgstr ""
-#: ../../source/api/interfaces/scipy.rst:2
-msgid "tensorcircuit.interfaces.scipy"
+#: ../../source/api/gates.rst:2
+msgid "tensorcircuit.gates"
msgstr ""
-#: of tensorcircuit.interfaces.scipy:1
-msgid "Interface wraps quantum function as a scipy function for optimization"
+#: of tensorcircuit.gates:1
+msgid ""
+"Declarations of single-qubit and two-qubit gates and their corresponding "
+"matrix."
msgstr ""
-#: of tensorcircuit.interfaces.scipy.scipy_optimize_interface:1
-msgid "Convert ``fun`` into a scipy optimize interface compatible version"
+#: of tensorcircuit.gates.Gate:1
+msgid "Bases: :py:class:`~tensornetwork.network_components.Node`"
msgstr ""
-#: of tensorcircuit.interfaces.scipy.scipy_optimize_interface:35
-msgid "The quantum function with scalar out that to be optimized"
+#: of tensorcircuit.gates.Gate:1
+msgid "Wrapper of tn.Node, quantum gate"
msgstr ""
-#: of tensorcircuit.interfaces.scipy.scipy_optimize_interface:37
-msgid "the shape of parameters that ``fun`` accepts, defaults to None"
+#: of tensornetwork.network_components.Node.__init__:1
+msgid "Create a node."
msgstr ""
-#: of tensorcircuit.interfaces.scipy.scipy_optimize_interface:41
+#: of tensornetwork.network_components.Node.__init__:3
msgid ""
-"whether using gradient-based or gradient free scipy optimize interface, "
-"defaults to True"
+"The concrete that is represented by this node, or a `AbstractNode` "
+"object. If a tensor is passed, it can be be either a numpy array or the "
+"tensor-type of the used backend. If a `AbstractNode` is passed, the "
+"passed node has to have the same backend as given by `backend`."
msgstr ""
-#: of tensorcircuit.interfaces.scipy.scipy_optimize_interface:44
-msgid "The scipy interface compatible version of ``fun``"
+#: of tensornetwork.network_components.Node.__init__:7
+msgid "Name of the node. Used primarily for debugging."
msgstr ""
-#: ../../source/api/interfaces/tensorflow.rst:2
-msgid "tensorcircuit.interfaces.tensorflow"
+#: of tensornetwork.network_components.AbstractNode.add_axis_names:3
+#: tensornetwork.network_components.Node.__init__:8
+msgid "List of names for each of the tensor's axes."
msgstr ""
-#: of tensorcircuit.interfaces.tensorflow:1
-msgid "Interface wraps quantum function as a tensorflow function"
+#: of tensornetwork.network_components.Node.__init__:9
+msgid "The name of the backend or an instance of a `AbstractBackend`."
msgstr ""
-#: of tensorcircuit.interfaces.tensorflow.tensorflow_interface:1
+#: of tensornetwork.network_components.AbstractNode.add_axis_names:5
+#: tensornetwork.network_components.Node.__init__:11
msgid ""
-"Wrap a quantum function on different ML backend with a tensorflow "
-"interface."
+"If there is a repeated name in `axis_names` or if the length doesn't "
+"match the shape of the tensor."
msgstr ""
-#: of tensorcircuit.interfaces.tensorflow.tensorflow_interface:22
-#: tensorcircuit.interfaces.torch.torch_interface:28
-msgid "The quantum function with tensor in and tensor out"
+#: of tensornetwork.network_components.AbstractNode.add_axis_names:1
+msgid "Add axis names to a Node."
msgstr ""
-#: of tensorcircuit.interfaces.tensorflow.tensorflow_interface:24
-msgid "output tf dtype or in str"
+#: of tensornetwork.network_components.AbstractNode.add_edge:1
+msgid "Add an edge to the node on the given axis."
msgstr ""
-#: of tensorcircuit.interfaces.tensorflow.tensorflow_interface:26
-#: tensorcircuit.interfaces.torch.torch_interface:30
-msgid "whether to jit ``fun``, defaults to False"
+#: of tensornetwork.network_components.AbstractNode.add_edge:3
+msgid "The edge to add."
msgstr ""
-#: of tensorcircuit.interfaces.tensorflow.tensorflow_interface:28
-#: tensorcircuit.interfaces.torch.torch_interface:32
-msgid "whether transform tensor backend via dlpack, defaults to False"
+#: of tensornetwork.network_components.AbstractNode.add_edge:4
+msgid "The axis the edge points to."
msgstr ""
-#: of tensorcircuit.interfaces.tensorflow.tensorflow_interface:30
-#: tensorcircuit.interfaces.torch.torch_interface:34
-msgid ""
-"The same quantum function but now with torch tensor in and torch tensor "
-"out while AD is also supported"
+#: of tensornetwork.network_components.AbstractNode.add_edge:5
+msgid "If true, replace the existing edge with the new one."
msgstr ""
-#: ../../source/api/interfaces/tensortrans.rst:2
-msgid "tensorcircuit.interfaces.tensortrans"
+#: of tensornetwork.network_components.AbstractNode.add_edge:7
+msgid "If the edge on axis is not dangling."
msgstr ""
-#: of tensorcircuit.interfaces.tensortrans:1
-msgid "general function for interfaces transformation"
+#: of tensornetwork.network_components.Node.from_serial_dict:1
+msgid "Return a node given a serialized dict representing it."
msgstr ""
-#: of tensorcircuit.interfaces.tensortrans.args_to_tensor:1
-msgid ""
-"Function decorator that automatically convert inputs to tensors on "
-"current backend"
+#: of tensornetwork.network_components.Node.from_serial_dict:3
+msgid "A python dict representing a serialized node."
msgstr ""
-#: of tensorcircuit.interfaces.tensortrans.args_to_tensor:63
-msgid ""
-"the wrapped function whose arguments in ``argnums`` position are expected"
-" to be tensor format"
+#: of tensornetwork.network_components.Node.from_serial_dict:5
+msgid "A node."
msgstr ""
-#: of tensorcircuit.interfaces.tensortrans.args_to_tensor:66
-msgid "position of args under the auto conversion, defaults to 0"
+#: of tensornetwork.network_components.AbstractNode.get_all_dangling:1
+msgid "Return the set of dangling edges connected to this node."
msgstr ""
-#: of tensorcircuit.interfaces.tensortrans.args_to_tensor:68
-msgid ""
-"try reshape all input tensor as matrix with shape rank 2, defaults to "
-"False"
+#: of tensornetwork.network_components.AbstractNode.get_all_nondangling:1
+msgid "Return the set of nondangling edges connected to this node."
msgstr ""
-#: of tensorcircuit.interfaces.tensortrans.args_to_tensor:71
-msgid "convert ``Gate`` to tensor, defaults to False"
+#: of tensornetwork.network_components.AbstractNode.get_axis_number:1
+msgid "Get the axis number for a given axis name or value."
msgstr ""
-#: of tensorcircuit.interfaces.tensortrans.args_to_tensor:73
-msgid "reshape tensor from ``Gate`` input as matrix, defaults to True"
-msgstr ""
-
-#: of tensorcircuit.interfaces.tensortrans.args_to_tensor:75
-msgid "convert ``QuOperator`` to tensor, defaults to False"
+#: of tensornetwork.network_components.AbstractNode.get_dimension:1
+msgid "Get the dimension of the given axis."
msgstr ""
-#: of tensorcircuit.interfaces.tensortrans.args_to_tensor:77
-msgid "reshape tensor from ``QuOperator`` input as matrix, defaults to True"
+#: of tensornetwork.network_components.AbstractNode.get_dimension:3
+msgid "The axis of the underlying tensor."
msgstr ""
-#: of tensorcircuit.interfaces.tensortrans.args_to_tensor:79
-msgid "whether cast to backend dtype, defaults to True"
+#: of tensornetwork.network_components.AbstractNode.get_dimension:5
+msgid "The dimension of the given axis."
msgstr ""
-#: of tensorcircuit.interfaces.tensortrans.args_to_tensor:81
-msgid "The wrapped function"
+#: of tensornetwork.network_components.AbstractNode.get_dimension:7
+msgid "if axis isn't an int or if axis is too large or small."
msgstr ""
-#: of tensorcircuit.interfaces.tensortrans.general_args_to_numpy:1
-msgid "Given a pytree, get the corresponding numpy array pytree"
+#: of tensornetwork.network_components.AbstractNode.get_rank:1
+msgid "Return rank of tensor represented by self."
msgstr ""
-#: of tensorcircuit.interfaces.tensortrans.general_args_to_numpy:3
-msgid "pytree"
+#: of tensornetwork.network_components.AbstractNode.reorder_axes:1
+msgid "Reorder axes of the node's tensor."
msgstr ""
-#: of tensorcircuit.interfaces.tensortrans.general_args_to_numpy:5
-msgid "the same format pytree with all tensor replaced by numpy array"
+#: of tensornetwork.network_components.AbstractNode.reorder_axes:3
+msgid "This will also update all of the node's edges."
msgstr ""
-#: of tensorcircuit.interfaces.tensortrans.numpy_args_to_backend:1
-msgid "Given a pytree of numpy arrays, get the corresponding tensor pytree"
+#: of tensornetwork.network_components.AbstractNode.reorder_axes:5
+msgid "Permutation of the dimensions of the node's tensor."
msgstr ""
-#: of tensorcircuit.interfaces.tensortrans.numpy_args_to_backend:3
-msgid "pytree of numpy arrays"
+#: of tensornetwork.network_components.AbstractNode.reorder_axes:7
+#: tensornetwork.network_components.AbstractNode.reorder_edges:9
+msgid "This node post reordering."
msgstr ""
-#: of tensorcircuit.interfaces.tensortrans.numpy_args_to_backend:5
-msgid "str of str of the same pytree shape as args, defaults to None"
+#: of tensornetwork.network_components.AbstractNode.reorder_axes:9
+#: tensornetwork.network_components.AbstractNode.reorder_edges:12
+msgid "If the Node has no tensor."
msgstr ""
-#: of tensorcircuit.interfaces.tensortrans.numpy_args_to_backend:7
-msgid ""
-"str or backend object, defaults to None, indicating the current default "
-"backend"
+#: of tensornetwork.network_components.AbstractNode.reorder_edges:1
+msgid "Reorder the edges for this given Node."
msgstr ""
-#: of tensorcircuit.interfaces.tensortrans.numpy_args_to_backend:10
+#: of tensornetwork.network_components.AbstractNode.reorder_edges:3
msgid ""
-"the same format pytree with all numpy array replaced by the tensors in "
-"the target backend"
+"This will reorder the node's edges and transpose the underlying tensor "
+"accordingly."
msgstr ""
-#: of tensorcircuit.interfaces.tensortrans.which_backend:1
-msgid "Given a tensor ``a``, return the corresponding backend"
+#: of tensornetwork.network_components.AbstractNode.reorder_edges:6
+msgid "List of edges. The order in the list determines the new edge ordering."
msgstr ""
-#: of tensorcircuit.interfaces.tensortrans.which_backend:5
+#: of tensornetwork.network_components.AbstractNode.reorder_edges:11
msgid ""
-"if true, return backend object, if false, return backend str, defaults to"
-" True"
+"If either the list of edges is not the same as expected or if you try"
+" to reorder with a trace edge."
msgstr ""
-#: of tensorcircuit.interfaces.tensortrans.which_backend:8
-msgid "the backend object or backend str"
+#: of tensornetwork.network_components.Node.to_serial_dict:1
+msgid "Return a serializable dict representing the node."
msgstr ""
-#: ../../source/api/interfaces/torch.rst:2
-msgid "tensorcircuit.interfaces.torch"
+#: of tensornetwork.network_components.Node.to_serial_dict:3
+msgid "Returns: A dict object."
msgstr ""
-#: of tensorcircuit.interfaces.torch:1
-msgid "Interface wraps quantum function as a torch function"
+#: of tensorcircuit.gates.GateVF:1
+msgid "Bases: :py:class:`~tensorcircuit.gates.GateF`"
msgstr ""
-#: of tensorcircuit.interfaces.torch.torch_interface:1
-msgid "Wrap a quantum function on different ML backend with a pytorch interface."
+#: of tensorcircuit.gates.any_gate:1
+msgid "Note one should provide the gate with properly reshaped."
msgstr ""
-#: ../../source/api/keras.rst:2
-msgid "tensorcircuit.keras"
+#: of tensorcircuit.gates.any_gate:3
+msgid "corresponding gate"
msgstr ""
-#: of tensorcircuit.keras:1
-msgid "Keras layer for tc quantum function"
+#: of tensorcircuit.gates.any_gate:5
+msgid "The name of the gate."
msgstr ""
-#: of tensorcircuit.keras.QuantumLayer.__init__:1
-msgid ""
-"`QuantumLayer` wraps the quantum function `f` as a `keras.Layer` so that "
-"tensorcircuit is better integrated with tensorflow. Note that the input "
-"of the layer can be tensors or even list/dict of tensors."
+#: of tensorcircuit.gates.any_gate:7
+msgid "the resulted gate"
msgstr ""
-#: of tensorcircuit.keras.QuantumLayer.__init__:5
-msgid "Callabel function."
+#: of tensorcircuit.gates.num_to_tensor:1
+msgid "Convert the inputs to Tensor with specified dtype."
msgstr ""
-#: of tensorcircuit.keras.QuantumLayer.__init__:7
-msgid "The shape of the weights."
+#: of tensorcircuit.gates.num_to_tensor:35
+msgid "inputs"
msgstr ""
-#: of tensorcircuit.keras.QuantumLayer.__init__:9
-msgid "The initializer of the weights, defaults to \"glorot_uniform\""
+#: of tensorcircuit.gates.num_to_tensor:37
+msgid "dtype of the output Tensors"
msgstr ""
-#: of tensorcircuit.keras.load_func:1
-msgid ""
-"Load function from the files in the ``tf.savedmodel`` format. We can load"
-" several functions at the same time, as they can be the same function of "
-"different input shapes."
+#: of tensorcircuit.gates.num_to_tensor:39
+msgid "List of Tensors"
msgstr ""
-#: of tensorcircuit.keras.load_func:24
-msgid ""
-"The fallback function when all functions loaded are failed, defaults to "
-"None"
+#: of tensorcircuit.gates.bmatrix:1
+msgid "Returns a :math:`\\LaTeX` bmatrix."
msgstr ""
-#: of tensorcircuit.keras.load_func:26
-msgid ""
-"When there is not legal loaded function of the input shape and no "
-"fallback callable."
+#: of tensorcircuit.gates.bmatrix:13
+msgid "Formatted Display:"
msgstr ""
-#: of tensorcircuit.keras.load_func:27
+#: of tensorcircuit.gates.bmatrix:15
msgid ""
-"A function that tries all loaded function against the input until the "
-"first success one."
+"\\begin{bmatrix} 1.+0.j & 0.+0.j\\\\ 0.+0.j & 1.+0.j \\end{bmatrix}"
+"\n"
+"\n"
msgstr ""
-#: of tensorcircuit.keras.output_asis_loss:1
-msgid "The keras loss function that directly taking the model output as the loss."
+#: of tensorcircuit.gates.bmatrix:18
+msgid "2D numpy array"
msgstr ""
-#: of tensorcircuit.keras.output_asis_loss:3
-msgid "Ignoring this parameter."
+#: of tensorcircuit.gates.bmatrix:20
+msgid "ValueError(\"bmatrix can at most display two dimensions\")"
msgstr ""
-#: of tensorcircuit.keras.output_asis_loss:5
-msgid "Model output."
+#: of tensorcircuit.gates.bmatrix:21
+msgid ":math:`\\LaTeX`-formatted string for bmatrix of the array a"
msgstr ""
-#: of tensorcircuit.keras.output_asis_loss:7
-msgid "Model output, which is y_pred."
+#: of tensorcircuit.gates.cr_gate:1
+msgid ""
+"Controlled rotation gate. When the control qubit is 1, `rgate` is applied"
+" to the target qubit."
msgstr ""
-#: of tensorcircuit.keras.save_func:1
-msgid "Save tf function in the file (``tf.savedmodel`` format)."
+#: of tensorcircuit.gates.cr_gate:3 tensorcircuit.gates.cr_gate:5
+#: tensorcircuit.gates.cr_gate:7 tensorcircuit.gates.exponential_gate:12
+#: tensorcircuit.gates.exponential_gate_unity:13
+#: tensorcircuit.gates.iswap_gate:12 tensorcircuit.gates.r_gate:9
+#: tensorcircuit.gates.r_gate:11 tensorcircuit.gates.r_gate:13
+#: tensorcircuit.gates.rgate_theoretical:8
+#: tensorcircuit.gates.rgate_theoretical:10
+#: tensorcircuit.gates.rgate_theoretical:12 tensorcircuit.gates.rx_gate:6
+#: tensorcircuit.gates.rxx_gate:13 tensorcircuit.gates.ry_gate:6
+#: tensorcircuit.gates.ryy_gate:13 tensorcircuit.gates.rz_gate:6
+#: tensorcircuit.gates.rzz_gate:13
+msgid "angle in radians"
msgstr ""
-#: of tensorcircuit.keras.save_func:30
-msgid "``tf.function`` ed function with graph building"
+#: of tensorcircuit.gates.cr_gate:10
+msgid "CR Gate"
msgstr ""
-#: of tensorcircuit.keras.save_func:32
-msgid "the dir path to save the function"
+#: of tensorcircuit.gates.exponential_gate_unity:1
+#: tensorcircuit.gates.rxx_gate:1 tensorcircuit.gates.ryy_gate:1
+#: tensorcircuit.gates.rzz_gate:1
+msgid ""
+"Faster exponential gate directly implemented based on RHS. Only works "
+"when :math:`U^2 = I` is an identity matrix."
msgstr ""
-#: ../../source/api/mps_base.rst:2
-msgid "tensorcircuit.mps_base"
+#: of tensorcircuit.gates.exponential_gate_unity:3
+#: tensorcircuit.gates.rxx_gate:3 tensorcircuit.gates.ryy_gate:3
+#: tensorcircuit.gates.rzz_gate:3
+msgid ""
+"\\textrm{exp}(U) &= e^{-j \\theta U} \\\\\n"
+" &= \\cos(\\theta) I - j \\sin(\\theta) U \\\\\n"
+"\n"
msgstr ""
-#: of tensorcircuit.mps_base:1
-msgid "FiniteMPS from tensornetwork with bug fixed"
+#: of tensorcircuit.gates.exponential_gate:6
+#: tensorcircuit.gates.exponential_gate_unity:7
+#: tensorcircuit.gates.multicontrol_gate:7 tensorcircuit.gates.rxx_gate:7
+#: tensorcircuit.gates.ryy_gate:7 tensorcircuit.gates.rzz_gate:7
+msgid "input unitary :math:`U`"
msgstr ""
-#: of tensorcircuit.mps_base.FiniteMPS:1
-msgid "Bases: :py:class:`tensornetwork.matrixproductstates.finite_mps.FiniteMPS`"
+#: of tensorcircuit.gates.exponential_gate:8
+#: tensorcircuit.gates.exponential_gate:10
+#: tensorcircuit.gates.exponential_gate_unity:9
+#: tensorcircuit.gates.exponential_gate_unity:11 tensorcircuit.gates.rxx_gate:9
+#: tensorcircuit.gates.rxx_gate:11 tensorcircuit.gates.ryy_gate:9
+#: tensorcircuit.gates.ryy_gate:11 tensorcircuit.gates.rzz_gate:9
+#: tensorcircuit.gates.rzz_gate:11
+msgid "alias for the argument ``unitary``"
msgstr ""
-#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.__init__:4
-msgid "Initialize a `FiniteMPS`. If `canonicalize` is `True` the state"
+#: of tensorcircuit.gates.exponential_gate_unity:15
+#: tensorcircuit.gates.rxx_gate:15 tensorcircuit.gates.ryy_gate:15
+#: tensorcircuit.gates.rzz_gate:15
+msgid "if True, the angle theta is multiplied by 1/2, defaults to False"
msgstr ""
-#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.__init__:2
-msgid ""
-"is brought into canonical form, with `BaseMPS.center_position` at "
-"`center_position`. if `center_position` is `None` and `canonicalize = "
-"True`, `BaseMPS.center_position` is set to 0."
+#: of tensorcircuit.gates.exponential_gate:14
+#: tensorcircuit.gates.exponential_gate_unity:18
+#: tensorcircuit.gates.rxx_gate:18 tensorcircuit.gates.ryy_gate:18
+#: tensorcircuit.gates.rzz_gate:18
+msgid "suffix of Gate name"
msgstr ""
-#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.__init__:6
-msgid "A list of `Tensor` objects."
+#: of tensorcircuit.gates.exponential_gate:15
+#: tensorcircuit.gates.exponential_gate_unity:20
+#: tensorcircuit.gates.rxx_gate:20 tensorcircuit.gates.ryy_gate:20
+#: tensorcircuit.gates.rzz_gate:20
+msgid "Exponential Gate"
msgstr ""
-#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.__init__:7
-msgid "The initial position of the center site."
+#: of tensorcircuit.gates.exponential_gate:1
+msgid "Exponential gate."
msgstr ""
-#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.__init__:8
-msgid "If `True` the mps is canonicalized at initialization."
+#: of tensorcircuit.gates.exponential_gate:3
+msgid ""
+"\\textrm{exp}(U) = e^{-j \\theta U}\n"
+"\n"
msgstr ""
-#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.__init__:9
-msgid ""
-"The name of the backend that should be used to perform contractions. "
-"Available backends are currently 'numpy', 'tensorflow', 'pytorch', 'jax'"
+#: of tensorcircuit.gates.get_u_parameter:1
+msgid "From the single qubit unitary to infer three angles of IBMUgate,"
msgstr ""
-#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.apply_one_site_gate:1
-msgid ""
-"Apply a one-site gate to an MPS. This routine will in general destroy any"
-" canonical form of the state. If a canonical form is needed, the user can"
-" restore it using `FiniteMPS.position` :param gate: a one-body gate "
-":param site: the site where the gate should be applied"
+#: of tensorcircuit.gates.get_u_parameter:3
+msgid "numpy array, no backend agnostic version for now"
msgstr ""
-#: of
-#: tensornetwork.matrixproductstates.base_mps.BaseMPS.apply_transfer_operator:1
-msgid "Compute the action of the MPS transfer-operator at site `site`."
+#: of tensorcircuit.gates.get_u_parameter:5
+msgid "theta, phi, lbd"
msgstr ""
-#: of
-#: tensornetwork.matrixproductstates.base_mps.BaseMPS.apply_transfer_operator:3
-msgid "A site of the MPS"
+#: of tensorcircuit.gates.iswap_gate:1
+msgid "iSwap gate."
msgstr ""
-#: of
-#: tensornetwork.matrixproductstates.base_mps.BaseMPS.apply_transfer_operator:4
+#: of tensorcircuit.gates.iswap_gate:3
msgid ""
-"* if `1, 'l'` or `'left'`: compute the left-action of the MPS transfer-"
-"operator at `site` on the input `matrix`. * if `-1, 'r'` or `'right'`: "
-"compute the right-action of the MPS transfer-operator at `site` on the "
-"input `matrix`"
+"\\textrm{iSwap}(\\theta) =\n"
+"\\begin{pmatrix}\n"
+" 1 & 0 & 0 & 0\\\\\n"
+" 0 & \\cos(\\frac{\\pi}{2} \\theta ) & j \\sin(\\frac{\\pi}{2} \\theta"
+" ) & 0\\\\\n"
+" 0 & j \\sin(\\frac{\\pi}{2} \\theta ) & \\cos(\\frac{\\pi}{2} \\theta"
+" ) & 0\\\\\n"
+" 0 & 0 & 0 & 1\\\\\n"
+"\\end{pmatrix}\n"
+"\n"
msgstr ""
-#: of
-#: tensornetwork.matrixproductstates.base_mps.BaseMPS.apply_transfer_operator:5
-msgid ""
-"if `1, 'l'` or `'left'`: compute the left-action of the MPS transfer-"
-"operator at `site` on the input `matrix`."
+#: of tensorcircuit.gates.iswap_gate:14
+msgid "iSwap Gate"
msgstr ""
-#: of
-#: tensornetwork.matrixproductstates.base_mps.BaseMPS.apply_transfer_operator:7
-msgid ""
-"if `-1, 'r'` or `'right'`: compute the right-action of the MPS transfer-"
-"operator at `site` on the input `matrix`"
+#: of tensorcircuit.gates.matrix_for_gate:1
+msgid "Convert Gate to numpy array."
msgstr ""
-#: of
-#: tensornetwork.matrixproductstates.base_mps.BaseMPS.apply_transfer_operator:9
-msgid "A rank-2 tensor or matrix."
+#: of tensorcircuit.gates.matrix_for_gate:10
+msgid "input Gate"
msgstr ""
-#: of
-#: tensornetwork.matrixproductstates.base_mps.BaseMPS.apply_transfer_operator:11
-msgid "The result of applying the MPS transfer-operator to `matrix`"
+#: of tensorcircuit.gates.matrix_for_gate:12
+msgid "Corresponding Tensor"
msgstr ""
-#: of tensorcircuit.mps_base.FiniteMPS.apply_two_site_gate:1
+#: of tensorcircuit.gates.meta_gate:1
msgid ""
-"Apply a two-site gate to an MPS. This routine will in general destroy any"
-" canonical form of the state. If a canonical form is needed, the user can"
-" restore it using `FiniteMPS.position`."
-msgstr ""
-
-#: of tensorcircuit.mps_base.FiniteMPS.apply_two_site_gate:5
-msgid "A two-body gate."
-msgstr ""
-
-#: of tensorcircuit.mps_base.FiniteMPS.apply_two_site_gate:7
-msgid "The first site where the gate acts."
+"Inner helper function to generate gate functions, such as ``z()`` from "
+"``_z_matrix``"
msgstr ""
-#: of tensorcircuit.mps_base.FiniteMPS.apply_two_site_gate:9
-msgid "The second site where the gate acts."
+#: of tensorcircuit.gates.multicontrol_gate:1
+msgid ""
+"Multicontrol gate. If the control qubits equal to ``ctrl``, :math:`U` is "
+"applied to the target qubits."
msgstr ""
-#: of tensorcircuit.mps_base.FiniteMPS.apply_two_site_gate:15
+#: of tensorcircuit.gates.multicontrol_gate:5
msgid ""
-"An optional value to choose the MPS tensor at `center_position` to be "
-"isometric after the application of the gate. Defaults to `site1`. If the "
-"MPS is canonical (i.e.`BaseMPS.center_position != None`), and if the "
-"orthogonality center coincides with either `site1` or `site2`, the "
-"orthogonality center will be shifted to `center_position` (`site1` by "
-"default). If the orthogonality center does not coincide with `(site1, "
-"site2)` then `MPS.center_position` is set to `None`."
+"E.g., ``multicontrol_gate(tc.gates._zz_matrix, [1, 0, 1])`` returns a "
+"gate of 5 qubits,"
msgstr ""
-#: of tensorcircuit.mps_base.FiniteMPS.apply_two_site_gate:26
+#: of tensorcircuit.gates.multicontrol_gate:4
msgid ""
-"\"rank of gate is {} but has to be 4\", \"site1 = {} is not between 0 <= "
-"site < N - 1 = {}\", \"site2 = {} is not between 1 <= site < N = "
-"{}\",\"Found site2 ={}, site1={}. Only nearest neighbor gates are "
-"currently supported\", \"f center_position = {center_position} not f in "
-"{(site1, site2)} \", or \"center_position = {}, but gate is applied at "
-"sites {}, {}. Truncation should only be done if the gate is applied at "
-"the center position of the MPS.\""
+"where the last 2 qubits are applied :math:`ZZ` gate, if the first 3 "
+"qubits are :math:`\\ket{101}`."
msgstr ""
-#: of tensorcircuit.mps_base.FiniteMPS.apply_two_site_gate:32
-msgid "A scalar tensor containing the truncated weight of the truncation."
+#: of tensorcircuit.gates.multicontrol_gate:9
+msgid "control bit sequence"
msgstr ""
-#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.bond_dimension:1
-msgid "The bond dimension of `bond`"
+#: of tensorcircuit.gates.multicontrol_gate:11
+msgid "Multicontrol Gate"
msgstr ""
-#: of tensorcircuit.mps_base.FiniteMPS.bond_dimensions:1
-msgid "A list of bond dimensions of `BaseMPS`"
+#: of tensorcircuit.gates.phase_gate:1
+msgid "The phase gate"
msgstr ""
-#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.canonicalize:1
+#: of tensorcircuit.gates.phase_gate:3
msgid ""
-"Bring the MPS into canonical form according to `center_position`. If "
-"`center_position` is `None`, the MPS is canonicalized with "
-"`center_position = 0`."
+"\\textrm{phase}(\\theta) =\n"
+"\\begin{pmatrix}\n"
+" 1 & 0 \\\\\n"
+" 0 & e^{i\\theta} \\\\\n"
+"\\end{pmatrix}\n"
+"\n"
msgstr ""
-#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.canonicalize:5
-msgid "If `True`, normalize matrices when shifting the orthogonality center."
+#: of tensorcircuit.gates.phase_gate:10
+msgid "angle in radians, defaults to 0"
msgstr ""
-#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.canonicalize:8
-msgid "The norm of the MPS."
+#: of tensorcircuit.gates.phase_gate:12
+msgid "phase gate"
msgstr ""
-#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.check_canonical:1
-msgid "Check whether the MPS is in the expected canonical form."
+#: of tensorcircuit.gates.r_gate:1
+msgid "General single qubit rotation gate"
msgstr ""
-#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.check_canonical:3
-msgid "The L2 norm of the vector of local deviations."
+#: of tensorcircuit.gates.r_gate:3
+msgid ""
+"R(\\theta, \\alpha, \\phi) = j \\cos(\\theta) I\n"
+"- j \\cos(\\phi) \\sin(\\alpha) \\sin(\\theta) X\n"
+"- j \\sin(\\phi) \\sin(\\alpha) \\sin(\\theta) Y\n"
+"- j \\sin(\\theta) \\cos(\\alpha) Z\n"
+"\n"
msgstr ""
-#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.check_orthonormality:1
-msgid "Check orthonormality of tensor at site `site`."
+#: of tensorcircuit.gates.r_gate:16
+msgid "R Gate"
msgstr ""
-#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.check_orthonormality:3
-msgid ""
-"* if `'l'` or `'left'`: check left orthogonality * if `'r`' or `'right'`:"
-" check right orthogonality"
+#: of tensorcircuit.gates.random_single_qubit_gate:1
+msgid "Random single qubit gate described in https://arxiv.org/abs/2002.07730."
msgstr ""
-#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.check_orthonormality:4
-msgid "if `'l'` or `'left'`: check left orthogonality"
+#: of tensorcircuit.gates.random_single_qubit_gate:3
+msgid "A random single-qubit gate"
msgstr ""
-#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.check_orthonormality:5
-msgid "if `'r`' or `'right'`: check right orthogonality"
+#: of tensorcircuit.gates.random_two_qubit_gate:1
+msgid "Returns a random two-qubit gate."
msgstr ""
-#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.check_orthonormality:6
-msgid "The site of the tensor."
+#: of tensorcircuit.gates.random_two_qubit_gate:3
+msgid "A random two-qubit gate"
msgstr ""
-#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.check_orthonormality:8
-msgid "The L2 norm of the deviation from identity."
+#: of tensorcircuit.gates.rgate_theoretical:1
+msgid ""
+"Rotation gate implemented by matrix exponential. The output is the same "
+"as `rgate`."
msgstr ""
-#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.check_orthonormality:9
-msgid "scalar `Tensor`"
+#: of tensorcircuit.gates.rgate_theoretical:3
+msgid ""
+"R(\\theta, \\alpha, \\phi) = e^{-j \\theta \\left[\\sin(\\alpha) "
+"\\cos(\\phi) X\n"
+" + \\sin(\\alpha) \\sin(\\phi) "
+"Y\n"
+" + \\cos(\\alpha) Z\\right]}\n"
+"\n"
msgstr ""
-#: of
-#: tensornetwork.matrixproductstates.base_mps.BaseMPS.check_orthonormality:11
-msgid "If which is different from 'l','left', 'r' or 'right'."
+#: of tensorcircuit.gates.rgate_theoretical:14
+msgid "Rotation Gate"
msgstr ""
-#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.get_tensor:1
-msgid "Returns the `Tensor` object at `site`."
+#: of tensorcircuit.gates.rx_gate:1
+msgid "Rotation gate along :math:`x` axis."
msgstr ""
-#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.get_tensor:3
+#: of tensorcircuit.gates.rx_gate:3
msgid ""
-"If `site==len(self) - 1` `BaseMPS.connector_matrix` is absorbed fromt the"
-" right-hand side into the returned `Tensor` object."
+"RX(\\theta) = e^{-j\\frac{\\theta}{2}X}\n"
+"\n"
msgstr ""
-#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.get_tensor:7
-msgid "The site for which to return the `Node`."
+#: of tensorcircuit.gates.rx_gate:8
+msgid "RX Gate"
msgstr ""
-#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.get_tensor:9
-msgid "The tensor at `site`."
+#: of tensorcircuit.gates.ry_gate:1
+msgid "Rotation gate along :math:`y` axis."
msgstr ""
-#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.left_envs:1
+#: of tensorcircuit.gates.ry_gate:3
msgid ""
-"Compute left reduced density matrices for site `sites`. This returns a "
-"dict `left_envs` mapping sites (int) to Tensors. `left_envs[site]` is the"
-" left-reduced density matrix to the left of site `site`."
+"RY(\\theta) = e^{-j\\frac{\\theta}{2}Y}\n"
+"\n"
msgstr ""
-#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.left_envs:5
-#: tensornetwork.matrixproductstates.finite_mps.FiniteMPS.right_envs:5
-msgid "A list of sites of the MPS."
+#: of tensorcircuit.gates.ry_gate:8
+msgid "RY Gate"
msgstr ""
-#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.left_envs:8
-msgid "The left-reduced density matrices at each site in `sites`."
+#: of tensorcircuit.gates.rz_gate:1
+msgid "Rotation gate along :math:`z` axis."
msgstr ""
-#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.left_envs:10
-msgid "The left-reduced density matrices"
+#: of tensorcircuit.gates.rz_gate:3
+msgid ""
+"RZ(\\theta) = e^{-j\\frac{\\theta}{2}Z}\n"
+"\n"
msgstr ""
-#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.left_envs:11
-#: tensornetwork.matrixproductstates.finite_mps.FiniteMPS.right_envs:11
-msgid "at each site in `sites`."
+#: of tensorcircuit.gates.rz_gate:8
+msgid "RZ Gate"
msgstr ""
-#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.left_envs:12
-#: tensornetwork.matrixproductstates.finite_mps.FiniteMPS.right_envs:12
-msgid "`dict` mapping `int` to `Tensor`"
+#: of tensorcircuit.gates.u_gate:1
+msgid ""
+"IBMQ U gate following the converntion of OpenQASM3.0. See `OpenQASM doc "
+"`_"
msgstr ""
-#: of tensorcircuit.mps_base.FiniteMPS.measure_local_operator:1
-msgid "Measure the expectation value of local operators `ops` site `sites`."
+#: of tensorcircuit.gates.u_gate:4
+msgid ""
+"\\begin{split}U(\\theta,\\phi,\\lambda) := \\left(\\begin{array}{cc}\n"
+"\\cos(\\theta/2) & -e^{i\\lambda}\\sin(\\theta/2) \\\\\n"
+"e^{i\\phi}\\sin(\\theta/2) & e^{i(\\phi+\\lambda)}\\cos(\\theta/2) "
+"\\end{array}\\right).\\end{split}"
msgstr ""
-#: of tensorcircuit.mps_base.FiniteMPS.measure_local_operator:3
-msgid "A list Tensors of rank 2; the local operators to be measured."
+#: of tensorcircuit.gates.u_gate:10 tensorcircuit.gates.u_gate:12
+#: tensorcircuit.gates.u_gate:14
+msgid "_description_, defaults to 0"
msgstr ""
-#: of tensorcircuit.mps_base.FiniteMPS.measure_local_operator:5
-msgid "Sites where `ops` act."
+#: ../../source/api/interfaces.rst:2
+msgid "tensorcircuit.interfaces"
msgstr ""
-#: of tensorcircuit.mps_base.FiniteMPS.measure_local_operator:7
-msgid "measurements :math:`\\langle` `ops[n]`:math:`\\rangle` for n in `sites`"
+#: ../../source/api/interfaces/numpy.rst:2
+msgid "tensorcircuit.interfaces.numpy"
msgstr ""
-#: of tensorcircuit.mps_base.FiniteMPS.measure_two_body_correlator:1
-msgid ""
-"Compute the correlator :math:`\\langle` `op1[site1], "
-"op2[s]`:math:`\\rangle` between `site1` and all sites `s` in `sites2`. If"
-" `s == site1`, `op2[s]` will be applied first."
+#: of tensorcircuit.interfaces.numpy:1
+msgid "Interface wraps quantum function as a numpy function"
msgstr ""
-#: of tensorcircuit.mps_base.FiniteMPS.measure_two_body_correlator:6
-msgid "Tensor of rank 2; the local operator at `site1`."
+#: of tensorcircuit.interfaces.numpy.numpy_interface:1
+msgid "Convert ``fun`` on ML backend into a numpy function"
msgstr ""
-#: of tensorcircuit.mps_base.FiniteMPS.measure_two_body_correlator:8
-msgid "Tensor of rank 2; the local operator at `sites2`."
+#: of tensorcircuit.interfaces.numpy.numpy_interface:23
+msgid "The quantum function"
msgstr ""
-#: of tensorcircuit.mps_base.FiniteMPS.measure_two_body_correlator:10
-msgid "The site where `op1` acts"
+#: of tensorcircuit.interfaces.numpy.numpy_interface:25
+#: tensorcircuit.interfaces.scipy.scipy_optimize_interface:39
+msgid "whether to jit ``fun``, defaults to True"
msgstr ""
-#: of tensorcircuit.mps_base.FiniteMPS.measure_two_body_correlator:12
-msgid "Sites where operator `op2` acts."
+#: of tensorcircuit.interfaces.numpy.numpy_interface:27
+msgid "The numpy interface compatible version of ``fun``"
msgstr ""
-#: of tensorcircuit.mps_base.FiniteMPS.measure_two_body_correlator:14
-msgid ""
-"Correlator :math:`\\langle` `op1[site1], op2[s]`:math:`\\rangle` for `s` "
-":math:`\\in` `sites2`."
+#: ../../source/api/interfaces/scipy.rst:2
+msgid "tensorcircuit.interfaces.scipy"
msgstr ""
-#: of tensorcircuit.mps_base.FiniteMPS.physical_dimensions:1
-msgid "A list of physical Hilbert-space dimensions of `BaseMPS`"
+#: of tensorcircuit.interfaces.scipy:1
+msgid "Interface wraps quantum function as a scipy function for optimization"
msgstr ""
-#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.position:1
-msgid "Shift `center_position` to `site`."
+#: of tensorcircuit.interfaces.scipy.scipy_optimize_interface:1
+msgid "Convert ``fun`` into a scipy optimize interface compatible version"
msgstr ""
-#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.position:3
-msgid "The site to which FiniteMPS.center_position should be shifted"
+#: of tensorcircuit.interfaces.scipy.scipy_optimize_interface:35
+msgid "The quantum function with scalar out that to be optimized"
msgstr ""
-#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.position:4
-msgid "If `True`, normalize matrices when shifting."
+#: of tensorcircuit.interfaces.scipy.scipy_optimize_interface:37
+msgid "the shape of parameters that ``fun`` accepts, defaults to None"
msgstr ""
-#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.position:5
-msgid "If not `None`, truncate the MPS bond dimensions to `D`."
+#: of tensorcircuit.interfaces.scipy.scipy_optimize_interface:41
+msgid ""
+"whether using gradient-based or gradient free scipy optimize interface, "
+"defaults to True"
msgstr ""
-#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.position:6
-msgid ""
-"if not `None`, truncate each bond dimension, but keeping the truncation "
-"error below `max_truncation_err`."
+#: of tensorcircuit.interfaces.scipy.scipy_optimize_interface:44
+msgid "The scipy interface compatible version of ``fun``"
msgstr ""
-#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.position:9
-msgid "The norm of the tensor at `FiniteMPS.center_position`"
+#: ../../source/api/interfaces/tensorflow.rst:2
+msgid "tensorcircuit.interfaces.tensorflow"
msgstr ""
-#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.position:12
-msgid "If `center_position` is `None`."
+#: of tensorcircuit.interfaces.tensorflow:1
+msgid "Interface wraps quantum function as a tensorflow function"
msgstr ""
-#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.random:1
+#: of tensorcircuit.interfaces.tensorflow.tensorflow_interface:1
msgid ""
-"Initialize a random `FiniteMPS`. The resulting state is normalized. Its "
-"center-position is at 0."
+"Wrap a quantum function on different ML backend with a tensorflow "
+"interface."
msgstr ""
-#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.random:4
-msgid "A list of physical dimensions."
+#: of tensorcircuit.interfaces.tensorflow.tensorflow_interface:22
+#: tensorcircuit.interfaces.torch.torch_interface:28
+msgid "The quantum function with tensor in and tensor out"
msgstr ""
-#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.random:5
-msgid "A list of bond dimensions."
+#: of tensorcircuit.interfaces.tensorflow.tensorflow_interface:24
+msgid "output tf dtype or in str"
msgstr ""
-#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.random:6
-msgid "A numpy dtype."
+#: of tensorcircuit.interfaces.tensorflow.tensorflow_interface:26
+#: tensorcircuit.interfaces.torch.torch_interface:30
+msgid "whether to jit ``fun``, defaults to False"
msgstr ""
-#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.random:7
-msgid "An optional backend."
+#: of tensorcircuit.interfaces.tensorflow.tensorflow_interface:28
+#: tensorcircuit.interfaces.torch.torch_interface:32
+msgid "whether transform tensor backend via dlpack, defaults to False"
msgstr ""
-#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.random:9
-msgid "`FiniteMPS`"
+#: of tensorcircuit.interfaces.tensorflow.tensorflow_interface:30
+#: tensorcircuit.interfaces.torch.torch_interface:34
+msgid ""
+"The same quantum function but now with torch tensor in and torch tensor "
+"out while AD is also supported"
msgstr ""
-#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.right_envs:1
-msgid ""
-"Compute right reduced density matrices for site `sites. This returns a "
-"dict `right_envs` mapping sites (int) to Tensors. `right_envs[site]` is "
-"the right-reduced density matrix to the right of site `site`."
+#: ../../source/api/interfaces/tensortrans.rst:2
+msgid "tensorcircuit.interfaces.tensortrans"
msgstr ""
-#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.right_envs:8
-msgid "The right-reduced density matrices at each site in `sites`."
+#: of tensorcircuit.interfaces.tensortrans:1
+msgid "general function for interfaces transformation"
msgstr ""
-#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.right_envs:10
-msgid "The right-reduced density matrices"
+#: of tensorcircuit.interfaces.tensortrans.args_to_tensor:1
+msgid ""
+"Function decorator that automatically convert inputs to tensors on "
+"current backend"
msgstr ""
-#: ../../source/api/mpscircuit.rst:2
-msgid "tensorcircuit.mpscircuit"
+#: of tensorcircuit.interfaces.tensortrans.args_to_tensor:63
+msgid ""
+"the wrapped function whose arguments in ``argnums`` position are expected"
+" to be tensor format"
msgstr ""
-#: of tensorcircuit.mpscircuit:1
-msgid "Quantum circuit: MPS state simulator"
+#: of tensorcircuit.interfaces.tensortrans.args_to_tensor:66
+msgid "position of args under the auto conversion, defaults to 0"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit:1
-msgid "``MPSCircuit`` class. Simple usage demo below."
+#: of tensorcircuit.interfaces.tensortrans.args_to_tensor:68
+msgid ""
+"try reshape all input tensor as matrix with shape rank 2, defaults to "
+"False"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.MPO_to_gate:1
-msgid "Convert MPO to gate"
+#: of tensorcircuit.interfaces.tensortrans.args_to_tensor:71
+msgid "convert ``Gate`` to tensor, defaults to False"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.__init__:1
-msgid "MPSCircuit object based on state simulator."
+#: of tensorcircuit.interfaces.tensortrans.args_to_tensor:73
+msgid "reshape tensor from ``Gate`` input as matrix, defaults to True"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.__init__:5
-msgid "The center position of MPS, default to 0"
+#: of tensorcircuit.interfaces.tensortrans.args_to_tensor:75
+msgid "convert ``QuOperator`` to tensor, defaults to False"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.__init__:7
-msgid ""
-"If not None, the initial state of the circuit is taken as ``tensors`` "
-"instead of :math:`\\vert 0\\rangle^n` qubits, defaults to None. When "
-"``tensors`` are specified, if ``center_position`` is None, then the "
-"tensors are canonicalized, otherwise it is assumed the tensors are "
-"already canonicalized at the ``center_position``"
+#: of tensorcircuit.interfaces.tensortrans.args_to_tensor:77
+msgid "reshape tensor from ``QuOperator`` input as matrix, defaults to True"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.__init__:12
-msgid ""
-"If not None, it is transformed to the MPS form according to the split "
-"rules"
+#: of tensorcircuit.interfaces.tensortrans.args_to_tensor:79
+msgid "whether cast to backend dtype, defaults to True"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.__init__:14
-msgid "Split rules"
+#: of tensorcircuit.interfaces.tensortrans.args_to_tensor:81
+msgid "The wrapped function"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.apply_general_gate:1
-msgid "Apply a general qubit gate on MPS."
+#: of tensorcircuit.interfaces.tensortrans.general_args_to_numpy:1
+msgid "Given a pytree, get the corresponding numpy array pytree"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.apply_adjacent_double_gate:3
-#: tensorcircuit.mpscircuit.MPSCircuit.apply_double_gate:3
-#: tensorcircuit.mpscircuit.MPSCircuit.apply_general_gate:3
-msgid "The Gate to be applied"
+#: of tensorcircuit.interfaces.tensortrans.general_args_to_numpy:3
+msgid "pytree"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.apply_general_gate:6
-msgid "Qubit indices of the gate"
+#: of tensorcircuit.interfaces.tensortrans.general_args_to_numpy:5
+msgid "the same format pytree with all tensor replaced by numpy array"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.apply_general_gate:5
-msgid "\"MPS does not support application of gate on > 2 qubits.\""
+#: of tensorcircuit.interfaces.tensortrans.numpy_args_to_backend:1
+msgid "Given a pytree of numpy arrays, get the corresponding tensor pytree"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.apply_MPO:1
-msgid "Apply a MPO to the MPS"
+#: of tensorcircuit.interfaces.tensortrans.numpy_args_to_backend:3
+msgid "pytree of numpy arrays"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.apply_adjacent_double_gate:1
-msgid ""
-"Apply a double qubit gate on adjacent qubits of Matrix Product States "
-"(MPS)."
+#: of tensorcircuit.interfaces.tensortrans.numpy_args_to_backend:5
+msgid "str of str of the same pytree shape as args, defaults to None"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.apply_adjacent_double_gate:5
-#: tensorcircuit.mpscircuit.MPSCircuit.apply_double_gate:5
-msgid "The first qubit index of the gate"
+#: of tensorcircuit.interfaces.tensortrans.numpy_args_to_backend:7
+msgid ""
+"str or backend object, defaults to None, indicating the current default "
+"backend"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.apply_adjacent_double_gate:7
-#: tensorcircuit.mpscircuit.MPSCircuit.apply_double_gate:7
-msgid "The second qubit index of the gate"
+#: of tensorcircuit.interfaces.tensortrans.numpy_args_to_backend:10
+msgid ""
+"the same format pytree with all numpy array replaced by the tensors in "
+"the target backend"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.apply_adjacent_double_gate:9
-msgid "Center position of MPS, default is None"
+#: of tensorcircuit.interfaces.tensortrans.which_backend:1
+msgid "Given a tensor ``a``, return the corresponding backend"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.apply_double_gate:1
-msgid "Apply a double qubit gate on MPS."
+#: of tensorcircuit.interfaces.tensortrans.which_backend:5
+msgid ""
+"if true, return backend object, if false, return backend str, defaults to"
+" True"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.apply_nqubit_gate:1
-msgid "Apply a n-qubit gate by transforming the gate to MPO"
+#: of tensorcircuit.interfaces.tensortrans.which_backend:8
+msgid "the backend object or backend str"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.apply_single_gate:1
-msgid "Apply a single qubit gate on MPS; no truncation is needed."
+#: ../../source/api/interfaces/torch.rst:2
+msgid "tensorcircuit.interfaces.torch"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.apply_single_gate:3
-msgid "gate to be applied"
+#: of tensorcircuit.interfaces.torch:1
+msgid "Interface wraps quantum function as a torch function"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.apply_single_gate:5
-msgid "Qubit index of the gate"
+#: of tensorcircuit.interfaces.torch.torch_interface:1
+msgid "Wrap a quantum function on different ML backend with a pytorch interface."
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.conj:1
-msgid "Compute the conjugate of the current MPS."
+#: of tensorcircuit.interfaces.torch.torch_interface_kws:1
+msgid ""
+"similar to py:meth:`tensorcircuit.interfaces.torch.torch_interface`, but "
+"now the interface support static arguments for function ``f``, which is "
+"not a tensor and can be used with keyword arguments"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.conj:3
-#: tensorcircuit.mpscircuit.MPSCircuit.copy:3
-#: tensorcircuit.mpscircuit.MPSCircuit.copy_without_tensor:3
-msgid "The constructed MPS"
+#: ../../source/api/keras.rst:2
+msgid "tensorcircuit.keras"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.copy:1
-msgid "Copy the current MPS."
+#: of tensorcircuit.keras:1
+msgid "Keras layer for tc quantum function"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.copy_without_tensor:1
-msgid "Copy the current MPS without the tensors."
+#: of tensorcircuit.keras.HardwareLayer:1
+msgid "Bases: :py:class:`~tensorcircuit.keras.QuantumLayer`"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.expectation:1
-msgid "Compute the expectation of corresponding operators in the form of tensor."
+#: of tensorcircuit.keras.HardwareLayer:1
+msgid ""
+"Keras Layer wrapping quantum function with cloud qpu access (using "
+":py:mod:`tensorcircuit.cloud` module)"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.expectation:3
+#: of tensorcircuit.keras.QuantumLayer.__init__:1
msgid ""
-"Operator and its position on the circuit, eg. ``(gates.Z(), [1]), "
-"(gates.X(), [2])`` is for operator :math:`Z_1X_2`"
+"`QuantumLayer` wraps the quantum function `f` as a `keras.Layer` so that "
+"tensorcircuit is better integrated with tensorflow. Note that the input "
+"of the layer can be tensors or even list/dict of tensors."
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.expectation:9
-msgid "If not None, will be used as bra"
+#: of tensorcircuit.keras.QuantumLayer.__init__:5
+msgid "Callabel function."
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.expectation:11
-msgid "Whether to conjugate the bra state"
+#: of tensorcircuit.keras.QuantumLayer.__init__:7
+msgid "The shape of the weights."
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.expectation:13
-msgid "Whether to normalize the MPS"
+#: of tensorcircuit.keras.QuantumLayer.__init__:9
+msgid "The initializer of the weights, defaults to \"glorot_uniform\""
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.expectation:15
-#: tensorcircuit.mpscircuit.MPSCircuit.set_split_rules:5
-#: tensorcircuit.mpscircuit.MPSCircuit.wavefunction_to_tensors:5
-msgid "Truncation split"
+#: of tensorcircuit.keras.QuantumLayer.__init__:13
+msgid "The regularizer of the weights, defaults to None"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.expectation:17
-msgid "The expectation of corresponding operators"
+#: of tensorcircuit.keras.load_func:1
+msgid ""
+"Load function from the files in the ``tf.savedmodel`` format. We can load"
+" several functions at the same time, as they can be the same function of "
+"different input shapes."
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.gate_to_MPO:1
-msgid "Convert gate to MPO form with identities at empty sites"
+#: of tensorcircuit.keras.load_func:24
+msgid ""
+"The fallback function when all functions loaded are failed, defaults to "
+"None"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.get_bond_dimensions:1
-msgid "Get the MPS bond dimensions"
+#: of tensorcircuit.keras.load_func:26
+msgid ""
+"When there is not legal loaded function of the input shape and no "
+"fallback callable."
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.get_bond_dimensions:3
-#: tensorcircuit.mpscircuit.MPSCircuit.get_tensors:3
-msgid "MPS tensors"
+#: of tensorcircuit.keras.load_func:27
+msgid ""
+"A function that tries all loaded function against the input until the "
+"first success one."
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.get_center_position:1
-msgid "Get the center position of the MPS"
+#: of tensorcircuit.keras.output_asis_loss:1
+msgid "The keras loss function that directly taking the model output as the loss."
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.get_center_position:3
-msgid "center position"
+#: of tensorcircuit.keras.output_asis_loss:3
+msgid "Ignoring this parameter."
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.get_norm:1
-msgid "Get the normalized Center Position."
+#: of tensorcircuit.keras.output_asis_loss:5
+msgid "Model output."
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.get_norm:3
-msgid "Normalized Center Position."
+#: of tensorcircuit.keras.output_asis_loss:7
+msgid "Model output, which is y_pred."
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.get_quvector:2
-msgid "Get the representation of the output state in the form of ``QuVector``"
+#: of tensorcircuit.keras.save_func:1
+msgid "Save tf function in the file (``tf.savedmodel`` format)."
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.get_quvector:2
-msgid "has to be full contracted in MPS"
+#: of tensorcircuit.keras.save_func:30
+msgid "``tf.function`` ed function with graph building"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.get_tensors:1
-msgid "Get the MPS tensors"
+#: of tensorcircuit.keras.save_func:32
+msgid "the dir path to save the function"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.is_valid:1
-msgid "Check whether the circuit is legal."
+#: ../../source/api/mps_base.rst:2
+msgid "tensorcircuit.mps_base"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.is_valid:3
-msgid "Whether the circuit is legal."
+#: of tensorcircuit.mps_base:1
+msgid "FiniteMPS from tensornetwork with bug fixed"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.measure:1
-msgid "Take measurement to the given quantum lines."
+#: of tensorcircuit.mps_base.FiniteMPS:1
+msgid "Bases: :py:class:`~tensornetwork.matrixproductstates.finite_mps.FiniteMPS`"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.mid_measurement:1
-msgid ""
-"Middle measurement in the z-basis on the circuit, note the wavefunction "
-"output is not normalized with ``mid_measurement`` involved, one should "
-"normalized the state manually if needed."
+#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.__init__:4
+msgid "Initialize a `FiniteMPS`. If `canonicalize` is `True` the state"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.mid_measurement:4
-msgid "The index of qubit that the Z direction postselection applied on"
+#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.__init__:2
+msgid ""
+"is brought into canonical form, with `BaseMPS.center_position` at "
+"`center_position`. if `center_position` is `None` and `canonicalize = "
+"True`, `BaseMPS.center_position` is set to 0."
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.mid_measurement:6
-msgid "0 for spin up, 1 for spin down, defaults to 0"
+#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.__init__:6
+msgid "A list of `Tensor` objects."
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.normalize:1
-msgid "Normalize MPS Circuit according to the center position."
+#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.__init__:7
+msgid "The initial position of the center site."
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.position:1
-msgid "Wrapper of tn.FiniteMPS.position. Set orthogonality center."
+#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.__init__:8
+msgid "If `True` the mps is canonicalized at initialization."
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.position:4
-msgid "The orthogonality center"
+#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.__init__:9
+msgid ""
+"The name of the backend that should be used to perform contractions. "
+"Available backends are currently 'numpy', 'tensorflow', 'pytorch', 'jax'"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.proj_with_mps:1
-msgid "Compute the projection between `other` as bra and `self` as ket."
+#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.apply_one_site_gate:1
+msgid ""
+"Apply a one-site gate to an MPS. This routine will in general destroy any"
+" canonical form of the state. If a canonical form is needed, the user can"
+" restore it using `FiniteMPS.position` :param gate: a one-body gate "
+":param site: the site where the gate should be applied"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.proj_with_mps:3
-msgid "ket of the other MPS, which will be converted to bra automatically"
+#: of
+#: tensornetwork.matrixproductstates.base_mps.BaseMPS.apply_transfer_operator:1
+msgid "Compute the action of the MPS transfer-operator at site `site`."
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.proj_with_mps:5
-msgid "The projection in form of tensor"
+#: of
+#: tensornetwork.matrixproductstates.base_mps.BaseMPS.apply_transfer_operator:3
+msgid "A site of the MPS"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.reduce_dimension:1
-msgid "Reduce the bond dimension between two adjacent sites by SVD"
+#: of
+#: tensornetwork.matrixproductstates.base_mps.BaseMPS.apply_transfer_operator:4
+msgid ""
+"* if `1, 'l'` or `'left'`: compute the left-action of the MPS transfer-"
+"operator at `site` on the input `matrix`. * if `-1, 'r'` or `'right'`: "
+"compute the right-action of the MPS transfer-operator at `site` on the "
+"input `matrix`"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.reduce_tensor_dimension:1
-msgid "Reduce the bond dimension between two general tensors by SVD"
+#: of
+#: tensornetwork.matrixproductstates.base_mps.BaseMPS.apply_transfer_operator:5
+msgid ""
+"if `1, 'l'` or `'left'`: compute the left-action of the MPS transfer-"
+"operator at `site` on the input `matrix`."
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.set_split_rules:1
+#: of
+#: tensornetwork.matrixproductstates.base_mps.BaseMPS.apply_transfer_operator:7
msgid ""
-"Set truncation split when double qubit gates are applied. If nothing is "
-"specified, no truncation will take place and the bond dimension will keep"
-" growing. For more details, refer to `split_tensor`."
+"if `-1, 'r'` or `'right'`: compute the right-action of the MPS transfer-"
+"operator at `site` on the input `matrix`"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.slice:1
-msgid "Get a slice of the MPS (only for internal use)"
+#: of
+#: tensornetwork.matrixproductstates.base_mps.BaseMPS.apply_transfer_operator:9
+msgid "A rank-2 tensor or matrix."
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.wavefunction:3
-msgid "the str indicating the form of the output wavefunction"
+#: of
+#: tensornetwork.matrixproductstates.base_mps.BaseMPS.apply_transfer_operator:11
+msgid "The result of applying the MPS transfer-operator to `matrix`"
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.wavefunction:5
-msgid "Tensor with shape [1, -1]"
+#: of tensorcircuit.mps_base.FiniteMPS.apply_two_site_gate:1
+msgid ""
+"Apply a two-site gate to an MPS. This routine will in general destroy any"
+" canonical form of the state. If a canonical form is needed, the user can"
+" restore it using `FiniteMPS.position`."
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.wavefunction:9
-msgid "i--A--B--j -> i--XX--j"
+#: of tensorcircuit.mps_base.FiniteMPS.apply_two_site_gate:5
+msgid "A two-body gate."
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.wavefunction_to_tensors:1
-msgid "Construct the MPS tensors from a given wavefunction."
+#: of tensorcircuit.mps_base.FiniteMPS.apply_two_site_gate:7
+msgid "The first site where the gate acts."
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.wavefunction_to_tensors:3
-msgid "The given wavefunction (any shape is OK)"
+#: of tensorcircuit.mps_base.FiniteMPS.apply_two_site_gate:9
+msgid "The second site where the gate acts."
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.wavefunction_to_tensors:7
-msgid "Physical dimension, 2 for MPS and 4 for MPO"
+#: of tensorcircuit.mps_base.FiniteMPS.apply_two_site_gate:15
+msgid ""
+"An optional value to choose the MPS tensor at `center_position` to be "
+"isometric after the application of the gate. Defaults to `site1`. If the "
+"MPS is canonical (i.e.`BaseMPS.center_position != None`), and if the "
+"orthogonality center coincides with either `site1` or `site2`, the "
+"orthogonality center will be shifted to `center_position` (`site1` by "
+"default). If the orthogonality center does not coincide with `(site1, "
+"site2)` then `MPS.center_position` is set to `None`."
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.wavefunction_to_tensors:9
-msgid "Whether to normalize the wavefunction"
+#: of tensorcircuit.mps_base.FiniteMPS.apply_two_site_gate:26
+msgid ""
+"\"rank of gate is {} but has to be 4\", \"site1 = {} is not between 0 <= "
+"site < N - 1 = {}\", \"site2 = {} is not between 1 <= site < N = "
+"{}\",\"Found site2 ={}, site1={}. Only nearest neighbor gates are "
+"currently supported\", \"f center_position = {center_position} not f in "
+"{(site1, site2)} \", or \"center_position = {}, but gate is applied at "
+"sites {}, {}. Truncation should only be done if the gate is applied at "
+"the center position of the MPS.\""
msgstr ""
-#: of tensorcircuit.mpscircuit.MPSCircuit.wavefunction_to_tensors:11
-msgid "The tensors"
+#: of tensorcircuit.mps_base.FiniteMPS.apply_two_site_gate:32
+msgid "A scalar tensor containing the truncated weight of the truncation."
msgstr ""
-#: of tensorcircuit.mpscircuit.split_tensor:1
-msgid "Split the tensor by SVD or QR depends on whether a truncation is required."
+#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.bond_dimension:1
+msgid "The bond dimension of `bond`"
msgstr ""
-#: of tensorcircuit.mpscircuit.split_tensor:3
-msgid "The input tensor to split."
+#: of tensorcircuit.mps_base.FiniteMPS.bond_dimensions:1
+msgid "A list of bond dimensions of `BaseMPS`"
msgstr ""
-#: of tensorcircuit.mpscircuit.split_tensor:5
-msgid "Determine the orthogonal center is on the left tensor or the right tensor."
+#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.canonicalize:1
+msgid ""
+"Bring the MPS into canonical form according to `center_position`. If "
+"`center_position` is `None`, the MPS is canonicalized with "
+"`center_position = 0`."
msgstr ""
-#: of tensorcircuit.mpscircuit.split_tensor:7
-msgid "Two tensors after splitting"
+#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.canonicalize:5
+msgid "If `True`, normalize matrices when shifting the orthogonality center."
msgstr ""
-#: ../../source/api/noisemodel.rst:2
-msgid "tensorcircuit.noisemodel"
+#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.canonicalize:8
+msgid "The norm of the MPS."
msgstr ""
-#: of tensorcircuit.noisemodel:1
-msgid "General Noise Model Construction."
+#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.check_canonical:1
+msgid "Check whether the MPS is in the expected canonical form."
msgstr ""
-#: of tensorcircuit.noisemodel.NoiseConf:1
-msgid "``Noise Configuration`` class."
+#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.check_canonical:3
+msgid "The L2 norm of the vector of local deviations."
msgstr ""
-#: of tensorcircuit.noisemodel.NoiseConf.__init__:1
-msgid "Establish a noise configuration."
+#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.check_orthonormality:1
+msgid "Check orthonormality of tensor at site `site`."
msgstr ""
-#: of tensorcircuit.noisemodel.NoiseConf.add_noise:1
+#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.check_orthonormality:3
msgid ""
-"Add noise channels on specific gates and specific qubits in form of Kraus"
-" operators."
+"* if `'l'` or `'left'`: check left orthogonality * if `'r`' or `'right'`:"
+" check right orthogonality"
msgstr ""
-#: of tensorcircuit.noisemodel.NoiseConf.add_noise:3
-msgid "noisy gate"
+#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.check_orthonormality:4
+msgid "if `'l'` or `'left'`: check left orthogonality"
msgstr ""
-#: of tensorcircuit.noisemodel.NoiseConf.add_noise:5
-msgid "noise channel"
+#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.check_orthonormality:5
+msgid "if `'r`' or `'right'`: check right orthogonality"
msgstr ""
-#: of tensorcircuit.noisemodel.NoiseConf.add_noise:7
-msgid ""
-"the list of noisy qubit, defaults to None, indicating applying the noise "
-"channel on all qubits"
-msgstr ""
-
-#: of tensorcircuit.noisemodel.apply_qir_with_noise:1
-msgid "A newly defined circuit"
+#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.check_orthonormality:6
+msgid "The site of the tensor."
msgstr ""
-#: of tensorcircuit.noisemodel.apply_qir_with_noise:3
-msgid "The qir of the clean circuit"
+#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.check_orthonormality:8
+msgid "The L2 norm of the deviation from identity."
msgstr ""
-#: of tensorcircuit.noisemodel.apply_qir_with_noise:5
-#: tensorcircuit.noisemodel.circuit_with_noise:5
-msgid "Noise Configuration"
+#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.check_orthonormality:9
+msgid "scalar `Tensor`"
msgstr ""
-#: of tensorcircuit.noisemodel.apply_qir_with_noise:7
-#: tensorcircuit.noisemodel.circuit_with_noise:7
-msgid "The status for Monte Carlo sampling, defaults to None"
+#: of
+#: tensornetwork.matrixproductstates.base_mps.BaseMPS.check_orthonormality:11
+msgid "If which is different from 'l','left', 'r' or 'right'."
msgstr ""
-#: of tensorcircuit.noisemodel.apply_qir_with_noise:9
-#: tensorcircuit.noisemodel.circuit_with_noise:9
-msgid "A newly constructed circuit with noise"
+#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.get_tensor:1
+msgid "Returns the `Tensor` object at `site`."
msgstr ""
-#: of tensorcircuit.noisemodel.circuit_with_noise:1
-msgid "Noisify a clean circuit."
+#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.get_tensor:3
+msgid ""
+"If `site==len(self) - 1` `BaseMPS.connector_matrix` is absorbed fromt the"
+" right-hand side into the returned `Tensor` object."
msgstr ""
-#: of tensorcircuit.noisemodel.circuit_with_noise:3
-msgid "A clean circuit"
+#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.get_tensor:7
+msgid "The site for which to return the `Node`."
msgstr ""
-#: of tensorcircuit.noisemodel.expectation_noisfy:1
-msgid "Calculate expectation value with noise configuration."
+#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.get_tensor:9
+msgid "The tensor at `site`."
msgstr ""
-#: of tensorcircuit.noisemodel.expectation_noisfy:3
-#: tensorcircuit.noisemodel.sample_expectation_ps_noisfy:3
-msgid "The clean circuit"
+#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.left_envs:1
+msgid ""
+"Compute left reduced density matrices for site `sites`. This returns a "
+"dict `left_envs` mapping sites (int) to Tensors. `left_envs[site]` is the"
+" left-reduced density matrix to the left of site `site`."
msgstr ""
-#: of tensorcircuit.noisemodel.expectation_noisfy:12
-msgid "expectation value with noise"
+#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.left_envs:5
+#: tensornetwork.matrixproductstates.finite_mps.FiniteMPS.right_envs:5
+msgid "A list of sites of the MPS."
msgstr ""
-#: of tensorcircuit.noisemodel.sample_expectation_ps_noisfy:1
-msgid "Calculate sample_expectation_ps with noise configuration."
+#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.left_envs:8
+msgid "The left-reduced density matrices at each site in `sites`."
msgstr ""
-#: of tensorcircuit.noisemodel.sample_expectation_ps_noisfy:13
-msgid ""
-"repetition time for Monte Carlo sampling for noisfy calculation, "
-"defaults to 1000"
+#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.left_envs:10
+msgid "The left-reduced density matrices"
msgstr ""
-#: of tensorcircuit.noisemodel.sample_expectation_ps_noisfy:20
-msgid ""
-"external randomness given by tensor uniformly from [0, 1], defaults to "
-"None, used for measurement sampling"
+#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.left_envs:11
+#: tensornetwork.matrixproductstates.finite_mps.FiniteMPS.right_envs:11
+msgid "at each site in `sites`."
msgstr ""
-#: of tensorcircuit.noisemodel.sample_expectation_ps_noisfy:23
-msgid "sample expectation value with noise"
+#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.left_envs:12
+#: tensornetwork.matrixproductstates.finite_mps.FiniteMPS.right_envs:12
+msgid "`dict` mapping `int` to `Tensor`"
msgstr ""
-#: ../../source/api/quantum.rst:2
-msgid "tensorcircuit.quantum"
+#: of tensorcircuit.mps_base.FiniteMPS.measure_local_operator:1
+msgid "Measure the expectation value of local operators `ops` site `sites`."
msgstr ""
-#: of tensorcircuit.quantum:1
-msgid "Quantum state and operator class backend by tensornetwork"
+#: of tensorcircuit.mps_base.FiniteMPS.measure_local_operator:3
+msgid "A list Tensors of rank 2; the local operators to be measured."
msgstr ""
-#: of tensorcircuit.quantum
-msgid "IMPORT"
+#: of tensorcircuit.mps_base.FiniteMPS.measure_local_operator:5
+msgid "Sites where `ops` act."
msgstr ""
-#: of tensorcircuit.quantum.PauliString2COO:1
-#: tensorcircuit.quantum.PauliStringSum2COO_tf:1
-msgid "Generate tensorflow sparse matrix from Pauli string sum"
+#: of tensorcircuit.mps_base.FiniteMPS.measure_local_operator:7
+msgid "measurements :math:`\\langle` `ops[n]`:math:`\\rangle` for n in `sites`"
msgstr ""
-#: of tensorcircuit.quantum.PauliString2COO:3
+#: of tensorcircuit.mps_base.FiniteMPS.measure_two_body_correlator:1
msgid ""
-"1D Tensor representing for a Pauli string, e.g. [1, 0, 0, 3, 2] is for "
-":math:`X_0Z_3Y_4`"
+"Compute the correlator :math:`\\langle` `op1[site1], "
+"op2[s]`:math:`\\rangle` between `site1` and all sites `s` in `sites2`. If"
+" `s == site1`, `op2[s]` will be applied first."
msgstr ""
-#: of tensorcircuit.quantum.PauliString2COO:6
-msgid ""
-"the weight for the Pauli string defaults to None (all Pauli strings "
-"weight 1.0)"
+#: of tensorcircuit.mps_base.FiniteMPS.measure_two_body_correlator:6
+msgid "Tensor of rank 2; the local operator at `site1`."
msgstr ""
-#: of tensorcircuit.quantum.PauliString2COO:9
-msgid "the tensorflow sparse matrix"
+#: of tensorcircuit.mps_base.FiniteMPS.measure_two_body_correlator:8
+msgid "Tensor of rank 2; the local operator at `sites2`."
msgstr ""
-#: of tensorcircuit.quantum.PauliStringSum2COO:1
-#: tensorcircuit.quantum.PauliStringSum2COO_numpy:1
-msgid ""
-"Generate sparse tensor from Pauli string sum. Currently requires "
-"tensorflow installed"
+#: of tensorcircuit.mps_base.FiniteMPS.measure_two_body_correlator:10
+msgid "The site where `op1` acts"
msgstr ""
-#: of tensorcircuit.quantum.PauliStringSum2COO:4
-#: tensorcircuit.quantum.PauliStringSum2COO_numpy:4
-#: tensorcircuit.quantum.PauliStringSum2COO_tf:3
-#: tensorcircuit.quantum.PauliStringSum2Dense:5
-msgid ""
-"2D Tensor, each row is for a Pauli string, e.g. [1, 0, 0, 3, 2] is for "
-":math:`X_0Z_3Y_4`"
+#: of tensorcircuit.mps_base.FiniteMPS.measure_two_body_correlator:12
+msgid "Sites where operator `op2` acts."
msgstr ""
-#: of tensorcircuit.quantum.PauliStringSum2COO:7
-#: tensorcircuit.quantum.PauliStringSum2COO_numpy:7
-#: tensorcircuit.quantum.PauliStringSum2COO_tf:6
-#: tensorcircuit.quantum.PauliStringSum2Dense:8
+#: of tensorcircuit.mps_base.FiniteMPS.measure_two_body_correlator:14
msgid ""
-"1D Tensor, each element corresponds the weight for each Pauli string "
-"defaults to None (all Pauli strings weight 1.0)"
+"Correlator :math:`\\langle` `op1[site1], op2[s]`:math:`\\rangle` for `s` "
+":math:`\\in` `sites2`."
msgstr ""
-#: of tensorcircuit.quantum.PauliStringSum2COO:10
-#: tensorcircuit.quantum.PauliStringSum2COO_numpy:10
-#: tensorcircuit.quantum.PauliStringSum2Dense:11
-msgid ""
-"default False. If True, return numpy coo else return backend compatible "
-"sparse tensor"
+#: of tensorcircuit.mps_base.FiniteMPS.physical_dimensions:1
+msgid "A list of physical Hilbert-space dimensions of `BaseMPS`"
msgstr ""
-#: of tensorcircuit.quantum.PauliStringSum2COO:13
-#: tensorcircuit.quantum.PauliStringSum2COO_numpy:13
-msgid "the scipy coo sparse matrix"
+#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.position:1
+msgid "Shift `center_position` to `site`."
msgstr ""
-#: of tensorcircuit.quantum.PauliStringSum2COO_tf:9
-msgid "the tensorflow coo sparse matrix"
+#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.position:3
+msgid "The site to which FiniteMPS.center_position should be shifted"
msgstr ""
-#: of tensorcircuit.quantum.PauliStringSum2Dense:1
-msgid ""
-"Generate dense matrix from Pauli string sum. Currently requires "
-"tensorflow installed."
+#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.position:4
+msgid "If `True`, normalize matrices when shifting."
msgstr ""
-#: of tensorcircuit.quantum.PauliStringSum2Dense:14
-msgid "the tensorflow dense matrix"
+#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.position:5
+msgid "If not `None`, truncate the MPS bond dimensions to `D`."
msgstr ""
-#: of tensorcircuit.quantum.QuAdjointVector:1 tensorcircuit.quantum.QuScalar:1
-#: tensorcircuit.quantum.QuVector:1
-msgid "Bases: :py:class:`tensorcircuit.quantum.QuOperator`"
+#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.position:6
+msgid ""
+"if not `None`, truncate each bond dimension, but keeping the truncation "
+"error below `max_truncation_err`."
msgstr ""
-#: of tensorcircuit.quantum.QuAdjointVector:1
-msgid "Represents an adjoint (row) vector via a tensor network."
+#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.position:9
+msgid "The norm of the tensor at `FiniteMPS.center_position`"
msgstr ""
-#: of tensorcircuit.quantum.QuAdjointVector.__init__:1
+#: of tensornetwork.matrixproductstates.base_mps.BaseMPS.position:12
+msgid "If `center_position` is `None`."
+msgstr ""
+
+#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.random:1
msgid ""
-"Constructs a new `QuAdjointVector` from a tensor network. This "
-"encapsulates an existing tensor network, interpreting it as an adjoint "
-"vector (row vector)."
+"Initialize a random `FiniteMPS`. The resulting state is normalized. Its "
+"center-position is at 0."
msgstr ""
-#: of tensorcircuit.quantum.QuAdjointVector.__init__:5
-#: tensorcircuit.quantum.QuOperator.__init__:9
-msgid "The edges of the network to be used as the input edges."
+#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.random:4
+msgid "A list of physical dimensions."
msgstr ""
-#: of tensorcircuit.quantum.QuAdjointVector.__init__:7
-#: tensorcircuit.quantum.QuOperator.__init__:11
-#: tensorcircuit.quantum.QuVector.__init__:6
-msgid ""
-"Nodes used to refer to parts of the tensor network that are not connected"
-" to any input or output edges (for example: a scalar factor)."
+#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.random:5
+msgid "A list of bond dimensions."
msgstr ""
-#: of tensorcircuit.quantum.QuAdjointVector.__init__:10
-#: tensorcircuit.quantum.QuScalar.__init__:7
-#: tensorcircuit.quantum.QuVector.__init__:9
-msgid "Optional collection of edges to ignore when performing consistency checks."
+#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.random:6
+msgid "A numpy dtype."
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.adjoint:1
-msgid ""
-"The adjoint of the operator. This creates a new `QuOperator` with "
-"complex-conjugate copies of all tensors in the network and with the input"
-" and output edges switched."
+#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.random:7
+msgid "An optional backend."
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.adjoint:5
-msgid "The adjoint of the operator."
+#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.random:9
+msgid "`FiniteMPS`"
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.check_network:1
+#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.right_envs:1
msgid ""
-"Check that the network has the expected dimensionality. This checks that "
-"all input and output edges are dangling and that there are no other "
-"dangling edges (except any specified in `ignore_edges`). If not, an "
-"exception is raised."
+"Compute right reduced density matrices for site `sites. This returns a "
+"dict `right_envs` mapping sites (int) to Tensors. `right_envs[site]` is "
+"the right-reduced density matrix to the right of site `site`."
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.contract:1
-msgid ""
-"Contract the tensor network in place. This modifies the tensor network "
-"representation of the operator (or vector, or scalar), reducing it to a "
-"single tensor, without changing the value."
+#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.right_envs:8
+msgid "The right-reduced density matrices at each site in `sites`."
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.contract:5
-msgid "Manually specify the axis ordering of the final tensor."
+#: of tensornetwork.matrixproductstates.finite_mps.FiniteMPS.right_envs:10
+msgid "The right-reduced density matrices"
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.contract:7
-msgid "The present object."
+#: ../../source/api/mpscircuit.rst:2
+msgid "tensorcircuit.mpscircuit"
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.copy:1
-msgid "The deep copy of the operator."
+#: of tensorcircuit.mpscircuit:1
+msgid "Quantum circuit: MPS state simulator"
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.copy:3
-msgid "The new copy of the operator."
+#: of tensorcircuit.mpscircuit.MPSCircuit:1
+msgid "``MPSCircuit`` class. Simple usage demo below."
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.eval:1
-msgid ""
-"Contracts the tensor network in place and returns the final tensor. Note "
-"that this modifies the tensor network representing the operator. The "
-"default ordering for the axes of the final tensor is: `*out_edges, "
-"*in_edges`. If there are any \"ignored\" edges, their axes come first: "
-"`*ignored_edges, *out_edges, *in_edges`."
+#: of tensorcircuit.mpscircuit.MPSCircuit.MPO_to_gate:1
+msgid "Convert MPO to gate"
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.eval:8
-#: tensorcircuit.quantum.QuOperator.eval_matrix:6
-msgid ""
-"Manually specify the axis ordering of the final tensor. The default "
-"ordering is determined by `out_edges` and `in_edges` (see above)."
+#: of tensorcircuit.mpscircuit.MPSCircuit.__init__:1
+msgid "MPSCircuit object based on state simulator."
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.eval:11
-#: tensorcircuit.quantum.QuOperator.eval_matrix:9
-msgid "Node count '{}' > 1 after contraction!"
+#: of tensorcircuit.mpscircuit.MPSCircuit.__init__:5
+msgid "The center position of MPS, default to 0"
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.eval:12
-msgid "The final tensor representing the operator."
+#: of tensorcircuit.mpscircuit.MPSCircuit.__init__:7
+msgid ""
+"If not None, the initial state of the circuit is taken as ``tensors`` "
+"instead of :math:`\\vert 0\\rangle^n` qubits, defaults to None. When "
+"``tensors`` are specified, if ``center_position`` is None, then the "
+"tensors are canonicalized, otherwise it is assumed the tensors are "
+"already canonicalized at the ``center_position``"
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.eval_matrix:1
+#: of tensorcircuit.mpscircuit.MPSCircuit.__init__:12
msgid ""
-"Contracts the tensor network in place and returns the final tensor in two"
-" dimentional matrix. The default ordering for the axes of the final "
-"tensor is: (:math:`\\prod` dimension of out_edges, :math:`\\prod` "
-"dimension of in_edges)"
+"If not None, it is transformed to the MPS form according to the split "
+"rules"
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.eval_matrix:10
-msgid "The two-dimentional tensor representing the operator."
+#: of tensorcircuit.mpscircuit.MPSCircuit.__init__:14
+msgid "Split rules"
msgstr ""
-#: of tensorcircuit.quantum.QuAdjointVector.from_tensor:1
-msgid ""
-"Construct a `QuAdjointVector` directly from a single tensor. This first "
-"wraps the tensor in a `Node`, then constructs the `QuAdjointVector` from "
-"that `Node`."
+#: of tensorcircuit.mpscircuit.MPSCircuit.apply_general_gate:1
+msgid "Apply a general qubit gate on MPS."
msgstr ""
-#: of tensorcircuit.quantum.QuAdjointVector.from_tensor:27
-msgid "The tensor for constructing an QuAdjointVector."
+#: of tensorcircuit.mpscircuit.MPSCircuit.apply_adjacent_double_gate:3
+#: tensorcircuit.mpscircuit.MPSCircuit.apply_double_gate:3
+#: tensorcircuit.mpscircuit.MPSCircuit.apply_general_gate:3
+msgid "The Gate to be applied"
msgstr ""
-#: of tensorcircuit.quantum.QuAdjointVector.from_tensor:29
-msgid ""
-"Sequence of integer indices specifying the order in which to interpret "
-"the axes as subsystems (input edges). If not specified, the axes are "
-"taken in ascending order."
+#: of tensorcircuit.mpscircuit.MPSCircuit.apply_general_gate:6
+msgid "Qubit indices of the gate"
msgstr ""
-#: of tensorcircuit.quantum.QuAdjointVector.from_tensor:33
-msgid "The new constructed QuAdjointVector give from the given tensor."
+#: of tensorcircuit.mpscircuit.MPSCircuit.apply_general_gate:5
+msgid "\"MPS does not support application of gate on > 2 qubits.\""
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.is_adjoint_vector:1
-msgid ""
-"Returns a bool indicating if QuOperator is an adjoint vector. Examples "
-"can be found in the `QuOperator.from_tensor`."
+#: of tensorcircuit.mpscircuit.MPSCircuit.apply_MPO:1
+msgid "Apply a MPO to the MPS"
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.is_scalar:1
+#: of tensorcircuit.mpscircuit.MPSCircuit.apply_adjacent_double_gate:1
msgid ""
-"Returns a bool indicating if QuOperator is a scalar. Examples can be "
-"found in the `QuOperator.from_tensor`."
+"Apply a double qubit gate on adjacent qubits of Matrix Product States "
+"(MPS)."
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.is_vector:1
-msgid ""
-"Returns a bool indicating if QuOperator is a vector. Examples can be "
-"found in the `QuOperator.from_tensor`."
+#: of tensorcircuit.mpscircuit.MPSCircuit.apply_adjacent_double_gate:5
+#: tensorcircuit.mpscircuit.MPSCircuit.apply_double_gate:5
+msgid "The first qubit index of the gate"
msgstr ""
-#: of tensorcircuit.quantum.QuAdjointVector.nodes:1
-#: tensorcircuit.quantum.QuOperator.nodes:1
-#: tensorcircuit.quantum.QuScalar.nodes:1
-#: tensorcircuit.quantum.QuVector.nodes:1
-msgid "All tensor-network nodes involved in the operator."
+#: of tensorcircuit.mpscircuit.MPSCircuit.apply_adjacent_double_gate:7
+#: tensorcircuit.mpscircuit.MPSCircuit.apply_double_gate:7
+msgid "The second qubit index of the gate"
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.norm:1
-msgid ""
-"The norm of the operator. This is the 2-norm (also known as the Frobenius"
-" or Hilbert-Schmidt norm)."
+#: of tensorcircuit.mpscircuit.MPSCircuit.apply_adjacent_double_gate:9
+msgid "Center position of MPS, default is None"
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.partial_trace:1
-msgid ""
-"The partial trace of the operator. Subsystems to trace out are supplied "
-"as indices, so that dangling edges are connected to each other as: "
-"`out_edges[i] ^ in_edges[i] for i in subsystems_to_trace_out` This does "
-"not modify the original network. The original ordering of the remaining "
-"subsystems is maintained."
+#: of tensorcircuit.mpscircuit.MPSCircuit.apply_double_gate:1
+msgid "Apply a double qubit gate on MPS."
msgstr ""
-#: of tensorcircuit.quantum.QuAdjointVector.reduced_density:16
-#: tensorcircuit.quantum.QuOperator.partial_trace:8
-#: tensorcircuit.quantum.QuVector.reduced_density:16
-msgid "Indices of subsystems to trace out."
+#: of tensorcircuit.mpscircuit.MPSCircuit.apply_nqubit_gate:1
+msgid "Apply a n-qubit gate by transforming the gate to MPO"
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.partial_trace:10
-msgid "A new QuOperator or QuScalar representing the result."
+#: of tensorcircuit.mpscircuit.MPSCircuit.apply_single_gate:1
+msgid "Apply a single qubit gate on MPS; no truncation is needed."
msgstr ""
-#: of tensorcircuit.quantum.QuAdjointVector.projector:1
-#: tensorcircuit.quantum.QuVector.projector:1
-msgid ""
-"The projector of the operator. The operator, as a linear operator, on the"
-" adjoint of the operator."
+#: of tensorcircuit.mpscircuit.MPSCircuit.apply_single_gate:3
+msgid "gate to be applied"
msgstr ""
-#: of tensorcircuit.quantum.QuAdjointVector.projector:4
-msgid ""
-"Set :math:`A` is the operator in matrix form, then the projector of "
-"operator is defined as: :math:`A^\\dagger A`"
+#: of tensorcircuit.mpscircuit.MPSCircuit.apply_single_gate:5
+msgid "Qubit index of the gate"
msgstr ""
-#: of tensorcircuit.quantum.QuAdjointVector.projector:6
-#: tensorcircuit.quantum.QuVector.projector:6
-msgid "The projector of the operator."
+#: of tensorcircuit.mpscircuit.MPSCircuit.conj:1
+msgid "Compute the conjugate of the current MPS."
msgstr ""
-#: of tensorcircuit.quantum.QuAdjointVector.reduced_density:1
-#: tensorcircuit.quantum.QuVector.reduced_density:1
-msgid "The reduced density of the operator."
+#: of tensorcircuit.mpscircuit.MPSCircuit.conj:3
+#: tensorcircuit.mpscircuit.MPSCircuit.copy:3
+#: tensorcircuit.mpscircuit.MPSCircuit.copy_without_tensor:3
+msgid "The constructed MPS"
msgstr ""
-#: of tensorcircuit.quantum.QuAdjointVector.reduced_density:3
-#: tensorcircuit.quantum.QuVector.reduced_density:3
-msgid ""
-"Set :math:`A` is the matrix of the operator, then the reduced density is "
-"defined as:"
+#: of tensorcircuit.mpscircuit.MPSCircuit.copy:1
+msgid "Copy the current MPS."
msgstr ""
-#: of tensorcircuit.quantum.QuAdjointVector.reduced_density:5
-msgid "\\mathrm{Tr}_{subsystems}(A^\\dagger A)"
+#: of tensorcircuit.mpscircuit.MPSCircuit.copy_without_tensor:1
+msgid "Copy the current MPS without the tensors."
msgstr ""
-#: of tensorcircuit.quantum.QuAdjointVector.reduced_density:9
-#: tensorcircuit.quantum.QuVector.reduced_density:9
-msgid ""
-"Firstly, take the projector of the operator, then trace out the "
-"subsystems to trace out are supplied as indices, so that dangling edges "
-"are connected to each other as: `out_edges[i] ^ in_edges[i] for i in "
-"subsystems_to_trace_out` This does not modify the original network. The "
-"original ordering of the remaining subsystems is maintained."
+#: of tensorcircuit.mpscircuit.MPSCircuit.expectation:1
+msgid "Compute the expectation of corresponding operators in the form of tensor."
msgstr ""
-#: of tensorcircuit.quantum.QuAdjointVector.reduced_density:18
-#: tensorcircuit.quantum.QuVector.reduced_density:18
+#: of tensorcircuit.mpscircuit.MPSCircuit.expectation:3
msgid ""
-"The QuOperator of the reduced density of the operator with given "
-"subsystems."
+"Operator and its position on the circuit, eg. ``(gates.Z(), [1]), "
+"(gates.X(), [2])`` is for operator :math:`Z_1X_2`"
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.tensor_product:1
-msgid ""
-"Tensor product with another operator. Given two operators `A` and `B`, "
-"produces a new operator `AB` representing :math:`A ⊗ B`. The `out_edges` "
-"(`in_edges`) of `AB` is simply the concatenation of the `out_edges` "
-"(`in_edges`) of `A.copy()` with that of `B.copy()`: `new_out_edges = "
-"[*out_edges_A_copy, *out_edges_B_copy]` `new_in_edges = "
-"[*in_edges_A_copy, *in_edges_B_copy]`"
+#: of tensorcircuit.mpscircuit.MPSCircuit.expectation:9
+msgid "If not None, will be used as bra"
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.tensor_product:20
-msgid "The other operator (`B`)."
+#: of tensorcircuit.mpscircuit.MPSCircuit.expectation:11
+msgid "Whether to conjugate the bra state"
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.tensor_product:22
-msgid "The result (`AB`)."
+#: of tensorcircuit.mpscircuit.MPSCircuit.expectation:13
+msgid "Whether to normalize the MPS"
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.trace:1
-msgid "The trace of the operator."
+#: of tensorcircuit.mpscircuit.MPSCircuit.expectation:15
+#: tensorcircuit.mpscircuit.MPSCircuit.set_split_rules:5
+#: tensorcircuit.mpscircuit.MPSCircuit.wavefunction_to_tensors:5
+msgid "Truncation split"
msgstr ""
-#: of tensorcircuit.quantum.QuOperator:1
-msgid ""
-"Represents a linear operator via a tensor network. To interpret a tensor "
-"network as a linear operator, some of the dangling edges must be "
-"designated as `out_edges` (output edges) and the rest as `in_edges` "
-"(input edges). Considered as a matrix, the `out_edges` represent the row "
-"index and the `in_edges` represent the column index. The (right) action "
-"of the operator on another then consists of connecting the `in_edges` of "
-"the first operator to the `out_edges` of the second. Can be used to do "
-"simple linear algebra with tensor networks."
+#: of tensorcircuit.mpscircuit.MPSCircuit.expectation:17
+msgid "The expectation of corresponding operators"
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.__init__:1
-msgid ""
-"Creates a new `QuOperator` from a tensor network. This encapsulates an "
-"existing tensor network, interpreting it as a linear operator. The "
-"network is checked for consistency: All dangling edges must either be in "
-"`out_edges`, `in_edges`, or `ignore_edges`."
+#: of tensorcircuit.mpscircuit.MPSCircuit.gate_to_MPO:1
+msgid "Convert gate to MPO form with identities at empty sites"
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.__init__:7
-#: tensorcircuit.quantum.QuVector.__init__:4
-msgid "The edges of the network to be used as the output edges."
+#: of tensorcircuit.mpscircuit.MPSCircuit.get_bond_dimensions:1
+msgid "Get the MPS bond dimensions"
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.__init__:15
-msgid ""
-"Optional collection of dangling edges to ignore when performing "
-"consistency checks."
+#: of tensorcircuit.mpscircuit.MPSCircuit.get_bond_dimensions:3
+#: tensorcircuit.mpscircuit.MPSCircuit.get_tensors:3
+msgid "MPS tensors"
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.__init__:18
-msgid ""
-"At least one reference node is required to specify a scalar. None "
-"provided!"
+#: of tensorcircuit.mpscircuit.MPSCircuit.get_center_position:1
+msgid "Get the center position of the MPS"
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.from_tensor:1
-msgid ""
-"Construct a `QuOperator` directly from a single tensor. This first wraps "
-"the tensor in a `Node`, then constructs the `QuOperator` from that "
-"`Node`."
+#: of tensorcircuit.mpscircuit.MPSCircuit.get_center_position:3
+msgid "center position"
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.from_tensor:28
-msgid "The tensor."
+#: of tensorcircuit.mpscircuit.MPSCircuit.get_norm:1
+msgid "Get the normalized Center Position."
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.from_tensor:30
-msgid "The axis indices of `tensor` to use as `out_edges`."
+#: of tensorcircuit.mpscircuit.MPSCircuit.get_norm:3
+msgid "Normalized Center Position."
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.from_tensor:32
-msgid "The axis indices of `tensor` to use as `in_edges`."
+#: of tensorcircuit.mpscircuit.MPSCircuit.get_quvector:2
+msgid "Get the representation of the output state in the form of ``QuVector``"
msgstr ""
-#: of tensorcircuit.quantum.QuOperator.from_tensor:34
-msgid "The new operator."
+#: of tensorcircuit.mpscircuit.MPSCircuit.get_quvector:2
+msgid "has to be full contracted in MPS"
msgstr ""
-#: of tensorcircuit.quantum.QuScalar:1
-msgid "Represents a scalar via a tensor network."
+#: of tensorcircuit.mpscircuit.MPSCircuit.get_tensors:1
+msgid "Get the MPS tensors"
msgstr ""
-#: of tensorcircuit.quantum.QuScalar.__init__:1
-msgid ""
-"Constructs a new `QuScalar` from a tensor network. This encapsulates an "
-"existing tensor network, interpreting it as a scalar."
+#: of tensorcircuit.mpscircuit.MPSCircuit.is_valid:1
+msgid "Check whether the circuit is legal."
msgstr ""
-#: of tensorcircuit.quantum.QuScalar.__init__:4
-msgid ""
-"Nodes used to refer to the tensor network (need not be exhaustive - one "
-"node from each disconnected subnetwork is sufficient)."
-msgstr ""
-
-#: of tensorcircuit.quantum.QuScalar.from_tensor:1
-msgid ""
-"Construct a `QuScalar` directly from a single tensor. This first wraps "
-"the tensor in a `Node`, then constructs the `QuScalar` from that `Node`."
+#: of tensorcircuit.mpscircuit.MPSCircuit.is_valid:3
+msgid "Whether the circuit is legal."
msgstr ""
-#: of tensorcircuit.quantum.QuScalar.from_tensor:22
-msgid "The tensor for constructing a new QuScalar."
+#: of tensorcircuit.mpscircuit.MPSCircuit.measure:1
+msgid "Take measurement to the given quantum lines."
msgstr ""
-#: of tensorcircuit.quantum.QuScalar.from_tensor:24
-msgid "The new constructed QuScalar from the given tensor."
+#: of tensorcircuit.mpscircuit.MPSCircuit.mid_measurement:1
+msgid ""
+"Middle measurement in the z-basis on the circuit, note the wavefunction "
+"output is not normalized with ``mid_measurement`` involved, one should "
+"normalized the state manually if needed."
msgstr ""
-#: of tensorcircuit.quantum.QuVector:1
-msgid "Represents a (column) vector via a tensor network."
+#: of tensorcircuit.mpscircuit.MPSCircuit.mid_measurement:4
+msgid "The index of qubit that the Z direction postselection applied on"
msgstr ""
-#: of tensorcircuit.quantum.QuVector.__init__:1
-msgid ""
-"Constructs a new `QuVector` from a tensor network. This encapsulates an "
-"existing tensor network, interpreting it as a (column) vector."
+#: of tensorcircuit.mpscircuit.MPSCircuit.mid_measurement:6
+msgid "0 for spin up, 1 for spin down, defaults to 0"
msgstr ""
-#: of tensorcircuit.quantum.QuVector.from_tensor:1
-msgid ""
-"Construct a `QuVector` directly from a single tensor. This first wraps "
-"the tensor in a `Node`, then constructs the `QuVector` from that `Node`."
+#: of tensorcircuit.mpscircuit.MPSCircuit.normalize:1
+msgid "Normalize MPS Circuit according to the center position."
msgstr ""
-#: of tensorcircuit.quantum.QuVector.from_tensor:28
-msgid "The tensor for constructing a \"QuVector\"."
+#: of tensorcircuit.mpscircuit.MPSCircuit.position:1
+msgid "Wrapper of tn.FiniteMPS.position. Set orthogonality center."
msgstr ""
-#: of tensorcircuit.quantum.QuVector.from_tensor:30
-msgid ""
-"Sequence of integer indices specifying the order in which to interpret "
-"the axes as subsystems (output edges). If not specified, the axes are "
-"taken in ascending order."
+#: of tensorcircuit.mpscircuit.MPSCircuit.position:4
+msgid "The orthogonality center"
msgstr ""
-#: of tensorcircuit.quantum.QuVector.from_tensor:34
-msgid "The new constructed QuVector from the given tensor."
+#: of tensorcircuit.mpscircuit.MPSCircuit.proj_with_mps:1
+msgid "Compute the projection between `other` as bra and `self` as ket."
msgstr ""
-#: of tensorcircuit.quantum.QuVector.projector:4
-msgid ""
-"Set :math:`A` is the operator in matrix form, then the projector of "
-"operator is defined as: :math:`A A^\\dagger`"
+#: of tensorcircuit.mpscircuit.MPSCircuit.proj_with_mps:3
+msgid "ket of the other MPS, which will be converted to bra automatically"
msgstr ""
-#: of tensorcircuit.quantum.QuVector.reduced_density:5
-msgid "\\mathrm{Tr}_{subsystems}(A A^\\dagger)"
+#: of tensorcircuit.mpscircuit.MPSCircuit.proj_with_mps:5
+msgid "The projection in form of tensor"
msgstr ""
-#: of tensorcircuit.quantum.check_spaces:1
-msgid ""
-"Check the vector spaces represented by two lists of edges are compatible."
-" The number of edges must be the same and the dimensions of each pair of "
-"edges must match. Otherwise, an exception is raised."
+#: of tensorcircuit.mpscircuit.MPSCircuit.reduce_dimension:1
+msgid "Reduce the bond dimension between two adjacent sites by SVD"
msgstr ""
-#: of tensorcircuit.quantum.check_spaces:5 tensorcircuit.quantum.check_spaces:7
-msgid "List of edges representing a many-body Hilbert space."
+#: of tensorcircuit.mpscircuit.MPSCircuit.reduce_tensor_dimension:1
+msgid "Reduce the bond dimension between two general tensors by SVD"
msgstr ""
-#: of tensorcircuit.quantum.check_spaces:10
+#: of tensorcircuit.mpscircuit.MPSCircuit.set_split_rules:1
msgid ""
-"Hilbert-space mismatch: \"Cannot connect {} subsystems with {} "
-"subsystems\", or \"Input dimension {} != output dimension {}.\""
+"Set truncation split when double qubit gates are applied. If nothing is "
+"specified, no truncation will take place and the bond dimension will keep"
+" growing. For more details, refer to `split_tensor`."
msgstr ""
-#: of tensorcircuit.quantum.correlation_from_counts:1
-msgid ""
-"Compute :math:`\\prod_{i\\in \\\\text{index}} s_i`, where the probability"
-" for each bitstring is given as a vector ``results``. Results is in the "
-"format of \"count_vector\""
+#: of tensorcircuit.mpscircuit.MPSCircuit.slice:1
+msgid "Get a slice of the MPS (only for internal use)"
msgstr ""
-#: of tensorcircuit.quantum.correlation_from_counts:13
-#: tensorcircuit.quantum.correlation_from_samples:4
-msgid "list of int, indicating the position in the bitstring"
+#: of tensorcircuit.mpscircuit.MPSCircuit.wavefunction:3
+msgid "the str indicating the form of the output wavefunction"
msgstr ""
-#: of tensorcircuit.quantum.correlation_from_counts:15
-msgid "probability vector of shape 2^n"
+#: of tensorcircuit.mpscircuit.MPSCircuit.wavefunction:5
+msgid "Tensor with shape [1, -1]"
msgstr ""
-#: of tensorcircuit.quantum.correlation_from_counts:17
-msgid "Correlation expectation from measurement shots."
+#: of tensorcircuit.mpscircuit.MPSCircuit.wavefunction:9
+msgid "i--A--B--j -> i--XX--j"
msgstr ""
-#: of tensorcircuit.quantum.correlation_from_samples:1
-msgid ""
-"Compute :math:`\\prod_{i\\in \\\\text{index}} s_i (s=\\pm 1)`, Results is"
-" in the format of \"sample_int\" or \"sample_bin\""
+#: of tensorcircuit.mpscircuit.MPSCircuit.wavefunction_to_tensors:1
+msgid "Construct the MPS tensors from a given wavefunction."
msgstr ""
-#: of tensorcircuit.quantum.correlation_from_samples:6
-msgid "sample tensor"
+#: of tensorcircuit.mpscircuit.MPSCircuit.wavefunction_to_tensors:3
+msgid "The given wavefunction (any shape is OK)"
msgstr ""
-#: of tensorcircuit.quantum.correlation_from_samples:10
-msgid "Correlation expectation from measurement shots"
+#: of tensorcircuit.mpscircuit.MPSCircuit.wavefunction_to_tensors:7
+msgid "Physical dimension, 2 for MPS and 4 for MPO"
msgstr ""
-#: of tensorcircuit.quantum.count_d2s:1
-msgid ""
-"measurement shots results, dense representation to sparse tuple "
-"representation non-jittable due to the non fixed return shape count_tuple"
-" to count_vector"
+#: of tensorcircuit.mpscircuit.MPSCircuit.wavefunction_to_tensors:9
+msgid "Whether to normalize the wavefunction"
msgstr ""
-#: of tensorcircuit.quantum.count_d2s:12
-msgid "cutoff to determine nonzero elements, defaults to 1e-7"
+#: of tensorcircuit.mpscircuit.MPSCircuit.wavefunction_to_tensors:11
+msgid "The tensors"
msgstr ""
-#: of tensorcircuit.quantum.count_s2d:1
-msgid ""
-"measurement shots results, sparse tuple representation to dense "
-"representation count_vector to count_tuple"
+#: of tensorcircuit.mpscircuit.split_tensor:1
+msgid "Split the tensor by SVD or QR depends on whether a truncation is required."
msgstr ""
-#: of tensorcircuit.quantum.count_tuple2dict:1
-msgid "count_tuple to count_dict_bin or count_dict_int"
+#: of tensorcircuit.mpscircuit.split_tensor:3
+msgid "The input tensor to split."
msgstr ""
-#: of tensorcircuit.quantum.count_tuple2dict:3
-msgid "count_tuple format"
+#: of tensorcircuit.mpscircuit.split_tensor:5
+msgid "Determine the orthogonal center is on the left tensor or the right tensor."
msgstr ""
-#: of tensorcircuit.quantum.count_tuple2dict:7
-#: tensorcircuit.quantum.count_vector2dict:7
-msgid "can be \"int\" or \"bin\", defaults to \"bin\""
+#: of tensorcircuit.mpscircuit.split_tensor:7
+msgid "Two tensors after splitting"
msgstr ""
-#: of tensorcircuit.quantum.count_tuple2dict:9
-msgid "count_dict"
+#: ../../source/api/noisemodel.rst:2
+msgid "tensorcircuit.noisemodel"
msgstr ""
-#: of tensorcircuit.quantum.count_vector2dict:1
-msgid "convert_vector to count_dict_bin or count_dict_int"
+#: of tensorcircuit.noisemodel:1
+msgid "General Noise Model Construction."
msgstr ""
-#: of tensorcircuit.quantum.count_vector2dict:3
-msgid "tensor in shape [2**n]"
+#: of tensorcircuit.noisemodel.NoiseConf:1
+msgid "``Noise Configuration`` class."
msgstr ""
-#: of tensorcircuit.quantum.double_state:1
-msgid "Compute the double state of the given Hamiltonian operator ``h``."
+#: of tensorcircuit.noisemodel.NoiseConf.__init__:1
+msgid "Establish a noise configuration."
msgstr ""
-#: of tensorcircuit.quantum.double_state:3 tensorcircuit.quantum.gibbs_state:3
-#: tensorcircuit.quantum.truncated_free_energy:5
-msgid "Hamiltonian operator in form of Tensor."
+#: of tensorcircuit.noisemodel.NoiseConf.add_noise:1
+msgid ""
+"Add noise channels on specific gates and specific qubits in form of Kraus"
+" operators."
msgstr ""
-#: of tensorcircuit.quantum.double_state:5 tensorcircuit.quantum.free_energy:17
-#: tensorcircuit.quantum.gibbs_state:5
-#: tensorcircuit.quantum.renyi_free_energy:16
-#: tensorcircuit.quantum.truncated_free_energy:7
-msgid "Constant for the optimization, default is 1."
+#: of tensorcircuit.noisemodel.NoiseConf.add_noise:3
+msgid "noisy gate"
msgstr ""
-#: of tensorcircuit.quantum.double_state:7
-msgid "The double state of ``h`` with the given ``beta``."
+#: of tensorcircuit.noisemodel.NoiseConf.add_noise:5
+msgid "noise channel"
msgstr ""
-#: of tensorcircuit.quantum.eliminate_identities:1
+#: of tensorcircuit.noisemodel.NoiseConf.add_noise:7
msgid ""
-"Eliminates any connected CopyNodes that are identity matrices. This will "
-"modify the network represented by `nodes`. Only identities that are "
-"connected to other nodes are eliminated."
+"the list of noisy qubit, defaults to None, indicating applying the noise "
+"channel on all qubits"
msgstr ""
-#: of tensorcircuit.quantum.eliminate_identities:5
-msgid "Collection of nodes to search."
+#: of tensorcircuit.noisemodel.NoiseConf.add_noise_by_condition:1
+msgid "Add noise based on specified condition"
msgstr ""
-#: of tensorcircuit.quantum.eliminate_identities:7
-msgid ""
-"The Dictionary mapping remaining Nodes to any replacements, Dictionary "
-"specifying all dangling-edge replacements."
+#: of tensorcircuit.noisemodel.NoiseConf.add_noise_by_condition:3
+msgid "a function to decide if the noise should be added to the qir."
msgstr ""
-#: of tensorcircuit.quantum.entropy:1
-msgid "Compute the entropy from the given density matrix ``rho``."
+#: of tensorcircuit.noisemodel.NoiseConf.add_noise_by_condition:5
+msgid "the error channel"
msgstr ""
-#: of tensorcircuit.quantum.entropy:30 tensorcircuit.quantum.free_energy:13
-#: tensorcircuit.quantum.renyi_entropy:3
-#: tensorcircuit.quantum.renyi_free_energy:12
-msgid "The density matrix in form of Tensor or QuOperator."
+#: of tensorcircuit.noisemodel.NoiseConf.add_noise_by_condition:7
+msgid "the name of the condition. A metadata that does not affect the numerics."
msgstr ""
-#: of tensorcircuit.quantum.entropy:32 tensorcircuit.quantum.free_energy:19
-msgid "Epsilon, default is 1e-12."
+#: of tensorcircuit.noisemodel.NoiseConf.channel_count:1
+msgid "Count the total number of channels in a given circuit"
msgstr ""
-#: of tensorcircuit.quantum.entropy:34
-msgid "Entropy on the given density matrix."
+#: of tensorcircuit.noisemodel.NoiseConf.channel_count:3
+msgid "the circuit to be counted"
msgstr ""
-#: of tensorcircuit.quantum.fidelity:1
-msgid "Return fidelity scalar between two states rho and rho0."
+#: of tensorcircuit.noisemodel.NoiseConf.channel_count:5
+msgid "the count"
msgstr ""
-#: of tensorcircuit.quantum.fidelity:3
-msgid "\\operatorname{Tr}(\\sqrt{\\sqrt{rho} rho_0 \\sqrt{rho}})"
+#: of tensorcircuit.noisemodel.apply_qir_with_noise:1
+msgid "A newly defined circuit"
msgstr ""
-#: of tensorcircuit.quantum.fidelity:7 tensorcircuit.quantum.fidelity:9
-#: tensorcircuit.quantum.mutual_information:3 tensorcircuit.quantum.taylorlnm:3
-#: tensorcircuit.quantum.trace_distance:3
-#: tensorcircuit.quantum.trace_distance:5
-#: tensorcircuit.quantum.truncated_free_energy:3
-msgid "The density matrix in form of Tensor."
+#: of tensorcircuit.noisemodel.apply_qir_with_noise:3
+msgid "The qir of the clean circuit"
msgstr ""
-#: of tensorcircuit.quantum.fidelity:11
-msgid "The sqrtm of a Hermitian matrix ``a``."
+#: of tensorcircuit.noisemodel.apply_qir_with_noise:5
+#: tensorcircuit.noisemodel.circuit_with_noise:5
+msgid "Noise Configuration"
msgstr ""
-#: of tensorcircuit.quantum.free_energy:1
-msgid "Compute the free energy of the given density matrix."
+#: of tensorcircuit.noisemodel.apply_qir_with_noise:7
+#: tensorcircuit.noisemodel.circuit_with_noise:7
+msgid "The status for Monte Carlo sampling, defaults to None"
msgstr ""
-#: of tensorcircuit.quantum.free_energy:15
-#: tensorcircuit.quantum.renyi_free_energy:14
-msgid "Hamiltonian operator in form of Tensor or QuOperator."
+#: of tensorcircuit.noisemodel.apply_qir_with_noise:9
+#: tensorcircuit.noisemodel.circuit_with_noise:9
+msgid "A newly constructed circuit with noise"
msgstr ""
-#: of tensorcircuit.quantum.free_energy:22
-msgid "The free energy of the given density matrix with the Hamiltonian operator."
+#: of tensorcircuit.noisemodel.circuit_with_noise:1
+msgid "Noisify a clean circuit."
msgstr ""
-#: of tensorcircuit.quantum.generate_local_hamiltonian:1
-msgid ""
-"Generate a local Hamiltonian operator based on the given sequence of "
-"Tensor. Note: further jit is recommended. For large Hilbert space, sparse"
-" Hamiltonian is recommended"
+#: of tensorcircuit.noisemodel.circuit_with_noise:3
+msgid "A clean circuit"
msgstr ""
-#: of tensorcircuit.quantum.generate_local_hamiltonian:5
-msgid "A sequence of Tensor."
+#: of tensorcircuit.noisemodel.expectation_noisfy:1
+msgid "Calculate expectation value with noise configuration."
msgstr ""
-#: of tensorcircuit.quantum.generate_local_hamiltonian:7
-msgid "Return Hamiltonian operator in form of matrix, defaults to True."
+#: of tensorcircuit.noisemodel.expectation_noisfy:3
+#: tensorcircuit.noisemodel.sample_expectation_ps_noisfy:3
+msgid "The clean circuit"
msgstr ""
-#: of tensorcircuit.quantum.generate_local_hamiltonian:9
-msgid "The Hamiltonian operator in form of QuOperator or matrix."
+#: of tensorcircuit.noisemodel.expectation_noisfy:12
+msgid "expectation value with noise"
msgstr ""
-#: of tensorcircuit.quantum.gibbs_state:1
-msgid "Compute the Gibbs state of the given Hamiltonian operator ``h``."
+#: of tensorcircuit.noisemodel.sample_expectation_ps_noisfy:1
+msgid "Calculate sample_expectation_ps with noise configuration."
msgstr ""
-#: of tensorcircuit.quantum.gibbs_state:7
-msgid "The Gibbs state of ``h`` with the given ``beta``."
+#: of tensorcircuit.noisemodel.sample_expectation_ps_noisfy:13
+msgid ""
+"repetition time for Monte Carlo sampling for noisfy calculation, "
+"defaults to 1000"
msgstr ""
-#: of tensorcircuit.quantum.heisenberg_hamiltonian:1
+#: of tensorcircuit.noisemodel.sample_expectation_ps_noisfy:20
msgid ""
-"Generate Heisenberg Hamiltonian with possible external fields. Currently "
-"requires tensorflow installed"
+"external randomness given by tensor uniformly from [0, 1], defaults to "
+"None, used for measurement sampling"
msgstr ""
-#: of tensorcircuit.quantum.heisenberg_hamiltonian:13
-msgid "input circuit graph"
+#: of tensorcircuit.noisemodel.sample_expectation_ps_noisfy:23
+msgid "sample expectation value with noise"
msgstr ""
-#: of tensorcircuit.quantum.heisenberg_hamiltonian:15
-msgid "zz coupling, default is 1.0"
+#: ../../source/api/quantum.rst:2
+msgid "tensorcircuit.quantum"
msgstr ""
-#: of tensorcircuit.quantum.heisenberg_hamiltonian:17
-msgid "xx coupling, default is 1.0"
+#: of tensorcircuit.quantum:1
+msgid "Quantum state and operator class backend by tensornetwork"
msgstr ""
-#: of tensorcircuit.quantum.heisenberg_hamiltonian:19
-msgid "yy coupling, default is 1.0"
+#: of tensorcircuit.quantum
+msgid "IMPORT"
msgstr ""
-#: of tensorcircuit.quantum.heisenberg_hamiltonian:21
-msgid "External field on z direction, default is 0.0"
+#: of tensorcircuit.quantum.PauliString2COO:1
+#: tensorcircuit.quantum.PauliStringSum2COO_tf:1
+msgid "Generate tensorflow sparse matrix from Pauli string sum"
msgstr ""
-#: of tensorcircuit.quantum.heisenberg_hamiltonian:23
-msgid "External field on y direction, default is 0.0"
+#: of tensorcircuit.quantum.PauliString2COO:3
+msgid ""
+"1D Tensor representing for a Pauli string, e.g. [1, 0, 0, 3, 2] is for "
+":math:`X_0Z_3Y_4`"
msgstr ""
-#: of tensorcircuit.quantum.heisenberg_hamiltonian:25
-msgid "External field on x direction, default is 0.0"
+#: of tensorcircuit.quantum.PauliString2COO:6
+msgid ""
+"the weight for the Pauli string defaults to None (all Pauli strings "
+"weight 1.0)"
msgstr ""
-#: of tensorcircuit.quantum.heisenberg_hamiltonian:27
-msgid "Whether to return sparse Hamiltonian operator, default is True."
+#: of tensorcircuit.quantum.PauliString2COO:9
+msgid "the tensorflow sparse matrix"
msgstr ""
-#: of tensorcircuit.quantum.heisenberg_hamiltonian:29
-msgid "whether return the matrix in numpy or tensorflow form"
+#: of tensorcircuit.quantum.PauliStringSum2COO:1
+#: tensorcircuit.quantum.PauliStringSum2COO_numpy:1
+msgid ""
+"Generate sparse tensor from Pauli string sum. Currently requires "
+"tensorflow installed"
msgstr ""
-#: of tensorcircuit.quantum.heisenberg_hamiltonian:32
-msgid "Hamiltonian measurements"
+#: of tensorcircuit.quantum.PauliStringSum2COO:4
+#: tensorcircuit.quantum.PauliStringSum2COO_numpy:4
+#: tensorcircuit.quantum.PauliStringSum2COO_tf:3
+#: tensorcircuit.quantum.PauliStringSum2Dense:5
+msgid ""
+"2D Tensor, each row is for a Pauli string, e.g. [1, 0, 0, 3, 2] is for "
+":math:`X_0Z_3Y_4`"
msgstr ""
-#: of tensorcircuit.quantum.identity:1
+#: of tensorcircuit.quantum.PauliStringSum2COO:7
+#: tensorcircuit.quantum.PauliStringSum2COO_numpy:7
+#: tensorcircuit.quantum.PauliStringSum2COO_tf:6
+#: tensorcircuit.quantum.PauliStringSum2Dense:8
msgid ""
-"Construct a 'QuOperator' representing the identity on a given space. "
-"Internally, this is done by constructing 'CopyNode's for each edge, with "
-"dimension according to 'space'."
+"1D Tensor, each element corresponds the weight for each Pauli string "
+"defaults to None (all Pauli strings weight 1.0)"
msgstr ""
-#: of tensorcircuit.quantum.identity:26
+#: of tensorcircuit.quantum.PauliStringSum2COO:10
+#: tensorcircuit.quantum.PauliStringSum2COO_numpy:10
+#: tensorcircuit.quantum.PauliStringSum2Dense:11
msgid ""
-"A sequence of integers for the dimensions of the tensor product factors "
-"of the space (the edges in the tensor network)."
+"default False. If True, return numpy coo else return backend compatible "
+"sparse tensor"
msgstr ""
-#: of tensorcircuit.quantum.identity:29
-msgid ""
-"The data type by np.* (for conversion to dense). defaults None to tc "
-"dtype."
+#: of tensorcircuit.quantum.PauliStringSum2COO:13
+#: tensorcircuit.quantum.PauliStringSum2COO_numpy:13
+msgid "the scipy coo sparse matrix"
msgstr ""
-#: of tensorcircuit.quantum.identity:31
-msgid "The desired identity operator."
+#: of tensorcircuit.quantum.PauliStringSum2COO_tf:9
+msgid "the tensorflow coo sparse matrix"
msgstr ""
-#: of tensorcircuit.quantum.measurement_counts:1
+#: of tensorcircuit.quantum.PauliStringSum2Dense:1
msgid ""
-"Simulate the measuring of each qubit of ``p`` in the computational basis,"
-" thus producing output like that of ``qiskit``."
+"Generate dense matrix from Pauli string sum. Currently requires "
+"tensorflow installed."
msgstr ""
-#: of tensorcircuit.quantum.measurement_counts:4
-msgid "Six formats of measurement counts results:"
+#: of tensorcircuit.quantum.PauliStringSum2Dense:14
+msgid "the tensorflow dense matrix"
msgstr ""
-#: of tensorcircuit.quantum.measurement_counts:6
-msgid "\"sample_int\": # np.array([0, 0])"
+#: of tensorcircuit.quantum.QuAdjointVector:1 tensorcircuit.quantum.QuScalar:1
+#: tensorcircuit.quantum.QuVector:1
+msgid "Bases: :py:class:`~tensorcircuit.quantum.QuOperator`"
msgstr ""
-#: of tensorcircuit.quantum.measurement_counts:8
-msgid "\"sample_bin\": # [np.array([1, 0]), np.array([1, 0])]"
+#: of tensorcircuit.quantum.QuAdjointVector:1
+msgid "Represents an adjoint (row) vector via a tensor network."
msgstr ""
-#: of tensorcircuit.quantum.measurement_counts:10
-msgid "\"count_vector\": # np.array([2, 0, 0, 0])"
+#: of tensorcircuit.quantum.QuAdjointVector.__init__:1
+msgid ""
+"Constructs a new `QuAdjointVector` from a tensor network. This "
+"encapsulates an existing tensor network, interpreting it as an adjoint "
+"vector (row vector)."
msgstr ""
-#: of tensorcircuit.quantum.measurement_counts:12
-msgid "\"count_tuple\": # (np.array([0]), np.array([2]))"
+#: of tensorcircuit.quantum.QuAdjointVector.__init__:5
+#: tensorcircuit.quantum.QuOperator.__init__:9
+msgid "The edges of the network to be used as the input edges."
msgstr ""
-#: of tensorcircuit.quantum.measurement_counts:14
-msgid "\"count_dict_bin\": # {\"00\": 2, \"01\": 0, \"10\": 0, \"11\": 0}"
+#: of tensorcircuit.quantum.QuAdjointVector.__init__:7
+#: tensorcircuit.quantum.QuOperator.__init__:11
+#: tensorcircuit.quantum.QuVector.__init__:6
+msgid ""
+"Nodes used to refer to parts of the tensor network that are not connected"
+" to any input or output edges (for example: a scalar factor)."
msgstr ""
-#: of tensorcircuit.quantum.measurement_counts:16
-msgid "\"count_dict_int\": # {0: 2, 1: 0, 2: 0, 3: 0}"
+#: of tensorcircuit.quantum.QuAdjointVector.__init__:10
+#: tensorcircuit.quantum.QuScalar.__init__:7
+#: tensorcircuit.quantum.QuVector.__init__:9
+msgid "Optional collection of edges to ignore when performing consistency checks."
msgstr ""
-#: of tensorcircuit.quantum.measurement_counts:37
+#: of tensorcircuit.quantum.QuOperator.adjoint:1
msgid ""
-"The quantum state, assumed to be normalized, as either a ket or density "
-"operator."
-msgstr ""
-
-#: of tensorcircuit.quantum.measurement_counts:39
-msgid "The number of counts to perform."
+"The adjoint of the operator. This creates a new `QuOperator` with "
+"complex-conjugate copies of all tensors in the network and with the input"
+" and output edges switched."
msgstr ""
-#: of tensorcircuit.quantum.measurement_counts:41
-msgid "alias for the argument ``counts``"
+#: of tensorcircuit.quantum.QuOperator.adjoint:5
+msgid "The adjoint of the operator."
msgstr ""
-#: of tensorcircuit.quantum.measurement_counts:43
-msgid "defaults to be \"direct\", see supported format above"
+#: of tensorcircuit.quantum.QuOperator.check_network:1
+msgid ""
+"Check that the network has the expected dimensionality. This checks that "
+"all input and output edges are dangling and that there are no other "
+"dangling edges (except any specified in `ignore_edges`). If not, an "
+"exception is raised."
msgstr ""
-#: of tensorcircuit.quantum.measurement_counts:47
+#: of tensorcircuit.quantum.QuOperator.contract:1
msgid ""
-"if True, the `state` is directly regarded as a probability list, defaults"
-" to be False"
+"Contract the tensor network in place. This modifies the tensor network "
+"representation of the operator (or vector, or scalar), reducing it to a "
+"single tensor, without changing the value."
msgstr ""
-#: of tensorcircuit.quantum.measurement_counts:55
-msgid "if True, jax backend try using a jittable count, defaults to False"
+#: of tensorcircuit.quantum.QuOperator.contract:5
+msgid "Manually specify the axis ordering of the final tensor."
msgstr ""
-#: of tensorcircuit.quantum.measurement_counts:57
-msgid "The counts for each bit string measured."
+#: of tensorcircuit.quantum.QuOperator.contract:7
+msgid "The present object."
msgstr ""
-#: of tensorcircuit.quantum.mutual_information:1
-msgid "Mutual information between AB subsystem described by ``cut``."
+#: of tensorcircuit.quantum.QuOperator.copy:1
+msgid "The deep copy of the operator."
msgstr ""
-#: of tensorcircuit.quantum.mutual_information:5
-msgid "The AB subsystem."
+#: of tensorcircuit.quantum.QuOperator.copy:3
+msgid "The new copy of the operator."
msgstr ""
-#: of tensorcircuit.quantum.mutual_information:7
-msgid "The mutual information between AB subsystem described by ``cut``."
+#: of tensorcircuit.quantum.QuOperator.eval:1
+msgid ""
+"Contracts the tensor network in place and returns the final tensor. Note "
+"that this modifies the tensor network representing the operator. The "
+"default ordering for the axes of the final tensor is: `*out_edges, "
+"*in_edges`. If there are any \"ignored\" edges, their axes come first: "
+"`*ignored_edges, *out_edges, *in_edges`."
msgstr ""
-#: of tensorcircuit.quantum.quantum_constructor:1
+#: of tensorcircuit.quantum.QuOperator.eval:8
+#: tensorcircuit.quantum.QuOperator.eval_matrix:6
msgid ""
-"Constructs an appropriately specialized QuOperator. If there are no "
-"edges, creates a QuScalar. If the are only output (input) edges, creates "
-"a QuVector (QuAdjointVector). Otherwise creates a QuOperator."
+"Manually specify the axis ordering of the final tensor. The default "
+"ordering is determined by `out_edges` and `in_edges` (see above)."
msgstr ""
-#: of tensorcircuit.quantum.quantum_constructor:48
-msgid "A list of output edges."
+#: of tensorcircuit.quantum.QuOperator.eval:11
+#: tensorcircuit.quantum.QuOperator.eval_matrix:9
+msgid "Node count '{}' > 1 after contraction!"
msgstr ""
-#: of tensorcircuit.quantum.quantum_constructor:50
-msgid "A list of input edges."
+#: of tensorcircuit.quantum.QuOperator.eval:12
+msgid "The final tensor representing the operator."
msgstr ""
-#: of tensorcircuit.quantum.quantum_constructor:52
+#: of tensorcircuit.quantum.QuOperator.eval_matrix:1
msgid ""
-"Reference nodes for the tensor network (needed if there is a. scalar "
-"component)."
-msgstr ""
-
-#: of tensorcircuit.quantum.quantum_constructor:55
-msgid "Edges to ignore when checking the dimensionality of the tensor network."
+"Contracts the tensor network in place and returns the final tensor in two"
+" dimentional matrix. The default ordering for the axes of the final "
+"tensor is: (:math:`\\prod` dimension of out_edges, :math:`\\prod` "
+"dimension of in_edges)"
msgstr ""
-#: of tensorcircuit.quantum.quantum_constructor:58
-msgid "The new created QuOperator object."
+#: of tensorcircuit.quantum.QuOperator.eval_matrix:10
+msgid "The two-dimentional tensor representing the operator."
msgstr ""
-#: of tensorcircuit.quantum.quimb2qop:1
-msgid "Convert MPO in Quimb package to QuOperator."
+#: of tensorcircuit.quantum.QuAdjointVector.from_tensor:1
+msgid ""
+"Construct a `QuAdjointVector` directly from a single tensor. This first "
+"wraps the tensor in a `Node`, then constructs the `QuAdjointVector` from "
+"that `Node`."
msgstr ""
-#: of tensorcircuit.quantum.quimb2qop:3
-msgid "MPO in the form of Quimb package"
+#: of tensorcircuit.quantum.QuAdjointVector.from_tensor:27
+msgid "The tensor for constructing an QuAdjointVector."
msgstr ""
-#: of tensorcircuit.quantum.quimb2qop:5 tensorcircuit.quantum.tn2qop:5
-msgid "MPO in the form of QuOperator"
+#: of tensorcircuit.quantum.QuAdjointVector.from_tensor:29
+msgid ""
+"Sequence of integer indices specifying the order in which to interpret "
+"the axes as subsystems (input edges). If not specified, the axes are "
+"taken in ascending order."
msgstr ""
-#: of tensorcircuit.quantum.reduced_density_matrix:1
-msgid "Compute the reduced density matrix from the quantum state ``state``."
+#: of tensorcircuit.quantum.QuAdjointVector.from_tensor:33
+msgid "The new constructed QuAdjointVector give from the given tensor."
msgstr ""
-#: of tensorcircuit.quantum.reduced_density_matrix:3
-msgid "The quantum state in form of Tensor or QuOperator."
+#: of tensorcircuit.quantum.QuOperator.is_adjoint_vector:1
+msgid ""
+"Returns a bool indicating if QuOperator is an adjoint vector. Examples "
+"can be found in the `QuOperator.from_tensor`."
msgstr ""
-#: of tensorcircuit.quantum.reduced_density_matrix:5
+#: of tensorcircuit.quantum.QuOperator.is_scalar:1
msgid ""
-"the index list that is traced out, if cut is a int, it indicates [0, cut]"
-" as the traced out region"
+"Returns a bool indicating if QuOperator is a scalar. Examples can be "
+"found in the `QuOperator.from_tensor`."
msgstr ""
-#: of tensorcircuit.quantum.reduced_density_matrix:8
-msgid "probability decoration, default is None."
+#: of tensorcircuit.quantum.QuOperator.is_vector:1
+msgid ""
+"Returns a bool indicating if QuOperator is a vector. Examples can be "
+"found in the `QuOperator.from_tensor`."
msgstr ""
-#: of tensorcircuit.quantum.reduced_density_matrix:10
-msgid "The reduced density matrix."
+#: of tensorcircuit.quantum.QuAdjointVector.nodes:1
+#: tensorcircuit.quantum.QuOperator.nodes:1
+#: tensorcircuit.quantum.QuScalar.nodes:1
+#: tensorcircuit.quantum.QuVector.nodes:1
+msgid "All tensor-network nodes involved in the operator."
msgstr ""
-#: of tensorcircuit.quantum.renyi_entropy:1
-msgid "Compute the Rényi entropy of order :math:`k` by given density matrix."
+#: of tensorcircuit.quantum.QuOperator.norm:1
+msgid ""
+"The norm of the operator. This is the 2-norm (also known as the Frobenius"
+" or Hilbert-Schmidt norm)."
msgstr ""
-#: of tensorcircuit.quantum.renyi_entropy:5
-#: tensorcircuit.quantum.renyi_free_energy:18
-msgid "The order of Rényi entropy, default is 2."
+#: of tensorcircuit.quantum.QuOperator.partial_trace:1
+msgid ""
+"The partial trace of the operator. Subsystems to trace out are supplied "
+"as indices, so that dangling edges are connected to each other as: "
+"`out_edges[i] ^ in_edges[i] for i in subsystems_to_trace_out` This does "
+"not modify the original network. The original ordering of the remaining "
+"subsystems is maintained."
msgstr ""
-#: of tensorcircuit.quantum.renyi_entropy:7
-#: tensorcircuit.quantum.renyi_free_energy:20
-msgid "The :math:`k` th order of Rényi entropy."
+#: of tensorcircuit.quantum.QuAdjointVector.reduced_density:16
+#: tensorcircuit.quantum.QuOperator.partial_trace:8
+#: tensorcircuit.quantum.QuVector.reduced_density:16
+msgid "Indices of subsystems to trace out."
msgstr ""
-#: of tensorcircuit.quantum.renyi_free_energy:1
+#: of tensorcircuit.quantum.QuOperator.partial_trace:10
+msgid "A new QuOperator or QuScalar representing the result."
+msgstr ""
+
+#: of tensorcircuit.quantum.QuAdjointVector.projector:1
+#: tensorcircuit.quantum.QuVector.projector:1
msgid ""
-"Compute the Rényi free energy of the corresponding density matrix and "
-"Hamiltonian."
+"The projector of the operator. The operator, as a linear operator, on the"
+" adjoint of the operator."
msgstr ""
-#: of tensorcircuit.quantum.sample2all:1
+#: of tensorcircuit.quantum.QuAdjointVector.projector:4
msgid ""
-"transform ``sample_int`` or ``sample_bin`` form results to other forms "
-"specified by ``format``"
+"Set :math:`A` is the operator in matrix form, then the projector of "
+"operator is defined as: :math:`A^\\dagger A`"
msgstr ""
-#: of tensorcircuit.quantum.sample2all:3
-msgid "measurement shots results in ``sample_int`` or ``sample_bin`` format"
+#: of tensorcircuit.quantum.QuAdjointVector.projector:6
+#: tensorcircuit.quantum.QuVector.projector:6
+msgid "The projector of the operator."
msgstr ""
-#: of tensorcircuit.quantum.sample2all:7
-msgid ""
-"see the doc in the doc in "
-":py:meth:`tensorcircuit.quantum.measurement_results`, defaults to "
-"\"count_vector\""
+#: of tensorcircuit.quantum.QuAdjointVector.reduced_density:1
+#: tensorcircuit.quantum.QuVector.reduced_density:1
+msgid "The reduced density of the operator."
msgstr ""
-#: of tensorcircuit.quantum.sample2all:12
-msgid "only applicable to count transformation in jax backend, defaults to False"
+#: of tensorcircuit.quantum.QuAdjointVector.reduced_density:3
+#: tensorcircuit.quantum.QuVector.reduced_density:3
+msgid ""
+"Set :math:`A` is the matrix of the operator, then the reduced density is "
+"defined as:"
msgstr ""
-#: of tensorcircuit.quantum.sample2all:14
-msgid "measurement results specified as ``format``"
+#: of tensorcircuit.quantum.QuAdjointVector.reduced_density:5
+msgid "\\mathrm{Tr}_{subsystems}(A^\\dagger A)"
msgstr ""
-#: of tensorcircuit.quantum.sample2count:1
-msgid "sample_int to count_tuple"
+#: of tensorcircuit.quantum.QuAdjointVector.reduced_density:9
+#: tensorcircuit.quantum.QuVector.reduced_density:9
+msgid ""
+"Firstly, take the projector of the operator, then trace out the "
+"subsystems to trace out are supplied as indices, so that dangling edges "
+"are connected to each other as: `out_edges[i] ^ in_edges[i] for i in "
+"subsystems_to_trace_out` This does not modify the original network. The "
+"original ordering of the remaining subsystems is maintained."
msgstr ""
-#: of tensorcircuit.quantum.sample_bin2int:1
-msgid "bin sample to int sample"
+#: of tensorcircuit.quantum.QuAdjointVector.reduced_density:18
+#: tensorcircuit.quantum.QuVector.reduced_density:18
+msgid ""
+"The QuOperator of the reduced density of the operator with given "
+"subsystems."
msgstr ""
-#: of tensorcircuit.quantum.sample_bin2int:3
-msgid "in shape [trials, n] of elements (0, 1)"
+#: of tensorcircuit.quantum.QuOperator.tensor_product:1
+msgid ""
+"Tensor product with another operator. Given two operators `A` and `B`, "
+"produces a new operator `AB` representing :math:`A ⊗ B`. The `out_edges` "
+"(`in_edges`) of `AB` is simply the concatenation of the `out_edges` "
+"(`in_edges`) of `A.copy()` with that of `B.copy()`: `new_out_edges = "
+"[*out_edges_A_copy, *out_edges_B_copy]` `new_in_edges = "
+"[*in_edges_A_copy, *in_edges_B_copy]`"
msgstr ""
-#: of tensorcircuit.quantum.sample_bin2int:7
-msgid "in shape [trials]"
+#: of tensorcircuit.quantum.QuOperator.tensor_product:20
+msgid "The other operator (`B`)."
msgstr ""
-#: of tensorcircuit.quantum.sample_int2bin:1
-msgid "int sample to bin sample"
+#: of tensorcircuit.quantum.QuOperator.tensor_product:22
+msgid "The result (`AB`)."
msgstr ""
-#: of tensorcircuit.quantum.sample_int2bin:3
-msgid "in shape [trials] of int elements in the range [0, 2**n)"
+#: of tensorcircuit.quantum.QuOperator.trace:1
+msgid "The trace of the operator."
msgstr ""
-#: of tensorcircuit.quantum.sample_int2bin:7
-msgid "in shape [trials, n] of element (0, 1)"
+#: of tensorcircuit.quantum.QuOperator:1
+msgid ""
+"Represents a linear operator via a tensor network. To interpret a tensor "
+"network as a linear operator, some of the dangling edges must be "
+"designated as `out_edges` (output edges) and the rest as `in_edges` "
+"(input edges). Considered as a matrix, the `out_edges` represent the row "
+"index and the `in_edges` represent the column index. The (right) action "
+"of the operator on another then consists of connecting the `in_edges` of "
+"the first operator to the `out_edges` of the second. Can be used to do "
+"simple linear algebra with tensor networks."
msgstr ""
-#: of tensorcircuit.quantum.spin_by_basis:1
+#: of tensorcircuit.quantum.QuOperator.__init__:1
msgid ""
-"Generate all n-bitstrings as an array, each row is a bitstring basis. "
-"Return m-th col."
+"Creates a new `QuOperator` from a tensor network. This encapsulates an "
+"existing tensor network, interpreting it as a linear operator. The "
+"network is checked for consistency: All dangling edges must either be in "
+"`out_edges`, `in_edges`, or `ignore_edges`."
msgstr ""
-#: of tensorcircuit.quantum.spin_by_basis:9
-msgid "length of a bitstring"
+#: of tensorcircuit.quantum.QuOperator.__init__:7
+#: tensorcircuit.quantum.QuVector.__init__:4
+msgid "The edges of the network to be used as the output edges."
msgstr ""
-#: of tensorcircuit.quantum.spin_by_basis:11
-msgid "m is transformed as "
-"(01>-|10>)"
+#: of tensorcircuit.quantum.measurement_counts:10
+msgid "\"count_vector\": # np.array([2, 0, 0, 0])"
msgstr ""
-#: of tensorcircuit.templates.blocks.Bell_pair_block:3
-#: tensorcircuit.templates.blocks.qft:3
-msgid "Circuit in"
+#: of tensorcircuit.quantum.measurement_counts:12
+msgid "\"count_tuple\": # (np.array([0]), np.array([2]))"
msgstr ""
-#: of tensorcircuit.templates.blocks.Bell_pair_block:5
-msgid ""
-"pairs indices for Bell pairs, defaults to None, corresponds to neighbor "
-"links"
+#: of tensorcircuit.quantum.measurement_counts:14
+msgid "\"count_dict_bin\": # {\"00\": 2, \"01\": 0, \"10\": 0, \"11\": 0}"
msgstr ""
-#: of tensorcircuit.templates.blocks.Bell_pair_block:7
-msgid "Circuit out"
+#: of tensorcircuit.quantum.measurement_counts:16
+msgid "\"count_dict_int\": # {0: 2, 1: 0, 2: 0, 3: 0}"
msgstr ""
-#: of tensorcircuit.templates.blocks.example_block:1
+#: of tensorcircuit.quantum.measurement_counts:37
msgid ""
-"The circuit ansatz is firstly one layer of Hadamard gates and then we "
-"have ``nlayers`` blocks of :math:`e^{i\\theta Z_iZ_{i+1}}` two-qubit gate"
-" in ladder layout, following rx gate."
+"The quantum state, assumed to be normalized, as either a ket or density "
+"operator."
msgstr ""
-#: of tensorcircuit.templates.blocks.example_block:5
-msgid "The circuit"
+#: of tensorcircuit.quantum.measurement_counts:39
+msgid "The number of counts to perform."
msgstr ""
-#: of tensorcircuit.templates.blocks.example_block:7
-msgid "paramter tensor with 2*nlayer*n elements"
+#: of tensorcircuit.quantum.measurement_counts:41
+msgid "alias for the argument ``counts``"
msgstr ""
-#: of tensorcircuit.templates.blocks.example_block:9
-msgid "number of ZZ+RX blocks, defaults to 2"
+#: of tensorcircuit.quantum.measurement_counts:43
+msgid "defaults to be \"direct\", see supported format above"
msgstr ""
-#: of tensorcircuit.templates.blocks.example_block:11
-msgid "whether use SVD split to reduce ZZ gate bond dimension, defaults to False"
+#: of tensorcircuit.quantum.measurement_counts:47
+msgid ""
+"if True, the `state` is directly regarded as a probability list, defaults"
+" to be False"
msgstr ""
-#: of tensorcircuit.templates.blocks.example_block:14
-msgid "The circuit with example ansatz attached"
+#: of tensorcircuit.quantum.measurement_counts:55
+msgid "if True, jax backend try using a jittable count, defaults to False"
msgstr ""
-#: of tensorcircuit.templates.blocks.qft:1
-msgid ""
-"This function applies quantum fourier transformation (QFT) to the "
-"selected circuit lines"
+#: of tensorcircuit.quantum.measurement_counts:57
+msgid "The counts for each bit string measured."
msgstr ""
-#: of tensorcircuit.templates.blocks.qft:5
-msgid "the indices of the circuit lines to apply QFT"
+#: of tensorcircuit.quantum.mutual_information:1
+msgid "Mutual information between AB subsystem described by ``cut``."
msgstr ""
-#: of tensorcircuit.templates.blocks.qft:7
-msgid "Whether to include the final swaps in the QFT"
+#: of tensorcircuit.quantum.mutual_information:5
+msgid "The AB subsystem."
msgstr ""
-#: of tensorcircuit.templates.blocks.qft:9
-msgid "If True, the inverse Fourier transform is constructed"
+#: of tensorcircuit.quantum.mutual_information:7
+msgid "The mutual information between AB subsystem described by ``cut``."
msgstr ""
-#: of tensorcircuit.templates.blocks.qft:11
-msgid "If True, barriers are inserted as visualization improvement"
+#: of tensorcircuit.quantum.ps2xyz:1
+msgid "pauli string list to xyz dict"
msgstr ""
-#: of tensorcircuit.templates.blocks.qft:13
-msgid "Circuit c"
+#: of tensorcircuit.quantum.ps2xyz:3
+msgid "# ps2xyz([1, 2, 2, 0]) = {\"x\": [0], \"y\": [1, 2], \"z\": []}"
msgstr ""
-#: of tensorcircuit.templates.blocks.state_centric:1
+#: of tensorcircuit.quantum.quantum_constructor:1
msgid ""
-"Function decorator wraps the function with the first input and output in "
-"the format of circuit, the wrapped function has the first input and the "
-"output as the state tensor."
+"Constructs an appropriately specialized QuOperator. If there are no "
+"edges, creates a QuScalar. If the are only output (input) edges, creates "
+"a QuVector (QuAdjointVector). Otherwise creates a QuOperator."
msgstr ""
-#: of tensorcircuit.templates.blocks.state_centric:4
-msgid "Function with the fist input and the output as ``Circuit`` object."
+#: of tensorcircuit.quantum.quantum_constructor:48
+msgid "A list of output edges."
msgstr ""
-#: of tensorcircuit.templates.blocks.state_centric:6
+#: of tensorcircuit.quantum.quantum_constructor:50
+msgid "A list of input edges."
+msgstr ""
+
+#: of tensorcircuit.quantum.quantum_constructor:52
msgid ""
-"Wrapped function with the first input and the output as the state tensor "
-"correspondingly."
+"Reference nodes for the tensor network (needed if there is a. scalar "
+"component)."
msgstr ""
-#: ../../source/api/templates/chems.rst:2
-msgid "tensorcircuit.templates.chems"
+#: of tensorcircuit.quantum.quantum_constructor:55
+msgid "Edges to ignore when checking the dimensionality of the tensor network."
msgstr ""
-#: of tensorcircuit.templates.chems:1
-msgid "Useful utilities for quantum chemistry related task"
+#: of tensorcircuit.quantum.quantum_constructor:58
+msgid "The new created QuOperator object."
msgstr ""
-#: of tensorcircuit.templates.chems.get_ps:1
-msgid ""
-"Get Pauli string array and weights array for a qubit Hamiltonian as a sum"
-" of Pauli strings defined in openfermion ``QubitOperator``."
+#: of tensorcircuit.quantum.quimb2qop:1
+msgid "Convert MPO in Quimb package to QuOperator."
msgstr ""
-#: of tensorcircuit.templates.chems.get_ps:4
-msgid "``openfermion.ops.operators.qubit_operator.QubitOperator``"
+#: of tensorcircuit.quantum.quimb2qop:3
+msgid "MPO in the form of Quimb package"
msgstr ""
-#: of tensorcircuit.templates.chems.get_ps:6
-msgid "The number of qubits"
+#: of tensorcircuit.quantum.quimb2qop:5 tensorcircuit.quantum.tn2qop:5
+msgid "MPO in the form of QuOperator"
msgstr ""
-#: of tensorcircuit.templates.chems.get_ps:8
-msgid "Pauli String array and weights array"
+#: of tensorcircuit.quantum.reduced_density_matrix:1
+msgid "Compute the reduced density matrix from the quantum state ``state``."
msgstr ""
-#: ../../source/api/templates/dataset.rst:2
-msgid "tensorcircuit.templates.dataset"
+#: of tensorcircuit.quantum.reduced_density_matrix:3
+msgid "The quantum state in form of Tensor or QuOperator."
msgstr ""
-#: of tensorcircuit.templates.dataset:1
-msgid "Quantum machine learning related data preprocessing and embedding"
+#: of tensorcircuit.quantum.reduced_density_matrix:5
+msgid ""
+"the index list that is traced out, if cut is a int, it indicates [0, cut]"
+" as the traced out region"
msgstr ""
-#: ../../source/api/templates/graphs.rst:2
-msgid "tensorcircuit.templates.graphs"
+#: of tensorcircuit.quantum.reduced_density_matrix:8
+msgid "probability decoration, default is None."
msgstr ""
-#: of tensorcircuit.templates.graphs:1
-msgid "Some common graphs and lattices"
+#: of tensorcircuit.quantum.reduced_density_matrix:10
+msgid "The reduced density matrix."
msgstr ""
-#: of tensorcircuit.templates.graphs.Grid2DCoord:1
-msgid "Two-dimensional grid lattice"
+#: of tensorcircuit.quantum.renyi_entropy:1
+msgid "Compute the Rényi entropy of order :math:`k` by given density matrix."
msgstr ""
-#: of tensorcircuit.templates.graphs.Grid2DCoord.__init__:1
-msgid "number of rows"
+#: of tensorcircuit.quantum.renyi_entropy:5
+#: tensorcircuit.quantum.renyi_free_energy:18
+msgid "The order of Rényi entropy, default is 2."
msgstr ""
-#: of tensorcircuit.templates.graphs.Grid2DCoord.__init__:3
-msgid "number of cols"
+#: of tensorcircuit.quantum.renyi_entropy:7
+#: tensorcircuit.quantum.renyi_free_energy:20
+msgid "The :math:`k` th order of Rényi entropy."
msgstr ""
-#: of tensorcircuit.templates.graphs.Grid2DCoord.all_cols:1
-msgid "return all col edge with 1d index encoding"
+#: of tensorcircuit.quantum.renyi_free_energy:1
+msgid ""
+"Compute the Rényi free energy of the corresponding density matrix and "
+"Hamiltonian."
msgstr ""
-#: of tensorcircuit.templates.graphs.Grid2DCoord.all_cols:3
-#: tensorcircuit.templates.graphs.Grid2DCoord.all_rows:3
+#: of tensorcircuit.quantum.sample2all:1
msgid ""
-"whether to include pbc edges (periodic boundary condition), defaults to "
-"False"
+"transform ``sample_int`` or ``sample_bin`` form results to other forms "
+"specified by ``format``"
msgstr ""
-#: of tensorcircuit.templates.graphs.Grid2DCoord.all_cols:6
-msgid "list of col edge"
+#: of tensorcircuit.quantum.sample2all:3
+msgid "measurement shots results in ``sample_int`` or ``sample_bin`` format"
msgstr ""
-#: of tensorcircuit.templates.graphs.Grid2DCoord.all_rows:1
-msgid "return all row edge with 1d index encoding"
+#: of tensorcircuit.quantum.sample2all:7
+msgid ""
+"see the doc in the doc in "
+":py:meth:`tensorcircuit.quantum.measurement_results`, defaults to "
+"\"count_vector\""
msgstr ""
-#: of tensorcircuit.templates.graphs.Grid2DCoord.all_rows:6
-msgid "list of row edge"
+#: of tensorcircuit.quantum.sample2all:12
+msgid "only applicable to count transformation in jax backend, defaults to False"
msgstr ""
-#: of tensorcircuit.templates.graphs.Grid2DCoord.lattice_graph:1
-msgid "Get the 2D grid lattice in ``nx.Graph`` format"
+#: of tensorcircuit.quantum.sample2all:14
+msgid "measurement results specified as ``format``"
msgstr ""
-#: of tensorcircuit.templates.graphs.Grid2DCoord.lattice_graph:3
-msgid ""
-"whether to include pbc edges (periodic boundary condition), defaults to "
-"True"
+#: of tensorcircuit.quantum.sample2count:1
+msgid "sample_int to count_tuple"
msgstr ""
-#: of tensorcircuit.templates.graphs.Line1D:1
-msgid "1D chain with ``n`` sites"
+#: of tensorcircuit.quantum.sample_bin2int:1
+msgid "bin sample to int sample"
msgstr ""
-#: of tensorcircuit.templates.graphs.Line1D:5
-#: tensorcircuit.templates.measurements.heisenberg_measurements:34
-msgid "[description], defaults to True"
+#: of tensorcircuit.quantum.sample_bin2int:3
+msgid "in shape [trials, n] of elements (0, 1)"
msgstr ""
-#: ../../source/api/templates/measurements.rst:2
-msgid "tensorcircuit.templates.measurements"
+#: of tensorcircuit.quantum.sample_bin2int:7
+msgid "in shape [trials]"
msgstr ""
-#: of tensorcircuit.templates.measurements.any_local_measurements:1
-msgid ""
-"This measurements pattern is specifically suitable for vmap. Parameterize"
-" the local Pauli string to be measured."
+#: of tensorcircuit.quantum.sample_int2bin:1
+msgid "int sample to bin sample"
msgstr ""
-#: of tensorcircuit.templates.measurements.any_local_measurements:19
-#: tensorcircuit.templates.measurements.any_measurements:26
-msgid "The circuit to be measured"
+#: of tensorcircuit.quantum.sample_int2bin:3
+msgid "in shape [trials] of int elements in the range [0, 2**n)"
msgstr ""
-#: of tensorcircuit.templates.measurements.any_local_measurements:21
-#: tensorcircuit.templates.measurements.any_measurements:28
-msgid ""
-"parameter tensors determines what Pauli string to be measured, shape is "
-"[nwires, 4] if ``onehot`` is False and [nwires] if ``onehot`` is True."
+#: of tensorcircuit.quantum.sample_int2bin:7
+msgid "in shape [trials, n] of element (0, 1)"
msgstr ""
-#: of tensorcircuit.templates.measurements.any_local_measurements:24
-#: tensorcircuit.templates.measurements.any_measurements:31
+#: of tensorcircuit.quantum.spin_by_basis:1
msgid ""
-"defaults to False. If set to be True, structures will first go through "
-"onehot procedure."
+"Generate all n-bitstrings as an array, each row is a bitstring basis. "
+"Return m-th col."
msgstr ""
-#: of tensorcircuit.templates.measurements.any_local_measurements:27
-msgid ""
-"reuse the wavefunction when computing the expectations, defaults to be "
-"True"
+#: of tensorcircuit.quantum.spin_by_basis:9
+msgid "length of a bitstring"
msgstr ""
-#: of tensorcircuit.templates.measurements.any_local_measurements:29
-#: tensorcircuit.templates.measurements.any_measurements:36
-msgid "The expectation value of given Pauli string by the tensor ``structures``."
+#: of tensorcircuit.quantum.spin_by_basis:11
+msgid "m`."
+#: ../../source/api/results/readout_mitigation.rst:2
+msgid "tensorcircuit.results.readout_mitigation"
msgstr ""
-#: of torch.nn.modules.module.Module.eval:10
-msgid ""
-"See :ref:`locally-disable-grad-doc` for a comparison between `.eval()` "
-"and several similar mechanisms that may be confused with it."
+#: of tensorcircuit.results.readout_mitigation:1
+msgid "readout error mitigation functionalities"
msgstr ""
-#: of torch.nn.modules.module.Module.extra_repr:1
-msgid "Set the extra representation of the module"
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.__init__:1
+msgid "The Class for readout error mitigation"
msgstr ""
-#: of torch.nn.modules.module.Module.extra_repr:3
-msgid ""
-"To print customized extra information, you should re-implement this "
-"method in your own modules. Both single-line and multi-line strings are "
-"acceptable."
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.__init__:3
+msgid "execute function to run the cirucit"
msgstr ""
-#: of torch.nn.modules.module.Module.float:1
-msgid "Casts all floating point parameters and buffers to ``float`` datatype."
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.__init__:5
+msgid "iteration threshold, defaults to 4096"
msgstr ""
-#: of tensorcircuit.torchnn.QuantumNet.forward:1
-msgid "Defines the computation performed at every call."
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.apply_correction:1
+msgid "Main readout mitigation program for all methods."
msgstr ""
-#: of tensorcircuit.torchnn.QuantumNet.forward:3
-msgid "Should be overridden by all subclasses."
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.apply_correction:3
+msgid "raw count"
msgstr ""
-#: of tensorcircuit.torchnn.QuantumNet.forward:6
-msgid ""
-"Although the recipe for forward pass needs to be defined within this "
-"function, one should call the :class:`Module` instance afterwards instead"
-" of this since the former takes care of running the registered hooks "
-"while the latter silently ignores them."
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.apply_correction:5
+#: tensorcircuit.results.readout_mitigation.ReadoutMit.mapping_preprocess:6
+msgid "user-defined logical qubits to show final mitted results"
msgstr ""
-#: of torch.nn.modules.module.Module.get_buffer:1
-msgid ""
-"Returns the buffer given by ``target`` if it exists, otherwise throws an "
-"error."
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.apply_correction:7
+#: tensorcircuit.results.readout_mitigation.ReadoutMit.expectation:11
+#: tensorcircuit.results.readout_mitigation.ReadoutMit.mapping_preprocess:8
+msgid "positional_logical_mapping, defaults to None."
msgstr ""
-#: of torch.nn.modules.module.Module.get_buffer:4
-#: torch.nn.modules.module.Module.get_parameter:4
-msgid ""
-"See the docstring for ``get_submodule`` for a more detailed explanation "
-"of this method's functionality as well as how to correctly specify "
-"``target``."
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.apply_correction:9
+#: tensorcircuit.results.readout_mitigation.ReadoutMit.expectation:13
+#: tensorcircuit.results.readout_mitigation.ReadoutMit.mapping_preprocess:10
+msgid "logical_physical_mapping, defaults to None"
msgstr ""
-#: of torch.nn.modules.module.Module.get_buffer:8
-msgid ""
-"The fully-qualified string name of the buffer to look for. (See "
-"``get_submodule`` for how to specify a fully-qualified string.)"
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.apply_correction:11
+msgid "defaults to None"
msgstr ""
-#: of torch.nn.modules.module.Module.get_buffer:12
-msgid "The buffer referenced by ``target``"
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.apply_correction:13
+msgid "mitigation method, defaults to \"square\""
msgstr ""
-#: of torch.nn.modules.module.Module.get_buffer:15
-msgid ""
-"If the target string references an invalid path or resolves to "
-"something that is not a buffer"
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.apply_correction:15
+msgid "defaults to 25"
msgstr ""
-#: of torch.nn.modules.module.Module.get_extra_state:1
-msgid ""
-"Returns any extra state to include in the module's state_dict. Implement "
-"this and a corresponding :func:`set_extra_state` for your module if you "
-"need to store extra state. This function is called when building the "
-"module's `state_dict()`."
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.apply_correction:17
+msgid "defaults to 1e-5"
msgstr ""
-#: of torch.nn.modules.module.Module.get_extra_state:6
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.apply_correction:19
msgid ""
-"Note that extra state should be pickleable to ensure working "
-"serialization of the state_dict. We only provide provide backwards "
-"compatibility guarantees for serializing Tensors; other objects may break"
-" backwards compatibility if their serialized pickled form changes."
+":param return_mitigation_overhead:defaults to False :type "
+"return_mitigation_overhead: bool, optional :param details: defaults to "
+"False :type details: bool, optional :return: mitigated count :rtype: ct"
msgstr ""
-#: of torch.nn.modules.module.Module.get_extra_state:11
-msgid "Any extra state to store in the module's state_dict"
+#: of
+#: tensorcircuit.results.readout_mitigation.ReadoutMit.apply_readout_mitigation:1
+msgid "Main readout mitigation program for method=\"inverse\" or \"square\""
msgstr ""
-#: of torch.nn.modules.module.Module.get_parameter:1
-msgid ""
-"Returns the parameter given by ``target`` if it exists, otherwise throws "
-"an error."
+#: of
+#: tensorcircuit.results.readout_mitigation.ReadoutMit.apply_readout_mitigation:3
+msgid "the raw count"
msgstr ""
-#: of torch.nn.modules.module.Module.get_parameter:8
-msgid ""
-"The fully-qualified string name of the Parameter to look for. (See "
-"``get_submodule`` for how to specify a fully-qualified string.)"
+#: of
+#: tensorcircuit.results.readout_mitigation.ReadoutMit.apply_readout_mitigation:5
+msgid "mitigation method, defaults to \"inverse\""
msgstr ""
-#: of torch.nn.modules.module.Module.get_parameter:12
-msgid "The Parameter referenced by ``target``"
+#: of
+#: tensorcircuit.results.readout_mitigation.ReadoutMit.apply_readout_mitigation:7
+msgid "mitigated count"
msgstr ""
-#: of torch.nn.modules.module.Module.get_parameter:15
-msgid ""
-"If the target string references an invalid path or resolves to "
-"something that is not an ``nn.Parameter``"
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.cals_from_api:1
+msgid "Get local calibriation matrix from cloud API from tc supported providers"
msgstr ""
-#: of torch.nn.modules.module.Module.get_submodule:1
-msgid ""
-"Returns the submodule given by ``target`` if it exists, otherwise throws "
-"an error."
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.cals_from_api:3
+msgid "list of physical qubits to be calibriated"
msgstr ""
-#: of torch.nn.modules.module.Module.get_submodule:4
-msgid ""
-"For example, let's say you have an ``nn.Module`` ``A`` that looks like "
-"this:"
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.cals_from_api:5
+msgid "the device str to qurey for the info, defaults to None"
msgstr ""
-#: of torch.nn.modules.module.Module.get_submodule:18
-msgid ""
-"(The diagram shows an ``nn.Module`` ``A``. ``A`` has a nested submodule "
-"``net_b``, which itself has two submodules ``net_c`` and ``linear``. "
-"``net_c`` then has a submodule ``conv``.)"
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.cals_from_system:1
+msgid "Get calibrattion information from system."
msgstr ""
-#: of torch.nn.modules.module.Module.get_submodule:22
-msgid ""
-"To check whether or not we have the ``linear`` submodule, we would call "
-"``get_submodule(\"net_b.linear\")``. To check whether we have the "
-"``conv`` submodule, we would call "
-"``get_submodule(\"net_b.net_c.conv\")``."
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.cals_from_system:3
+msgid "calibration qubit list (physical qubits on device)"
msgstr ""
-#: of torch.nn.modules.module.Module.get_submodule:27
-msgid ""
-"The runtime of ``get_submodule`` is bounded by the degree of module "
-"nesting in ``target``. A query against ``named_modules`` achieves the "
-"same result, but it is O(N) in the number of transitive modules. So, for "
-"a simple check to see if some submodule exists, ``get_submodule`` should "
-"always be used."
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.cals_from_system:5
+msgid "shots used for runing the circuit, defaults to 8192"
msgstr ""
-#: of torch.nn.modules.module.Module.get_submodule:34
-msgid ""
-"The fully-qualified string name of the submodule to look for. (See above "
-"example for how to specify a fully-qualified string.)"
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.cals_from_system:7
+msgid "calibration method, defaults to \"local\", it can also be \"global\""
msgstr ""
-#: of torch.nn.modules.module.Module.get_submodule:38
-msgid "The submodule referenced by ``target``"
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.expectation:1
+msgid "Calculate expectation value after readout error mitigation"
msgstr ""
-#: of torch.nn.modules.module.Module.get_submodule:41
-msgid ""
-"If the target string references an invalid path or resolves to "
-"something that is not an ``nn.Module``"
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.expectation:3
+msgid "raw counts"
msgstr ""
-#: of torch.nn.modules.module.Module.half:1
-msgid "Casts all floating point parameters and buffers to ``half`` datatype."
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.expectation:15
+msgid "readout mitigation method, defaults to \"constrained_least_square\""
msgstr ""
-#: of torch.nn.modules.module.Module.load_state_dict:1
-msgid ""
-"Copies parameters and buffers from :attr:`state_dict` into this module "
-"and its descendants. If :attr:`strict` is ``True``, then the keys of "
-":attr:`state_dict` must exactly match the keys returned by this module's "
-":meth:`~torch.nn.Module.state_dict` function."
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.expectation:17
+msgid "expectation value after readout error mitigation"
msgstr ""
-#: of torch.nn.modules.module.Module.load_state_dict:6
-msgid "a dict containing parameters and persistent buffers."
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.get_matrix:1
+msgid "Calculate cal_matrix according to use qubit list."
msgstr ""
-#: of torch.nn.modules.module.Module.load_state_dict:9
-msgid ""
-"whether to strictly enforce that the keys in :attr:`state_dict` match the"
-" keys returned by this module's :meth:`~torch.nn.Module.state_dict` "
-"function. Default: ``True``"
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.get_matrix:3
+msgid "used qubit list, defaults to None"
msgstr ""
-#: of torch.nn.modules.module.Module.load_state_dict:14
-msgid ""
-"* **missing_keys** is a list of str containing the missing keys * "
-"**unexpected_keys** is a list of str containing the unexpected keys"
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.get_matrix:5
+msgid "cal_matrix"
msgstr ""
-#: of torch.nn.modules.module.Module.load_state_dict:14
-msgid "**missing_keys** is a list of str containing the missing keys"
+#: of
+#: tensorcircuit.results.readout_mitigation.ReadoutMit.global_miti_readout_circ:1
+msgid "Generate circuits for global calibration."
msgstr ""
-#: of torch.nn.modules.module.Module.load_state_dict:15
-msgid "**unexpected_keys** is a list of str containing the unexpected keys"
+#: of
+#: tensorcircuit.results.readout_mitigation.ReadoutMit.global_miti_readout_circ:3
+#: tensorcircuit.results.readout_mitigation.ReadoutMit.local_miti_readout_circ:3
+msgid "circuit list"
msgstr ""
-#: of torch.nn.modules.module.Module.load_state_dict:16
-msgid "``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields"
+#: of
+#: tensorcircuit.results.readout_mitigation.ReadoutMit.local_miti_readout_circ:1
+msgid "Generate circuits for local calibration."
msgstr ""
-#: of torch.nn.modules.module.Module.load_state_dict:20
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.mapping_preprocess:1
msgid ""
-"If a parameter or buffer is registered as ``None`` and its corresponding "
-"key exists in :attr:`state_dict`, :meth:`load_state_dict` will raise a "
-"``RuntimeError``."
+"Preprocessing to deal with qubit mapping, including "
+"positional_logical_mapping and logical_physical_mapping. Return "
+"self.use_qubits(physical) and corresponding counts."
msgstr ""
-#: of torch.nn.modules.module.Module.modules:1
-msgid "Returns an iterator over all modules in the network."
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.mapping_preprocess:4
+msgid "raw_counts on positional_qubits"
msgstr ""
-#: of torch.nn.modules.module.Module.modules:3
-msgid "*Module* -- a module in the network"
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.mapping_preprocess:12
+msgid "counts on self.use_qubit(physical)"
msgstr ""
-#: of torch.nn.modules.module.Module.modules:7
-#: torch.nn.modules.module.Module.named_modules:13
-msgid ""
-"Duplicate modules are returned only once. In the following example, ``l``"
-" will be returned only once."
+#: of
+#: tensorcircuit.results.readout_mitigation.ReadoutMit.mitigate_probability:1
+msgid "Get the mitigated probability."
msgstr ""
-#: of torch.nn.modules.module.Module.named_buffers:1
-msgid ""
-"Returns an iterator over module buffers, yielding both the name of the "
-"buffer as well as the buffer itself."
+#: of
+#: tensorcircuit.results.readout_mitigation.ReadoutMit.mitigate_probability:3
+msgid "probability of raw count"
msgstr ""
-#: of torch.nn.modules.module.Module.named_buffers:4
-msgid "prefix to prepend to all buffer names."
+#: of
+#: tensorcircuit.results.readout_mitigation.ReadoutMit.mitigate_probability:5
+msgid "mitigation methods, defaults to \"inverse\", it can also be \"square\""
msgstr ""
-#: of torch.nn.modules.module.Module.named_buffers:11
-msgid "*(string, torch.Tensor)* -- Tuple containing the name and buffer"
+#: of
+#: tensorcircuit.results.readout_mitigation.ReadoutMit.mitigate_probability:7
+msgid "mitigated probability"
msgstr ""
-#: of torch.nn.modules.module.Module.named_children:1
-msgid ""
-"Returns an iterator over immediate children modules, yielding both the "
-"name of the module as well as the module itself."
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.newrange:1
+msgid "Rerange the order according to used qubit list."
msgstr ""
-#: of torch.nn.modules.module.Module.named_children:4
-msgid "*(string, Module)* -- Tuple containing a name and child module"
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.newrange:3
+#: tensorcircuit.results.readout_mitigation.ReadoutMit.ubs:3
+msgid "index"
msgstr ""
-#: of torch.nn.modules.module.Module.named_modules:1
-msgid ""
-"Returns an iterator over all modules in the network, yielding both the "
-"name of the module as well as the module itself."
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.newrange:5
+#: tensorcircuit.results.readout_mitigation.ReadoutMit.ubs:5
+msgid "used qubit list"
msgstr ""
-#: of torch.nn.modules.module.Module.named_modules:4
-msgid "a memo to store the set of modules already added to the result"
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.newrange:7
+msgid "new index"
msgstr ""
-#: of torch.nn.modules.module.Module.named_modules:5
-msgid "a prefix that will be added to the name of the module"
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.ubs:1
+msgid "Help omit calibration results that not in used qubit list."
msgstr ""
-#: of torch.nn.modules.module.Module.named_modules:6
-msgid "whether to remove the duplicated module instances in the result or not"
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.ubs:7
+msgid "omitation related value"
msgstr ""
-#: of torch.nn.modules.module.Module.named_modules:9
-msgid "*(string, Module)* -- Tuple of name and module"
+#: ../../source/api/simplify.rst:2
+msgid "tensorcircuit.simplify"
msgstr ""
-#: of torch.nn.modules.module.Module.named_parameters:1
+#: of tensorcircuit.simplify:1
+msgid "Tensornetwork Simplification"
+msgstr ""
+
+#: of tensorcircuit.simplify.infer_new_shape:1
msgid ""
-"Returns an iterator over module parameters, yielding both the name of the"
-" parameter as well as the parameter itself."
+"Get the new shape of two nodes, also supporting to return original shapes"
+" of two nodes."
msgstr ""
-#: of torch.nn.modules.module.Module.named_parameters:4
-msgid "prefix to prepend to all parameter names."
+#: of tensorcircuit.simplify.infer_new_shape:13
+msgid "node one"
msgstr ""
-#: of torch.nn.modules.module.Module.named_parameters:6
-#: torch.nn.modules.module.Module.parameters:5
-msgid ""
-"if True, then yields parameters of this module and all submodules. "
-"Otherwise, yields only parameters that are direct members of this module."
+#: of tensorcircuit.simplify.infer_new_shape:15
+msgid "node two"
msgstr ""
-#: of torch.nn.modules.module.Module.named_parameters:11
-msgid "*(string, Parameter)* -- Tuple containing the name and parameter"
+#: of tensorcircuit.simplify.infer_new_shape:17
+msgid "Whether to include original shape of two nodes, default is True."
msgstr ""
-#: of torch.nn.modules.module.Module.parameters:1
-msgid "Returns an iterator over module parameters."
+#: of tensorcircuit.simplify.infer_new_shape:19
+msgid "The new shape of the two nodes."
msgstr ""
-#: of torch.nn.modules.module.Module.parameters:3
-msgid "This is typically passed to an optimizer."
+#: of tensorcircuit.simplify.pseudo_contract_between:1
+msgid ""
+"Contract between Node ``a`` and ``b``, with correct shape only and no "
+"calculation"
msgstr ""
-#: of torch.nn.modules.module.Module.parameters:10
-msgid "*Parameter* -- module parameter"
+#: ../../source/api/templates.rst:2
+msgid "tensorcircuit.templates"
msgstr ""
-#: of torch.nn.modules.module.Module.register_backward_hook:1
-#: torch.nn.modules.module.Module.register_full_backward_hook:1
-msgid "Registers a backward hook on the module."
+#: ../../source/api/templates/blocks.rst:2
+msgid "tensorcircuit.templates.blocks"
msgstr ""
-#: of torch.nn.modules.module.Module.register_backward_hook:3
-msgid ""
-"This function is deprecated in favor of "
-":meth:`~torch.nn.Module.register_full_backward_hook` and the behavior of "
-"this function will change in future versions."
+#: of tensorcircuit.templates.blocks:1 tensorcircuit.templates.measurements:1
+msgid "Shortcuts for measurement patterns on circuit"
msgstr ""
-#: of torch.nn.modules.module.Module.register_backward_hook:6
-#: torch.nn.modules.module.Module.register_forward_hook:14
-#: torch.nn.modules.module.Module.register_forward_pre_hook:14
-#: torch.nn.modules.module.Module.register_full_backward_hook:25
+#: of tensorcircuit.templates.blocks.Bell_pair_block:1
msgid ""
-"a handle that can be used to remove the added hook by calling "
-"``handle.remove()``"
+"For each pair in links, the input product state |00> is transformed as "
+"(01>-|10>)"
msgstr ""
-#: of torch.nn.modules.module.Module.register_backward_hook:8
-#: torch.nn.modules.module.Module.register_forward_hook:16
-#: torch.nn.modules.module.Module.register_forward_pre_hook:16
-#: torch.nn.modules.module.Module.register_full_backward_hook:27
-msgid ":class:`torch.utils.hooks.RemovableHandle`"
+#: of tensorcircuit.templates.blocks.Bell_pair_block:3
+#: tensorcircuit.templates.blocks.qft:3
+msgid "Circuit in"
msgstr ""
-#: of torch.nn.modules.module.Module.register_buffer:1
-msgid "Adds a buffer to the module."
+#: of tensorcircuit.templates.blocks.Bell_pair_block:5
+msgid ""
+"pairs indices for Bell pairs, defaults to None, corresponds to neighbor "
+"links"
msgstr ""
-#: of torch.nn.modules.module.Module.register_buffer:3
+#: of tensorcircuit.templates.blocks.Bell_pair_block:7
+msgid "Circuit out"
+msgstr ""
+
+#: of tensorcircuit.templates.blocks.example_block:1
msgid ""
-"This is typically used to register a buffer that should not to be "
-"considered a model parameter. For example, BatchNorm's ``running_mean`` "
-"is not a parameter, but is part of the module's state. Buffers, by "
-"default, are persistent and will be saved alongside parameters. This "
-"behavior can be changed by setting :attr:`persistent` to ``False``. The "
-"only difference between a persistent buffer and a non-persistent buffer "
-"is that the latter will not be a part of this module's "
-":attr:`state_dict`."
+"The circuit ansatz is firstly one layer of Hadamard gates and then we "
+"have ``nlayers`` blocks of :math:`e^{i\\theta Z_iZ_{i+1}}` two-qubit gate"
+" in ladder layout, following rx gate."
msgstr ""
-#: of torch.nn.modules.module.Module.register_buffer:12
-msgid "Buffers can be accessed as attributes using given names."
+#: of tensorcircuit.templates.blocks.example_block:5
+msgid "The circuit"
msgstr ""
-#: of torch.nn.modules.module.Module.register_buffer:14
-msgid ""
-"name of the buffer. The buffer can be accessed from this module using the"
-" given name"
+#: of tensorcircuit.templates.blocks.example_block:7
+msgid "paramter tensor with 2*nlayer*n elements"
msgstr ""
-#: of torch.nn.modules.module.Module.register_buffer:17
-msgid ""
-"buffer to be registered. If ``None``, then operations that run on "
-"buffers, such as :attr:`cuda`, are ignored. If ``None``, the buffer is "
-"**not** included in the module's :attr:`state_dict`."
+#: of tensorcircuit.templates.blocks.example_block:9
+msgid "number of ZZ+RX blocks, defaults to 2"
msgstr ""
-#: of torch.nn.modules.module.Module.register_buffer:21
-msgid "whether the buffer is part of this module's :attr:`state_dict`."
+#: of tensorcircuit.templates.blocks.example_block:11
+msgid "whether use SVD split to reduce ZZ gate bond dimension, defaults to False"
msgstr ""
-#: of torch.nn.modules.module.Module.register_forward_hook:1
-msgid "Registers a forward hook on the module."
+#: of tensorcircuit.templates.blocks.example_block:14
+msgid "The circuit with example ansatz attached"
msgstr ""
-#: of torch.nn.modules.module.Module.register_forward_hook:3
+#: of tensorcircuit.templates.blocks.qft:1
msgid ""
-"The hook will be called every time after :func:`forward` has computed an "
-"output. It should have the following signature::"
+"This function applies quantum fourier transformation (QFT) to the "
+"selected circuit lines"
msgstr ""
-#: of torch.nn.modules.module.Module.register_forward_hook:8
-msgid ""
-"The input contains only the positional arguments given to the module. "
-"Keyword arguments won't be passed to the hooks and only to the "
-"``forward``. The hook can modify the output. It can modify the input "
-"inplace but it will not have effect on forward since this is called after"
-" :func:`forward` is called."
+#: of tensorcircuit.templates.blocks.qft:5
+msgid "the indices of the circuit lines to apply QFT"
msgstr ""
-#: of torch.nn.modules.module.Module.register_forward_pre_hook:1
-msgid "Registers a forward pre-hook on the module."
+#: of tensorcircuit.templates.blocks.qft:7
+msgid "Whether to include the final swaps in the QFT"
msgstr ""
-#: of torch.nn.modules.module.Module.register_forward_pre_hook:3
-msgid ""
-"The hook will be called every time before :func:`forward` is invoked. It "
-"should have the following signature::"
+#: of tensorcircuit.templates.blocks.qft:9
+msgid "If True, the inverse Fourier transform is constructed"
msgstr ""
-#: of torch.nn.modules.module.Module.register_forward_pre_hook:8
-msgid ""
-"The input contains only the positional arguments given to the module. "
-"Keyword arguments won't be passed to the hooks and only to the "
-"``forward``. The hook can modify the input. User can either return a "
-"tuple or a single modified value in the hook. We will wrap the value into"
-" a tuple if a single value is returned(unless that value is already a "
-"tuple)."
+#: of tensorcircuit.templates.blocks.qft:11
+msgid "If True, barriers are inserted as visualization improvement"
msgstr ""
-#: of torch.nn.modules.module.Module.register_full_backward_hook:3
-msgid ""
-"The hook will be called every time the gradients with respect to module "
-"inputs are computed. The hook should have the following signature::"
+#: of tensorcircuit.templates.blocks.qft:13
+msgid "Circuit c"
msgstr ""
-#: of torch.nn.modules.module.Module.register_full_backward_hook:8
+#: of tensorcircuit.templates.blocks.state_centric:1
msgid ""
-"The :attr:`grad_input` and :attr:`grad_output` are tuples that contain "
-"the gradients with respect to the inputs and outputs respectively. The "
-"hook should not modify its arguments, but it can optionally return a new "
-"gradient with respect to the input that will be used in place of "
-":attr:`grad_input` in subsequent computations. :attr:`grad_input` will "
-"only correspond to the inputs given as positional arguments and all kwarg"
-" arguments are ignored. Entries in :attr:`grad_input` and "
-":attr:`grad_output` will be ``None`` for all non-Tensor arguments."
+"Function decorator wraps the function with the first input and output in "
+"the format of circuit, the wrapped function has the first input and the "
+"output as the state tensor."
msgstr ""
-#: of torch.nn.modules.module.Module.register_full_backward_hook:17
-msgid ""
-"For technical reasons, when this hook is applied to a Module, its forward"
-" function will receive a view of each Tensor passed to the Module. "
-"Similarly the caller will receive a view of each Tensor returned by the "
-"Module's forward function."
+#: of tensorcircuit.templates.blocks.state_centric:4
+msgid "Function with the fist input and the output as ``Circuit`` object."
msgstr ""
-#: of torch.nn.modules.module.Module.register_full_backward_hook:22
+#: of tensorcircuit.templates.blocks.state_centric:6
msgid ""
-"Modifying inputs or outputs inplace is not allowed when using backward "
-"hooks and will raise an error."
+"Wrapped function with the first input and the output as the state tensor "
+"correspondingly."
msgstr ""
-#: of torch.nn.modules.module.Module.register_module:1
-msgid "Alias for :func:`add_module`."
+#: ../../source/api/templates/chems.rst:2
+msgid "tensorcircuit.templates.chems"
msgstr ""
-#: of torch.nn.modules.module.Module.register_parameter:1
-msgid "Adds a parameter to the module."
+#: of tensorcircuit.templates.chems:1
+msgid "Useful utilities for quantum chemistry related task"
msgstr ""
-#: of torch.nn.modules.module.Module.register_parameter:3
-msgid "The parameter can be accessed as an attribute using given name."
+#: ../../source/api/templates/dataset.rst:2
+msgid "tensorcircuit.templates.dataset"
msgstr ""
-#: of torch.nn.modules.module.Module.register_parameter:5
-msgid ""
-"name of the parameter. The parameter can be accessed from this module "
-"using the given name"
+#: of tensorcircuit.templates.dataset:1
+msgid "Quantum machine learning related data preprocessing and embedding"
msgstr ""
-#: of torch.nn.modules.module.Module.register_parameter:8
-msgid ""
-"parameter to be added to the module. If ``None``, then operations that "
-"run on parameters, such as :attr:`cuda`, are ignored. If ``None``, the "
-"parameter is **not** included in the module's :attr:`state_dict`."
+#: ../../source/api/templates/ensemble.rst:2
+msgid "tensorcircuit.templates.ensemble"
msgstr ""
-#: of torch.nn.modules.module.Module.requires_grad_:1
-msgid "Change if autograd should record operations on parameters in this module."
+#: of tensorcircuit.templates.ensemble:1
+msgid "Useful utilities for ensemble"
msgstr ""
-#: of torch.nn.modules.module.Module.requires_grad_:4
-msgid ""
-"This method sets the parameters' :attr:`requires_grad` attributes in-"
-"place."
+#: of tensorcircuit.templates.ensemble.bagging.append:1
+msgid "Add model to the voting method"
msgstr ""
-#: of torch.nn.modules.module.Module.requires_grad_:7
+#: of tensorcircuit.templates.ensemble.bagging.eval:1
msgid ""
-"This method is helpful for freezing part of the module for finetuning or "
-"training parts of a model individually (e.g., GAN training)."
+"Expect input data to be a 2D array which a 1D array of yTrue followed by "
+"a 1D array of yPred is expected to be the components of the 2D array"
msgstr ""
-#: of torch.nn.modules.module.Module.requires_grad_:10
+#: of tensorcircuit.templates.ensemble.bagging.predict:1
msgid ""
-"See :ref:`locally-disable-grad-doc` for a comparison between "
-"`.requires_grad_()` and several similar mechanisms that may be confused "
-"with it."
+"Input data is expected to be a 2D array that the first layer is different"
+" input data (into the trained models)"
msgstr ""
-#: of torch.nn.modules.module.Module.requires_grad_:13
+#: of tensorcircuit.templates.ensemble.bagging.train:1
msgid ""
-"whether autograd should record operations on parameters in this module. "
-"Default: ``True``."
+"Train all models in the class, **kwargs expect to receive the argus that "
+"can be directly sent to tf.fit Expected to be run after finishing compile"
msgstr ""
-#: of torch.nn.modules.module.Module.set_extra_state:1
-msgid ""
-"This function is called from :func:`load_state_dict` to handle any extra "
-"state found within the `state_dict`. Implement this function and a "
-"corresponding :func:`get_extra_state` for your module if you need to "
-"store extra state within its `state_dict`."
+#: ../../source/api/templates/graphs.rst:2
+msgid "tensorcircuit.templates.graphs"
msgstr ""
-#: of torch.nn.modules.module.Module.set_extra_state:6
-msgid "Extra state from the `state_dict`"
+#: of tensorcircuit.templates.graphs:1
+msgid "Some common graphs and lattices"
msgstr ""
-#: of torch.nn.modules.module.Module.share_memory:1
-msgid "See :meth:`torch.Tensor.share_memory_`"
+#: of tensorcircuit.templates.graphs.Grid2DCoord:1
+msgid "Two-dimensional grid lattice"
msgstr ""
-#: of torch.nn.modules.module.Module.state_dict:1
-msgid "Returns a dictionary containing a whole state of the module."
+#: of tensorcircuit.templates.graphs.Grid2DCoord.__init__:1
+msgid "number of rows"
msgstr ""
-#: of torch.nn.modules.module.Module.state_dict:3
-msgid ""
-"Both parameters and persistent buffers (e.g. running averages) are "
-"included. Keys are corresponding parameter and buffer names. Parameters "
-"and buffers set to ``None`` are not included."
+#: of tensorcircuit.templates.graphs.Grid2DCoord.__init__:3
+msgid "number of cols"
msgstr ""
-#: of torch.nn.modules.module.Module.state_dict:7
-msgid "a dictionary containing a whole state of the module"
+#: of tensorcircuit.templates.graphs.Grid2DCoord.all_cols:1
+msgid "return all col edge with 1d index encoding"
msgstr ""
-#: of torch.nn.modules.module.Module.to:1
-msgid "Moves and/or casts the parameters and buffers."
+#: of tensorcircuit.templates.graphs.Grid2DCoord.all_cols:3
+#: tensorcircuit.templates.graphs.Grid2DCoord.all_rows:3
+msgid ""
+"whether to include pbc edges (periodic boundary condition), defaults to "
+"False"
msgstr ""
-#: of torch.nn.modules.module.Module.to:3
-msgid "This can be called as"
+#: of tensorcircuit.templates.graphs.Grid2DCoord.all_cols:6
+msgid "list of col edge"
msgstr ""
-#: of torch.nn.modules.module.Module.to:17
-msgid ""
-"Its signature is similar to :meth:`torch.Tensor.to`, but only accepts "
-"floating point or complex :attr:`dtype`\\ s. In addition, this method "
-"will only cast the floating point or complex parameters and buffers to "
-":attr:`dtype` (if given). The integral parameters and buffers will be "
-"moved :attr:`device`, if that is given, but with dtypes unchanged. When "
-":attr:`non_blocking` is set, it tries to convert/move asynchronously with"
-" respect to the host if possible, e.g., moving CPU Tensors with pinned "
-"memory to CUDA devices."
+#: of tensorcircuit.templates.graphs.Grid2DCoord.all_rows:1
+msgid "return all row edge with 1d index encoding"
msgstr ""
-#: of torch.nn.modules.module.Module.to:26
-msgid "See below for examples."
+#: of tensorcircuit.templates.graphs.Grid2DCoord.all_rows:6
+msgid "list of row edge"
msgstr ""
-#: of torch.nn.modules.module.Module.to:31
-msgid "the desired device of the parameters and buffers in this module"
+#: of tensorcircuit.templates.graphs.Grid2DCoord.lattice_graph:1
+msgid "Get the 2D grid lattice in ``nx.Graph`` format"
msgstr ""
-#: of torch.nn.modules.module.Module.to:34
+#: of tensorcircuit.templates.graphs.Grid2DCoord.lattice_graph:3
msgid ""
-"the desired floating point or complex dtype of the parameters and buffers"
-" in this module"
+"whether to include pbc edges (periodic boundary condition), defaults to "
+"True"
msgstr ""
-#: of torch.nn.modules.module.Module.to:37
-msgid ""
-"Tensor whose dtype and device are the desired dtype and device for all "
-"parameters and buffers in this module"
+#: of tensorcircuit.templates.graphs.Line1D:1
+msgid "1D chain with ``n`` sites"
msgstr ""
-#: of torch.nn.modules.module.Module.to:40
-msgid ""
-"the desired memory format for 4D parameters and buffers in this module "
-"(keyword only argument)"
+#: of tensorcircuit.templates.graphs.Line1D:5
+#: tensorcircuit.templates.measurements.heisenberg_measurements:34
+msgid "[description], defaults to True"
msgstr ""
-#: of torch.nn.modules.module.Module.to:48
-msgid "Examples::"
+#: ../../source/api/templates/measurements.rst:2
+msgid "tensorcircuit.templates.measurements"
msgstr ""
-#: of torch.nn.modules.module.Module.to_empty:1
+#: of tensorcircuit.templates.measurements.any_local_measurements:1
msgid ""
-"Moves the parameters and buffers to the specified device without copying "
-"storage."
+"This measurements pattern is specifically suitable for vmap. Parameterize"
+" the local Pauli string to be measured."
msgstr ""
-#: of torch.nn.modules.module.Module.to_empty:3
-msgid "The desired device of the parameters and buffers in this module."
+#: of tensorcircuit.templates.measurements.any_local_measurements:19
+#: tensorcircuit.templates.measurements.any_measurements:26
+msgid "The circuit to be measured"
msgstr ""
-#: of torch.nn.modules.module.Module.train:1
-msgid "Sets the module in training mode."
+#: of tensorcircuit.templates.measurements.any_local_measurements:21
+#: tensorcircuit.templates.measurements.any_measurements:28
+msgid ""
+"parameter tensors determines what Pauli string to be measured, shape is "
+"[nwires, 4] if ``onehot`` is False and [nwires] if ``onehot`` is True."
msgstr ""
-#: of torch.nn.modules.module.Module.train:8
+#: of tensorcircuit.templates.measurements.any_local_measurements:24
+#: tensorcircuit.templates.measurements.any_measurements:31
msgid ""
-"whether to set training mode (``True``) or evaluation mode (``False``). "
-"Default: ``True``."
+"defaults to False. If set to be True, structures will first go through "
+"onehot procedure."
msgstr ""
-#: of torch.nn.modules.module.Module.type:1
-msgid "Casts all parameters and buffers to :attr:`dst_type`."
+#: of tensorcircuit.templates.measurements.any_local_measurements:27
+msgid ""
+"reuse the wavefunction when computing the expectations, defaults to be "
+"True"
msgstr ""
-#: of torch.nn.modules.module.Module.type:6
-msgid "the desired type"
+#: of tensorcircuit.templates.measurements.any_local_measurements:29
+#: tensorcircuit.templates.measurements.any_measurements:36
+msgid "The expectation value of given Pauli string by the tensor ``structures``."
msgstr ""
-#: of torch.nn.modules.module.Module.xpu:1
-msgid "Moves all model parameters and buffers to the XPU."
+#: of tensorcircuit.templates.measurements.any_measurements:1
+msgid ""
+"This measurements pattern is specifically suitable for vmap. Parameterize"
+" the Pauli string to be measured."
msgstr ""
-#: of torch.nn.modules.module.Module.xpu:3
+#: of tensorcircuit.templates.measurements.any_measurements:34
msgid ""
-"This also makes associated parameters and buffers different objects. So "
-"it should be called before constructing optimizer if the module will live"
-" on XPU while being optimized."
+"reuse the wavefunction when computing the expectations, defaults to be "
+"False"
msgstr ""
-#: of torch.nn.modules.module.Module.zero_grad:1
+#: of tensorcircuit.templates.measurements.heisenberg_measurements:1
msgid ""
-"Sets gradients of all model parameters to zero. See similar function "
-"under :class:`torch.optim.Optimizer` for more context."
+"Evaluate Heisenberg energy expectation, whose Hamiltonian is defined on "
+"the lattice graph ``g`` as follows: (e are edges in graph ``g`` where e1 "
+"and e2 are two nodes for edge e and v are nodes in graph ``g``)"
msgstr ""
-#: of torch.nn.modules.module.Module.zero_grad:4
+#: of tensorcircuit.templates.measurements.heisenberg_measurements:4
msgid ""
-"instead of setting to zero, set the grads to None. See "
-":meth:`torch.optim.Optimizer.zero_grad` for details."
+"H = \\sum_{e\\in g} w_e (h_{xx} X_{e1}X_{e2} + h_{yy} Y_{e1}Y_{e2} + "
+"h_{zz} Z_{e1}Z_{e2})\n"
+" + \\sum_{v\\in g} (h_x X_v + h_y Y_v + h_z Z_v)"
msgstr ""
-#: ../../source/api/translation.rst:2
-msgid "tensorcircuit.translation"
+#: of tensorcircuit.templates.measurements.heisenberg_measurements:18
+msgid "Circuit to be measured"
msgstr ""
-#: of tensorcircuit.translation:1
-msgid "Circuit object translation in different packages"
+#: of tensorcircuit.templates.measurements.heisenberg_measurements:20
+msgid "Lattice graph defining Heisenberg Hamiltonian"
msgstr ""
-#: of tensorcircuit.translation.eqasm2tc:1
-msgid "Translation qexe/eqasm instruction to tensorcircuit Circuit object"
+#: of tensorcircuit.templates.measurements.heisenberg_measurements:22
+#: tensorcircuit.templates.measurements.heisenberg_measurements:24
+#: tensorcircuit.templates.measurements.heisenberg_measurements:26
+msgid "[description], defaults to 1.0"
msgstr ""
-#: of tensorcircuit.translation.eqasm2tc:7
-msgid "lines of ignored code at the head and the tail, defaults to (6, 1)"
+#: of tensorcircuit.templates.measurements.heisenberg_measurements:28
+#: tensorcircuit.templates.measurements.heisenberg_measurements:30
+#: tensorcircuit.templates.measurements.heisenberg_measurements:32
+msgid "[description], defaults to 0.0"
msgstr ""
-#: of tensorcircuit.translation.perm_matrix:1
+#: of tensorcircuit.templates.measurements.heisenberg_measurements:36
+msgid "Value of Heisenberg energy"
+msgstr ""
+
+#: of tensorcircuit.templates.measurements.mpo_expectation:1
msgid ""
-"Generate a permutation matrix P. Due to the different convention or "
-"qubits' order in qiskit and tensorcircuit, the unitary represented by the"
-" same circuit is different. They are related by this permutation matrix "
-"P: P @ U_qiskit @ P = U_tc"
+"Evaluate expectation of operator ``mpo`` defined in ``QuOperator`` MPO "
+"format with the output quantum state from circuit ``c``."
msgstr ""
-#: of tensorcircuit.translation.perm_matrix:7
-#: tensorcircuit.translation.qir2cirq:15
-#: tensorcircuit.translation.qir2qiskit:16
-#: tensorcircuit.translation.qiskit2tc:14 tensorcircuit.vis.qir2tex:12
-msgid "# of qubits"
+#: of tensorcircuit.templates.measurements.mpo_expectation:4
+msgid "The circuit for the output state"
msgstr ""
-#: of tensorcircuit.translation.perm_matrix:9
-msgid "The permutation matrix P"
+#: of tensorcircuit.templates.measurements.mpo_expectation:6
+msgid "MPO operator"
msgstr ""
-#: of tensorcircuit.translation.qir2cirq:1
-msgid ""
-"Generate a cirq circuit using the quantum intermediate representation "
-"(qir) in tensorcircuit."
+#: of tensorcircuit.templates.measurements.mpo_expectation:8
+#: tensorcircuit.templates.measurements.operator_expectation:7
+#: tensorcircuit.templates.measurements.sparse_expectation:7
+msgid "a real and scalar tensor of shape [] as the expectation value"
msgstr ""
-#: of tensorcircuit.translation.qir2cirq:17
-#: tensorcircuit.translation.qir2qiskit:18
+#: of tensorcircuit.templates.measurements.operator_expectation:1
msgid ""
-"The extra quantum IR of tc circuit including measure and reset on "
-"hardware, defaults to None"
+"Evaluate Hamiltonian expectation where ``hamiltonian`` can be dense "
+"matrix, sparse matrix or MPO."
msgstr ""
-#: of tensorcircuit.translation.qir2cirq:20
-msgid "qiskit cirq object"
+#: of tensorcircuit.templates.measurements.operator_expectation:3
+#: tensorcircuit.templates.measurements.sparse_expectation:3
+msgid "The circuit whose output state is used to evaluate the expectation"
msgstr ""
-#: of tensorcircuit.translation.qir2cirq:23
-msgid ""
-"#TODO(@erertertet): add default theta to iswap gate add more cirq built-"
-"in gate instead of customized add unitary test with tolerance add support"
-" of cirq built-in ControlledGate for multiplecontroll support more "
-"element in qir, e.g. barrier, measure..."
+#: of tensorcircuit.templates.measurements.operator_expectation:5
+#: tensorcircuit.templates.measurements.sparse_expectation:5
+msgid "Hamiltonian matrix in COO_sparse_matrix form"
msgstr ""
-#: of tensorcircuit.translation.qir2json:1
+#: of tensorcircuit.templates.measurements.sparse_expectation:1
msgid ""
-"transform qir to json compatible list of dict where array is replaced by "
-"real and imaginary list"
+"Evaluate Hamiltonian expectation where ``hamiltonian`` is kept in sparse "
+"matrix form to save space"
msgstr ""
-#: of tensorcircuit.translation.qir2qiskit:1
+#: of tensorcircuit.templates.measurements.spin_glass_measurements:1
msgid ""
-"Generate a qiskit quantum circuit using the quantum intermediate "
-"representation (qir) in tensorcircuit."
+"Compute spin glass energy defined on graph ``g`` expectation for output "
+"state of the circuit ``c``. The Hamiltonian to be evaluated is defined as"
+" (first term is determined by node weights while the second term is "
+"determined by edge weights of the graph):"
msgstr ""
-#: of tensorcircuit.translation.qir2qiskit:21
-msgid "qiskit QuantumCircuit object"
+#: of tensorcircuit.templates.measurements.spin_glass_measurements:5
+msgid "H = \\sum_{v\\in g} w_v Z_v + \\sum_{e\\in g} w_e Z_{e1} Z_{e2}"
msgstr ""
-#: of tensorcircuit.translation.qiskit2tc:1
-msgid "Generate a tensorcircuit circuit using the quantum circuit data in qiskit."
+#: of tensorcircuit.templates.measurements.spin_glass_measurements:28
+msgid "The quantum circuit"
msgstr ""
-#: of tensorcircuit.translation.qiskit2tc:12
-msgid "Quantum circuit data from qiskit."
+#: of tensorcircuit.templates.measurements.spin_glass_measurements:30
+msgid "The graph for spin glass Hamiltonian definition"
msgstr ""
-#: of tensorcircuit.translation.qiskit2tc:16
-msgid "Input state of the circuit. Default is None."
-msgstr ""
+#: of tensorcircuit.templates.measurements.spin_glass_measurements:32
+msgid ""
+"Whether measure the circuit with reusing the wavefunction, defaults to "
+"True"
+msgstr ""
-#: of tensorcircuit.translation.qiskit2tc:18
-msgid "``Circuit``, ``DMCircuit`` or ``MPSCircuit``"
+#: of tensorcircuit.templates.measurements.spin_glass_measurements:34
+msgid "The spin glass energy expectation value"
msgstr ""
-#: of tensorcircuit.translation.qiskit2tc:26
-msgid "A quantum circuit in tensorcircuit"
+#: ../../source/api/torchnn.rst:2
+msgid "tensorcircuit.torchnn"
msgstr ""
-#: of tensorcircuit.translation.qiskit_from_qasm_str_ordered_measure:1
-msgid ""
-"qiskit ``from_qasm_str`` method cannot keep the order of measure as the "
-"qasm file, we provide this alternative function in case the order of "
-"measure instruction matters"
+#: of tensorcircuit.torchnn:1
+msgid "PyTorch nn Module wrapper for quantum function"
msgstr ""
-#: of tensorcircuit.translation.qiskit_from_qasm_str_ordered_measure:4
-msgid "open qasm str"
+#: of tensorcircuit.torchnn.HardwareNet:1
+msgid "Bases: :py:class:`~tensorcircuit.torchnn.QuantumNet`"
msgstr ""
-#: of tensorcircuit.translation.qiskit_from_qasm_str_ordered_measure:6
-msgid "``qiskit.circuit.QuantumCircuit``"
+#: of tensorcircuit.torchnn.HardwareNet:1
+msgid ""
+"PyTorch Layer wrapping quantum function with cloud qpu access (using "
+":py:mod:`tensorcircuit.cloud` module)"
msgstr ""
-#: ../../source/api/utils.rst:2
-msgid "tensorcircuit.utils"
+#: of tensorcircuit.torchnn.HardwareNet.__init__:1
+#: tensorcircuit.torchnn.QuantumNet.__init__:1
+msgid "PyTorch nn Module wrapper on quantum function ``f``."
msgstr ""
-#: of tensorcircuit.utils:1
-msgid "Helper functions"
+#: of tensorcircuit.torchnn.HardwareNet.__init__:32
+#: tensorcircuit.torchnn.QuantumNet.__init__:32
+msgid "Quantum function with tensor in (input and weights) and tensor out."
msgstr ""
-#: of tensorcircuit.utils.append:1
-msgid "Functional programming paradigm to build function pipeline"
+#: of tensorcircuit.torchnn.HardwareNet.__init__:34
+#: tensorcircuit.torchnn.QuantumNet.__init__:34
+msgid ""
+"list of shape tuple for different weights as the non-first parameters for"
+" ``f``"
msgstr ""
-#: of tensorcircuit.utils.append:9
-msgid "The function which are attached with other functions"
+#: of tensorcircuit.torchnn.HardwareNet.__init__:36
+#: tensorcircuit.torchnn.QuantumNet.__init__:36
+msgid "function that gives the shape tuple returns torch tensor, defaults to None"
msgstr ""
-#: of tensorcircuit.utils.append:11
-msgid "Function to be attached"
+#: of tensorcircuit.torchnn.HardwareNet.__init__:38
+#: tensorcircuit.torchnn.QuantumNet.__init__:38
+msgid "whether apply vmap (batch input) on ``f``, defaults to True"
msgstr ""
-#: of tensorcircuit.utils.append:13
-msgid "The final results after function pipeline"
+#: of tensorcircuit.torchnn.HardwareNet.__init__:40
+#: tensorcircuit.torchnn.QuantumNet.__init__:40
+msgid ""
+"which position of input should be batched, need to be customized when "
+"multiple inputs for the torch model, defaults to be 0."
msgstr ""
-#: of tensorcircuit.utils.arg_alias:1
-msgid "function argument alias decorator with new docstring"
+#: of tensorcircuit.torchnn.HardwareNet.__init__:43
+#: tensorcircuit.torchnn.QuantumNet.__init__:43
+msgid "whether transform ``f`` with torch interface, defaults to True"
msgstr ""
-#: of tensorcircuit.utils.arg_alias:7
-msgid "whether to add doc for these new alias arguments, defaults True"
+#: of tensorcircuit.torchnn.HardwareNet.__init__:45
+#: tensorcircuit.torchnn.QuantumNet.__init__:45
+msgid "whether jit ``f``, defaults to True"
msgstr ""
-#: of tensorcircuit.utils.arg_alias:9
-msgid "the decorated function"
+#: of tensorcircuit.torchnn.HardwareNet.__init__:47
+#: tensorcircuit.torchnn.QuantumNet.__init__:47
+msgid "whether enbale dlpack in interfaces, defaults to False"
msgstr ""
-#: of tensorcircuit.utils.benchmark:1
-msgid "benchmark jittable function with staging time and running time"
+#: of torch.nn.modules.module.Module.add_module:1
+msgid "Adds a child module to the current module."
msgstr ""
-#: of tensorcircuit.utils.benchmark:5
-msgid "_description_, defaults to 5"
+#: of torch.nn.modules.module.Module.add_module:3
+msgid "The module can be accessed as an attribute using the given name."
msgstr ""
-#: of tensorcircuit.utils.is_m1mac:1
-msgid "check whether the running platform is MAC with M1 chip"
+#: of torch.nn.modules.module.Module.add_module:5
+msgid ""
+"name of the child module. The child module can be accessed from this "
+"module using the given name"
msgstr ""
-#: of tensorcircuit.utils.is_m1mac:3
-msgid "True for MAC M1 platform"
+#: of torch.nn.modules.module.Module.add_module:8
+msgid "child module to be added to the module."
msgstr ""
-#: of tensorcircuit.utils.return_partial:1
+#: of torch.nn.modules.module.Module.apply:1
msgid ""
-"Return a callable function for output ith parts of the original output "
-"along the first axis. Original output supports List and Tensor."
+"Applies ``fn`` recursively to every submodule (as returned by "
+"``.children()``) as well as self. Typical use includes initializing the "
+"parameters of a model (see also :ref:`nn-init-doc`)."
msgstr ""
-#: of tensorcircuit.utils.return_partial:20
-msgid "The function to be applied this method"
+#: of torch.nn.modules.module.Module.apply:5
+msgid "function to be applied to each submodule"
msgstr ""
-#: of tensorcircuit.utils.return_partial:22
-msgid "The ith parts of original output along the first axis (axis=0 or dim=0)"
+#: of torch.nn.modules.module.Module.apply:8
+#: torch.nn.modules.module.Module.bfloat16:6
+#: torch.nn.modules.module.Module.cpu:6 torch.nn.modules.module.Module.cuda:14
+#: torch.nn.modules.module.Module.double:6
+#: torch.nn.modules.module.Module.eval:13
+#: torch.nn.modules.module.Module.float:6 torch.nn.modules.module.Module.half:6
+#: torch.nn.modules.module.Module.ipu:14
+#: torch.nn.modules.module.Module.requires_grad_:17
+#: torch.nn.modules.module.Module.to:45
+#: torch.nn.modules.module.Module.to_empty:7
+#: torch.nn.modules.module.Module.train:12
+#: torch.nn.modules.module.Module.type:9 torch.nn.modules.module.Module.xpu:14
+msgid "self"
msgstr ""
-#: of tensorcircuit.utils.return_partial:24
-msgid "The modified callable function"
+#: of torch.nn.modules.module.Module.apply:11
+#: torch.nn.modules.module.Module.buffers:10
+#: torch.nn.modules.module.Module.modules:10
+#: torch.nn.modules.module.Module.named_buffers:15
+#: torch.nn.modules.module.Module.named_children:6
+#: torch.nn.modules.module.Module.named_modules:16
+#: torch.nn.modules.module.Module.named_parameters:16
+#: torch.nn.modules.module.Module.parameters:12
+#: torch.nn.modules.module.Module.register_buffer:25
+#: torch.nn.modules.module.Module.state_dict:38
+msgid "Example::"
msgstr ""
-#: ../../source/api/vis.rst:2
-msgid "tensorcircuit.vis"
+#: of torch.nn.modules.module.Module.bfloat16:1
+msgid "Casts all floating point parameters and buffers to ``bfloat16`` datatype."
msgstr ""
-#: of tensorcircuit.vis:1
-msgid "Visualization on circuits"
+#: of torch.nn.modules.module.Module.bfloat16:4
+#: torch.nn.modules.module.Module.cpu:4 torch.nn.modules.module.Module.cuda:8
+#: torch.nn.modules.module.Module.double:4
+#: torch.nn.modules.module.Module.float:4 torch.nn.modules.module.Module.half:4
+#: torch.nn.modules.module.Module.ipu:8 torch.nn.modules.module.Module.to:29
+#: torch.nn.modules.module.Module.type:4 torch.nn.modules.module.Module.xpu:8
+msgid "This method modifies the module in-place."
msgstr ""
-#: of tensorcircuit.vis.gate_name_trans:1
+#: of torch.nn.modules.module.Module.buffers:1
+msgid "Returns an iterator over module buffers."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.buffers:3
msgid ""
-"Translating from the gate name to gate information including the number "
-"of control qubits and the reduced gate name."
+"if True, then yields buffers of this module and all submodules. "
+"Otherwise, yields only buffers that are direct members of this module."
msgstr ""
-#: of tensorcircuit.vis.gate_name_trans:10
-msgid "String of gate name"
+#: of torch.nn.modules.module.Module.buffers
+#: torch.nn.modules.module.Module.children
+#: torch.nn.modules.module.Module.modules
+#: torch.nn.modules.module.Module.named_buffers
+#: torch.nn.modules.module.Module.named_children
+#: torch.nn.modules.module.Module.named_modules
+#: torch.nn.modules.module.Module.named_parameters
+#: torch.nn.modules.module.Module.parameters
+msgid "Yields"
msgstr ""
-#: of tensorcircuit.vis.gate_name_trans:12
-msgid "# of control qubits, reduced gate name"
+#: of torch.nn.modules.module.Module.buffers:8
+msgid "*torch.Tensor* -- module buffer"
msgstr ""
-#: of tensorcircuit.vis.qir2tex:1
+#: of torch.nn.modules.module.Module.children:1
+msgid "Returns an iterator over immediate children modules."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.children:3
+msgid "*Module* -- a child module"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.cpu:1
+msgid "Moves all model parameters and buffers to the CPU."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.cuda:1
+msgid "Moves all model parameters and buffers to the GPU."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.cuda:3
msgid ""
-"Generate Tex code from 'qir' string to illustrate the circuit structure. "
-"This visualization is based on quantikz package."
+"This also makes associated parameters and buffers different objects. So "
+"it should be called before constructing optimizer if the module will live"
+" on GPU while being optimized."
msgstr ""
-#: of tensorcircuit.vis.qir2tex:10
-msgid "The quantum intermediate representation of a circuit in tensorcircuit."
+#: of torch.nn.modules.module.Module.cuda:10
+#: torch.nn.modules.module.Module.ipu:10 torch.nn.modules.module.Module.xpu:10
+msgid "if specified, all parameters will be copied to that device"
msgstr ""
-#: of tensorcircuit.vis.qir2tex:14
-msgid "Initial state, default is an all zero state '000...000'."
+#: of torch.nn.modules.module.Module.double:1
+msgid "Casts all floating point parameters and buffers to ``double`` datatype."
msgstr ""
-#: of tensorcircuit.vis.qir2tex:16
+#: of torch.nn.modules.module.Module.eval:1
+msgid "Sets the module in evaluation mode."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.eval:3
+#: torch.nn.modules.module.Module.train:3
msgid ""
-"Measurement Basis, default is None which means no measurement in the end "
-"of the circuit."
+"This has any effect only on certain modules. See documentations of "
+"particular modules for details of their behaviors in training/evaluation "
+"mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`, "
+"etc."
msgstr ""
-#: of tensorcircuit.vis.qir2tex:19
+#: of torch.nn.modules.module.Module.eval:8
+msgid "This is equivalent with :meth:`self.train(False) `."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.eval:10
msgid ""
-"If true, a right compression of the circuit will be conducted. A right "
-"compression means we will try to shift gates from right to left if "
-"possible."
+"See :ref:`locally-disable-grad-doc` for a comparison between `.eval()` "
+"and several similar mechanisms that may be confused with it."
msgstr ""
-#: of tensorcircuit.vis.qir2tex:21
+#: of torch.nn.modules.module.Module.extra_repr:1
+msgid "Set the extra representation of the module"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.extra_repr:3
msgid ""
-"Default is false. :type rcompress: bool :param lcompress: If true, a left"
-" compression of the circuit will be conducted."
+"To print customized extra information, you should re-implement this "
+"method in your own modules. Both single-line and multi-line strings are "
+"acceptable."
msgstr ""
-#: of tensorcircuit.vis.qir2tex:24
+#: of torch.nn.modules.module.Module.float:1
+msgid "Casts all floating point parameters and buffers to ``float`` datatype."
+msgstr ""
+
+#: of tensorcircuit.torchnn.HardwareNet.forward:1
+#: tensorcircuit.torchnn.QuantumNet.forward:1
+msgid "Defines the computation performed at every call."
+msgstr ""
+
+#: of tensorcircuit.torchnn.HardwareNet.forward:3
+#: tensorcircuit.torchnn.QuantumNet.forward:3
+msgid "Should be overridden by all subclasses."
+msgstr ""
+
+#: of tensorcircuit.torchnn.HardwareNet.forward:6
+#: tensorcircuit.torchnn.QuantumNet.forward:6
msgid ""
-"A left compression means we will try to shift gates from left to right if"
-" possible. Default is false."
+"Although the recipe for forward pass needs to be defined within this "
+"function, one should call the :class:`Module` instance afterwards instead"
+" of this since the former takes care of running the registered hooks "
+"while the latter silently ignores them."
msgstr ""
-#: of tensorcircuit.vis.qir2tex:27
+#: of torch.nn.modules.module.Module.get_buffer:1
msgid ""
-"If true, the tex code will be designed to generate a standalone document."
-" Default is false which means the generated tex code is just a quantikz "
-"code block."
+"Returns the buffer given by ``target`` if it exists, otherwise throws an "
+"error."
msgstr ""
-#: of tensorcircuit.vis.qir2tex:30
+#: of torch.nn.modules.module.Module.get_buffer:4
+#: torch.nn.modules.module.Module.get_parameter:4
msgid ""
-"If true, a string table of tex code will also be returned. Default is "
-"false."
+"See the docstring for ``get_submodule`` for a more detailed explanation "
+"of this method's functionality as well as how to correctly specify "
+"``target``."
msgstr ""
-#: of tensorcircuit.vis.qir2tex:33
+#: of torch.nn.modules.module.Module.get_buffer:8
msgid ""
-"Tex code of circuit visualization based on quantikz package. If "
-"return_string_table is true, a string table of tex code will also be "
-"returned."
+"The fully-qualified string name of the buffer to look for. (See "
+"``get_submodule`` for how to specify a fully-qualified string.)"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.get_buffer:12
+msgid "The buffer referenced by ``target``"
msgstr ""
-#: of tensorcircuit.vis.render_pdf:1
-msgid ""
-"Generate the PDF file with given latex string and filename. Latex command"
-" and file path can be specified. When notebook is True, convert the "
-"output PDF file to image and return a Image object."
-msgstr ""
+#: of torch.nn.modules.module.Module.get_buffer:15
+msgid ""
+"If the target string references an invalid path or resolves to "
+"something that is not a buffer"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.get_extra_state:1
+msgid ""
+"Returns any extra state to include in the module's state_dict. Implement "
+"this and a corresponding :func:`set_extra_state` for your module if you "
+"need to store extra state. This function is called when building the "
+"module's `state_dict()`."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.get_extra_state:6
+msgid ""
+"Note that extra state should be picklable to ensure working serialization"
+" of the state_dict. We only provide provide backwards compatibility "
+"guarantees for serializing Tensors; other objects may break backwards "
+"compatibility if their serialized pickled form changes."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.get_extra_state:11
+msgid "Any extra state to store in the module's state_dict"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.get_parameter:1
+msgid ""
+"Returns the parameter given by ``target`` if it exists, otherwise throws "
+"an error."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.get_parameter:8
+msgid ""
+"The fully-qualified string name of the Parameter to look for. (See "
+"``get_submodule`` for how to specify a fully-qualified string.)"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.get_parameter:12
+msgid "The Parameter referenced by ``target``"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.get_parameter:15
+msgid ""
+"If the target string references an invalid path or resolves to "
+"something that is not an ``nn.Parameter``"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.get_submodule:1
+msgid ""
+"Returns the submodule given by ``target`` if it exists, otherwise throws "
+"an error."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.get_submodule:4
+msgid ""
+"For example, let's say you have an ``nn.Module`` ``A`` that looks like "
+"this:"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.get_submodule:18
+msgid ""
+"(The diagram shows an ``nn.Module`` ``A``. ``A`` has a nested submodule "
+"``net_b``, which itself has two submodules ``net_c`` and ``linear``. "
+"``net_c`` then has a submodule ``conv``.)"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.get_submodule:22
+msgid ""
+"To check whether or not we have the ``linear`` submodule, we would call "
+"``get_submodule(\"net_b.linear\")``. To check whether we have the "
+"``conv`` submodule, we would call "
+"``get_submodule(\"net_b.net_c.conv\")``."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.get_submodule:27
+msgid ""
+"The runtime of ``get_submodule`` is bounded by the degree of module "
+"nesting in ``target``. A query against ``named_modules`` achieves the "
+"same result, but it is O(N) in the number of transitive modules. So, for "
+"a simple check to see if some submodule exists, ``get_submodule`` should "
+"always be used."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.get_submodule:34
+msgid ""
+"The fully-qualified string name of the submodule to look for. (See above "
+"example for how to specify a fully-qualified string.)"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.get_submodule:38
+msgid "The submodule referenced by ``target``"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.get_submodule:41
+msgid ""
+"If the target string references an invalid path or resolves to "
+"something that is not an ``nn.Module``"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.half:1
+msgid "Casts all floating point parameters and buffers to ``half`` datatype."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.ipu:1
+msgid "Moves all model parameters and buffers to the IPU."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.ipu:3
+msgid ""
+"This also makes associated parameters and buffers different objects. So "
+"it should be called before constructing optimizer if the module will live"
+" on IPU while being optimized."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.load_state_dict:1
+msgid ""
+"Copies parameters and buffers from :attr:`state_dict` into this module "
+"and its descendants. If :attr:`strict` is ``True``, then the keys of "
+":attr:`state_dict` must exactly match the keys returned by this module's "
+":meth:`~torch.nn.Module.state_dict` function."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.load_state_dict:6
+msgid "a dict containing parameters and persistent buffers."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.load_state_dict:9
+msgid ""
+"whether to strictly enforce that the keys in :attr:`state_dict` match the"
+" keys returned by this module's :meth:`~torch.nn.Module.state_dict` "
+"function. Default: ``True``"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.load_state_dict:14
+msgid ""
+"* **missing_keys** is a list of str containing the missing keys * "
+"**unexpected_keys** is a list of str containing the unexpected keys"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.load_state_dict:14
+msgid "**missing_keys** is a list of str containing the missing keys"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.load_state_dict:15
+msgid "**unexpected_keys** is a list of str containing the unexpected keys"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.load_state_dict:16
+msgid "``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.load_state_dict:20
+msgid ""
+"If a parameter or buffer is registered as ``None`` and its corresponding "
+"key exists in :attr:`state_dict`, :meth:`load_state_dict` will raise a "
+"``RuntimeError``."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.modules:1
+msgid "Returns an iterator over all modules in the network."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.modules:3
+msgid "*Module* -- a module in the network"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.modules:7
+#: torch.nn.modules.module.Module.named_modules:13
+msgid ""
+"Duplicate modules are returned only once. In the following example, ``l``"
+" will be returned only once."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.named_buffers:1
+msgid ""
+"Returns an iterator over module buffers, yielding both the name of the "
+"buffer as well as the buffer itself."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.named_buffers:4
+msgid "prefix to prepend to all buffer names."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.named_buffers:6
+msgid ""
+"if True, then yields buffers of this module and all submodules. "
+"Otherwise, yields only buffers that are direct members of this module. "
+"Defaults to True."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.named_buffers:10
+msgid "whether to remove the duplicated buffers in the result. Defaults to True."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.named_buffers:13
+msgid "*(str, torch.Tensor)* -- Tuple containing the name and buffer"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.named_children:1
+msgid ""
+"Returns an iterator over immediate children modules, yielding both the "
+"name of the module as well as the module itself."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.named_children:4
+msgid "*(str, Module)* -- Tuple containing a name and child module"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.named_modules:1
+msgid ""
+"Returns an iterator over all modules in the network, yielding both the "
+"name of the module as well as the module itself."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.named_modules:4
+msgid "a memo to store the set of modules already added to the result"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.named_modules:5
+msgid "a prefix that will be added to the name of the module"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.named_modules:6
+msgid "whether to remove the duplicated module instances in the result or not"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.named_modules:9
+msgid "*(str, Module)* -- Tuple of name and module"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.named_parameters:1
+msgid ""
+"Returns an iterator over module parameters, yielding both the name of the"
+" parameter as well as the parameter itself."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.named_parameters:4
+msgid "prefix to prepend to all parameter names."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.named_parameters:6
+#: torch.nn.modules.module.Module.parameters:5
+msgid ""
+"if True, then yields parameters of this module and all submodules. "
+"Otherwise, yields only parameters that are direct members of this module."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.named_parameters:10
+msgid ""
+"whether to remove the duplicated parameters in the result. Defaults to "
+"True."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.named_parameters:14
+msgid "*(str, Parameter)* -- Tuple containing the name and parameter"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.parameters:1
+msgid "Returns an iterator over module parameters."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.parameters:3
+msgid "This is typically passed to an optimizer."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.parameters:10
+msgid "*Parameter* -- module parameter"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_backward_hook:1
+#: torch.nn.modules.module.Module.register_full_backward_hook:1
+msgid "Registers a backward hook on the module."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_backward_hook:3
+msgid ""
+"This function is deprecated in favor of "
+":meth:`~torch.nn.Module.register_full_backward_hook` and the behavior of "
+"this function will change in future versions."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_backward_hook:6
+#: torch.nn.modules.module.Module.register_forward_hook:37
+#: torch.nn.modules.module.Module.register_forward_pre_hook:40
+#: torch.nn.modules.module.Module.register_full_backward_hook:39
+#: torch.nn.modules.module.Module.register_full_backward_pre_hook:34
+#: torch.nn.modules.module.Module.register_load_state_dict_post_hook:21
+msgid ""
+"a handle that can be used to remove the added hook by calling "
+"``handle.remove()``"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_backward_hook:8
+#: torch.nn.modules.module.Module.register_forward_hook:39
+#: torch.nn.modules.module.Module.register_forward_pre_hook:42
+#: torch.nn.modules.module.Module.register_full_backward_hook:41
+#: torch.nn.modules.module.Module.register_full_backward_pre_hook:36
+#: torch.nn.modules.module.Module.register_load_state_dict_post_hook:23
+msgid ":class:`torch.utils.hooks.RemovableHandle`"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_buffer:1
+msgid "Adds a buffer to the module."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_buffer:3
+msgid ""
+"This is typically used to register a buffer that should not to be "
+"considered a model parameter. For example, BatchNorm's ``running_mean`` "
+"is not a parameter, but is part of the module's state. Buffers, by "
+"default, are persistent and will be saved alongside parameters. This "
+"behavior can be changed by setting :attr:`persistent` to ``False``. The "
+"only difference between a persistent buffer and a non-persistent buffer "
+"is that the latter will not be a part of this module's "
+":attr:`state_dict`."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_buffer:12
+msgid "Buffers can be accessed as attributes using given names."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_buffer:14
+msgid ""
+"name of the buffer. The buffer can be accessed from this module using the"
+" given name"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_buffer:17
+msgid ""
+"buffer to be registered. If ``None``, then operations that run on "
+"buffers, such as :attr:`cuda`, are ignored. If ``None``, the buffer is "
+"**not** included in the module's :attr:`state_dict`."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_buffer:21
+msgid "whether the buffer is part of this module's :attr:`state_dict`."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_forward_hook:1
+msgid "Registers a forward hook on the module."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_forward_hook:3
+msgid ""
+"The hook will be called every time after :func:`forward` has computed an "
+"output."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_forward_hook:5
+msgid ""
+"If ``with_kwargs`` is ``False`` or not specified, the input contains only"
+" the positional arguments given to the module. Keyword arguments won't be"
+" passed to the hooks and only to the ``forward``. The hook can modify the"
+" output. It can modify the input inplace but it will not have effect on "
+"forward since this is called after :func:`forward` is called. The hook "
+"should have the following signature::"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_forward_hook:14
+msgid ""
+"If ``with_kwargs`` is ``True``, the forward hook will be passed the "
+"``kwargs`` given to the forward function and be expected to return the "
+"output possibly modified. The hook should have the following signature::"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_forward_hook:20
+#: torch.nn.modules.module.Module.register_forward_pre_hook:23
+msgid "The user defined hook to be registered."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_forward_hook:22
+msgid ""
+"If ``True``, the provided ``hook`` will be fired before all existing "
+"``forward`` hooks on this :class:`torch.nn.modules.Module`. Otherwise, "
+"the provided ``hook`` will be fired after all existing ``forward`` hooks "
+"on this :class:`torch.nn.modules.Module`. Note that global ``forward`` "
+"hooks registered with :func:`register_module_forward_hook` will fire "
+"before all hooks registered by this method. Default: ``False``"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_forward_hook:32
+msgid ""
+"If ``True``, the ``hook`` will be passed the kwargs given to the forward "
+"function. Default: ``False``"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_forward_pre_hook:1
+msgid "Registers a forward pre-hook on the module."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_forward_pre_hook:3
+msgid "The hook will be called every time before :func:`forward` is invoked."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_forward_pre_hook:6
+msgid ""
+"If ``with_kwargs`` is false or not specified, the input contains only the"
+" positional arguments given to the module. Keyword arguments won't be "
+"passed to the hooks and only to the ``forward``. The hook can modify the "
+"input. User can either return a tuple or a single modified value in the "
+"hook. We will wrap the value into a tuple if a single value is returned "
+"(unless that value is already a tuple). The hook should have the "
+"following signature::"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_forward_pre_hook:16
+msgid ""
+"If ``with_kwargs`` is true, the forward pre-hook will be passed the "
+"kwargs given to the forward function. And if the hook modifies the input,"
+" both the args and kwargs should be returned. The hook should have the "
+"following signature::"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_forward_pre_hook:25
+msgid ""
+"If true, the provided ``hook`` will be fired before all existing "
+"``forward_pre`` hooks on this :class:`torch.nn.modules.Module`. "
+"Otherwise, the provided ``hook`` will be fired after all existing "
+"``forward_pre`` hooks on this :class:`torch.nn.modules.Module`. Note that"
+" global ``forward_pre`` hooks registered with "
+":func:`register_module_forward_pre_hook` will fire before all hooks "
+"registered by this method. Default: ``False``"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_forward_pre_hook:35
+msgid ""
+"If true, the ``hook`` will be passed the kwargs given to the forward "
+"function. Default: ``False``"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_full_backward_hook:3
+msgid ""
+"The hook will be called every time the gradients with respect to a module"
+" are computed, i.e. the hook will execute if and only if the gradients "
+"with respect to module outputs are computed. The hook should have the "
+"following signature::"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_full_backward_hook:10
+msgid ""
+"The :attr:`grad_input` and :attr:`grad_output` are tuples that contain "
+"the gradients with respect to the inputs and outputs respectively. The "
+"hook should not modify its arguments, but it can optionally return a new "
+"gradient with respect to the input that will be used in place of "
+":attr:`grad_input` in subsequent computations. :attr:`grad_input` will "
+"only correspond to the inputs given as positional arguments and all kwarg"
+" arguments are ignored. Entries in :attr:`grad_input` and "
+":attr:`grad_output` will be ``None`` for all non-Tensor arguments."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_full_backward_hook:19
+#: torch.nn.modules.module.Module.register_full_backward_pre_hook:14
+msgid ""
+"For technical reasons, when this hook is applied to a Module, its forward"
+" function will receive a view of each Tensor passed to the Module. "
+"Similarly the caller will receive a view of each Tensor returned by the "
+"Module's forward function."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_full_backward_hook:24
+msgid ""
+"Modifying inputs or outputs inplace is not allowed when using backward "
+"hooks and will raise an error."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_full_backward_hook:27
+#: torch.nn.modules.module.Module.register_full_backward_pre_hook:22
+msgid "The user-defined hook to be registered."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_full_backward_hook:29
+msgid ""
+"If true, the provided ``hook`` will be fired before all existing "
+"``backward`` hooks on this :class:`torch.nn.modules.Module`. Otherwise, "
+"the provided ``hook`` will be fired after all existing ``backward`` hooks"
+" on this :class:`torch.nn.modules.Module`. Note that global ``backward`` "
+"hooks registered with :func:`register_module_full_backward_hook` will "
+"fire before all hooks registered by this method."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_full_backward_pre_hook:1
+msgid "Registers a backward pre-hook on the module."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_full_backward_pre_hook:3
+msgid ""
+"The hook will be called every time the gradients for the module are "
+"computed. The hook should have the following signature::"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_full_backward_pre_hook:8
+msgid ""
+"The :attr:`grad_output` is a tuple. The hook should not modify its "
+"arguments, but it can optionally return a new gradient with respect to "
+"the output that will be used in place of :attr:`grad_output` in "
+"subsequent computations. Entries in :attr:`grad_output` will be ``None`` "
+"for all non-Tensor arguments."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_full_backward_pre_hook:19
+msgid ""
+"Modifying inputs inplace is not allowed when using backward hooks and "
+"will raise an error."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_full_backward_pre_hook:24
+msgid ""
+"If true, the provided ``hook`` will be fired before all existing "
+"``backward_pre`` hooks on this :class:`torch.nn.modules.Module`. "
+"Otherwise, the provided ``hook`` will be fired after all existing "
+"``backward_pre`` hooks on this :class:`torch.nn.modules.Module`. Note "
+"that global ``backward_pre`` hooks registered with "
+":func:`register_module_full_backward_pre_hook` will fire before all hooks"
+" registered by this method."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_load_state_dict_post_hook:1
+msgid ""
+"Registers a post hook to be run after module's ``load_state_dict`` is "
+"called."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_load_state_dict_post_hook:5
+msgid "It should have the following signature::"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_load_state_dict_post_hook:5
+msgid "hook(module, incompatible_keys) -> None"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_load_state_dict_post_hook:7
+msgid ""
+"The ``module`` argument is the current module that this hook is "
+"registered on, and the ``incompatible_keys`` argument is a ``NamedTuple``"
+" consisting of attributes ``missing_keys`` and ``unexpected_keys``. "
+"``missing_keys`` is a ``list`` of ``str`` containing the missing keys and"
+" ``unexpected_keys`` is a ``list`` of ``str`` containing the unexpected "
+"keys."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_load_state_dict_post_hook:13
+msgid "The given incompatible_keys can be modified inplace if needed."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_load_state_dict_post_hook:15
+msgid ""
+"Note that the checks performed when calling :func:`load_state_dict` with "
+"``strict=True`` are affected by modifications the hook makes to "
+"``missing_keys`` or ``unexpected_keys``, as expected. Additions to either"
+" set of keys will result in an error being thrown when ``strict=True``, "
+"and clearing out both missing and unexpected keys will avoid an error."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_module:1
+msgid "Alias for :func:`add_module`."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_parameter:1
+msgid "Adds a parameter to the module."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_parameter:3
+msgid "The parameter can be accessed as an attribute using given name."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_parameter:5
+msgid ""
+"name of the parameter. The parameter can be accessed from this module "
+"using the given name"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_parameter:8
+msgid ""
+"parameter to be added to the module. If ``None``, then operations that "
+"run on parameters, such as :attr:`cuda`, are ignored. If ``None``, the "
+"parameter is **not** included in the module's :attr:`state_dict`."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.register_state_dict_pre_hook:1
+msgid ""
+"These hooks will be called with arguments: ``self``, ``prefix``, and "
+"``keep_vars`` before calling ``state_dict`` on ``self``. The registered "
+"hooks can be used to perform pre-processing before the ``state_dict`` "
+"call is made."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.requires_grad_:1
+msgid "Change if autograd should record operations on parameters in this module."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.requires_grad_:4
+msgid ""
+"This method sets the parameters' :attr:`requires_grad` attributes in-"
+"place."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.requires_grad_:7
+msgid ""
+"This method is helpful for freezing part of the module for finetuning or "
+"training parts of a model individually (e.g., GAN training)."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.requires_grad_:10
+msgid ""
+"See :ref:`locally-disable-grad-doc` for a comparison between "
+"`.requires_grad_()` and several similar mechanisms that may be confused "
+"with it."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.requires_grad_:13
+msgid ""
+"whether autograd should record operations on parameters in this module. "
+"Default: ``True``."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.set_extra_state:1
+msgid ""
+"This function is called from :func:`load_state_dict` to handle any extra "
+"state found within the `state_dict`. Implement this function and a "
+"corresponding :func:`get_extra_state` for your module if you need to "
+"store extra state within its `state_dict`."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.set_extra_state:6
+msgid "Extra state from the `state_dict`"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.share_memory:1
+msgid "See :meth:`torch.Tensor.share_memory_`"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.state_dict:1
+msgid ""
+"Returns a dictionary containing references to the whole state of the "
+"module."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.state_dict:3
+msgid ""
+"Both parameters and persistent buffers (e.g. running averages) are "
+"included. Keys are corresponding parameter and buffer names. Parameters "
+"and buffers set to ``None`` are not included."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.state_dict:8
+msgid ""
+"The returned object is a shallow copy. It contains references to the "
+"module's parameters and buffers."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.state_dict:12
+msgid ""
+"Currently ``state_dict()`` also accepts positional arguments for "
+"``destination``, ``prefix`` and ``keep_vars`` in order. However, this is "
+"being deprecated and keyword arguments will be enforced in future "
+"releases."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.state_dict:18
+msgid ""
+"Please avoid the use of argument ``destination`` as it is not designed "
+"for end-users."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.state_dict:21
+msgid ""
+"If provided, the state of module will be updated into the dict and the "
+"same object is returned. Otherwise, an ``OrderedDict`` will be created "
+"and returned. Default: ``None``."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.state_dict:26
+msgid ""
+"a prefix added to parameter and buffer names to compose the keys in "
+"state_dict. Default: ``''``."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.state_dict:29
+msgid ""
+"by default the :class:`~torch.Tensor` s returned in the state dict are "
+"detached from autograd. If it's set to ``True``, detaching will not be "
+"performed. Default: ``False``."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.state_dict:35
+msgid "a dictionary containing a whole state of the module"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.to:1
+msgid "Moves and/or casts the parameters and buffers."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.to:3
+msgid "This can be called as"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.to:17
+msgid ""
+"Its signature is similar to :meth:`torch.Tensor.to`, but only accepts "
+"floating point or complex :attr:`dtype`\\ s. In addition, this method "
+"will only cast the floating point or complex parameters and buffers to "
+":attr:`dtype` (if given). The integral parameters and buffers will be "
+"moved :attr:`device`, if that is given, but with dtypes unchanged. When "
+":attr:`non_blocking` is set, it tries to convert/move asynchronously with"
+" respect to the host if possible, e.g., moving CPU Tensors with pinned "
+"memory to CUDA devices."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.to:26
+msgid "See below for examples."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.to:31
+msgid "the desired device of the parameters and buffers in this module"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.to:34
+msgid ""
+"the desired floating point or complex dtype of the parameters and buffers"
+" in this module"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.to:37
+msgid ""
+"Tensor whose dtype and device are the desired dtype and device for all "
+"parameters and buffers in this module"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.to:40
+msgid ""
+"the desired memory format for 4D parameters and buffers in this module "
+"(keyword only argument)"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.to:48
+msgid "Examples::"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.to_empty:1
+msgid ""
+"Moves the parameters and buffers to the specified device without copying "
+"storage."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.to_empty:3
+msgid "The desired device of the parameters and buffers in this module."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.train:1
+msgid "Sets the module in training mode."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.train:8
+msgid ""
+"whether to set training mode (``True``) or evaluation mode (``False``). "
+"Default: ``True``."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.type:1
+msgid "Casts all parameters and buffers to :attr:`dst_type`."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.type:6
+msgid "the desired type"
+msgstr ""
+
+#: of torch.nn.modules.module.Module.xpu:1
+msgid "Moves all model parameters and buffers to the XPU."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.xpu:3
+msgid ""
+"This also makes associated parameters and buffers different objects. So "
+"it should be called before constructing optimizer if the module will live"
+" on XPU while being optimized."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.zero_grad:1
+msgid ""
+"Sets gradients of all model parameters to zero. See similar function "
+"under :class:`torch.optim.Optimizer` for more context."
+msgstr ""
+
+#: of torch.nn.modules.module.Module.zero_grad:4
+msgid ""
+"instead of setting to zero, set the grads to None. See "
+":meth:`torch.optim.Optimizer.zero_grad` for details."
+msgstr ""
+
+#: of tensorcircuit.torchnn.QuantumNet:1
+msgid "Bases: :py:class:`~torch.nn.modules.module.Module`"
+msgstr ""
+
+#: ../../source/api/translation.rst:2
+msgid "tensorcircuit.translation"
+msgstr ""
+
+#: of tensorcircuit.translation:1
+msgid "Circuit object translation in different packages"
+msgstr ""
+
+#: of tensorcircuit.translation.eqasm2tc:1
+msgid "Translation qexe/eqasm instruction to tensorcircuit Circuit object"
+msgstr ""
+
+#: of tensorcircuit.translation.eqasm2tc:7
+msgid "lines of ignored code at the head and the tail, defaults to (6, 1)"
+msgstr ""
+
+#: of tensorcircuit.translation.perm_matrix:1
+msgid ""
+"Generate a permutation matrix P. Due to the different convention or "
+"qubits' order in qiskit and tensorcircuit, the unitary represented by the"
+" same circuit is different. They are related by this permutation matrix "
+"P: P @ U_qiskit @ P = U_tc"
+msgstr ""
+
+#: of tensorcircuit.translation.perm_matrix:7
+#: tensorcircuit.translation.qir2cirq:15
+#: tensorcircuit.translation.qir2qiskit:16
+#: tensorcircuit.translation.qiskit2tc:14 tensorcircuit.vis.qir2tex:12
+msgid "# of qubits"
+msgstr ""
+
+#: of tensorcircuit.translation.perm_matrix:9
+msgid "The permutation matrix P"
+msgstr ""
+
+#: of tensorcircuit.translation.qir2cirq:1
+msgid ""
+"Generate a cirq circuit using the quantum intermediate representation "
+"(qir) in tensorcircuit."
+msgstr ""
+
+#: of tensorcircuit.translation.qir2cirq:17
+#: tensorcircuit.translation.qir2qiskit:18
+msgid ""
+"The extra quantum IR of tc circuit including measure and reset on "
+"hardware, defaults to None"
+msgstr ""
+
+#: of tensorcircuit.translation.qir2cirq:20
+msgid "qiskit cirq object"
+msgstr ""
+
+#: of tensorcircuit.translation.qir2cirq:23
+msgid ""
+"#TODO(@erertertet): add default theta to iswap gate add more cirq built-"
+"in gate instead of customized add unitary test with tolerance add support"
+" of cirq built-in ControlledGate for multiplecontroll support more "
+"element in qir, e.g. barrier, measure..."
+msgstr ""
+
+#: of tensorcircuit.translation.qir2json:1
+msgid ""
+"transform qir to json compatible list of dict where array is replaced by "
+"real and imaginary list"
+msgstr ""
+
+#: of tensorcircuit.translation.qir2qiskit:1
+msgid ""
+"Generate a qiskit quantum circuit using the quantum intermediate "
+"representation (qir) in tensorcircuit."
+msgstr ""
+
+#: of tensorcircuit.translation.qir2qiskit:21
+msgid "Circuit initial state in qiskit format"
+msgstr ""
+
+#: of tensorcircuit.translation.qir2qiskit:23
+msgid "qiskit QuantumCircuit object"
+msgstr ""
+
+#: of tensorcircuit.translation.qiskit2tc:1
+msgid "Generate a tensorcircuit circuit using the quantum circuit data in qiskit."
+msgstr ""
+
+#: of tensorcircuit.translation.qiskit2tc:12
+msgid "Quantum circuit data from qiskit."
+msgstr ""
+
+#: of tensorcircuit.translation.qiskit2tc:16
+msgid "Input state of the circuit. Default is None."
+msgstr ""
+
+#: of tensorcircuit.translation.qiskit2tc:18
+msgid "``Circuit``, ``DMCircuit`` or ``MPSCircuit``"
+msgstr ""
+
+#: of tensorcircuit.translation.qiskit2tc:26
+msgid "A quantum circuit in tensorcircuit"
+msgstr ""
+
+#: of tensorcircuit.translation.qiskit_from_qasm_str_ordered_measure:1
+msgid ""
+"qiskit ``from_qasm_str`` method cannot keep the order of measure as the "
+"qasm file, we provide this alternative function in case the order of "
+"measure instruction matters"
+msgstr ""
+
+#: of tensorcircuit.translation.qiskit_from_qasm_str_ordered_measure:4
+msgid "open qasm str"
+msgstr ""
+
+#: of tensorcircuit.translation.qiskit_from_qasm_str_ordered_measure:6
+msgid "``qiskit.circuit.QuantumCircuit``"
+msgstr ""
+
+#: ../../source/api/utils.rst:2
+msgid "tensorcircuit.utils"
+msgstr ""
+
+#: of tensorcircuit.utils:1
+msgid "Helper functions"
+msgstr ""
+
+#: of tensorcircuit.utils.append:1
+msgid "Functional programming paradigm to build function pipeline"
+msgstr ""
+
+#: of tensorcircuit.utils.append:9
+msgid "The function which are attached with other functions"
+msgstr ""
+
+#: of tensorcircuit.utils.append:11
+msgid "Function to be attached"
+msgstr ""
+
+#: of tensorcircuit.utils.append:13
+msgid "The final results after function pipeline"
+msgstr ""
+
+#: of tensorcircuit.utils.arg_alias:1
+msgid "function argument alias decorator with new docstring"
+msgstr ""
+
+#: of tensorcircuit.utils.arg_alias:7
+msgid "whether to add doc for these new alias arguments, defaults True"
+msgstr ""
+
+#: of tensorcircuit.utils.arg_alias:9
+msgid "the decorated function"
+msgstr ""
+
+#: of tensorcircuit.utils.benchmark:1
+msgid "benchmark jittable function with staging time and running time"
+msgstr ""
+
+#: of tensorcircuit.utils.benchmark:5
+msgid "_description_, defaults to 5"
+msgstr ""
+
+#: of tensorcircuit.utils.is_m1mac:1
+msgid "check whether the running platform is MAC with M1 chip"
+msgstr ""
+
+#: of tensorcircuit.utils.is_m1mac:3
+msgid "True for MAC M1 platform"
+msgstr ""
+
+#: of tensorcircuit.utils.return_partial:1
+msgid ""
+"Return a callable function for output ith parts of the original output "
+"along the first axis. Original output supports List and Tensor."
+msgstr ""
+
+#: of tensorcircuit.utils.return_partial:20
+msgid "The function to be applied this method"
+msgstr ""
+
+#: of tensorcircuit.utils.return_partial:22
+msgid "The ith parts of original output along the first axis (axis=0 or dim=0)"
+msgstr ""
+
+#: of tensorcircuit.utils.return_partial:24
+msgid "The modified callable function"
+msgstr ""
+
+#: ../../source/api/vis.rst:2
+msgid "tensorcircuit.vis"
+msgstr ""
+
+#: of tensorcircuit.vis:1
+msgid "Visualization on circuits"
+msgstr ""
+
+#: of tensorcircuit.vis.gate_name_trans:1
+msgid ""
+"Translating from the gate name to gate information including the number "
+"of control qubits and the reduced gate name."
+msgstr ""
+
+#: of tensorcircuit.vis.gate_name_trans:10
+msgid "String of gate name"
+msgstr ""
+
+#: of tensorcircuit.vis.gate_name_trans:12
+msgid "# of control qubits, reduced gate name"
+msgstr ""
+
+#: of tensorcircuit.vis.qir2tex:1
+msgid ""
+"Generate Tex code from 'qir' string to illustrate the circuit structure. "
+"This visualization is based on quantikz package."
+msgstr ""
+
+#: of tensorcircuit.vis.qir2tex:10
+msgid "The quantum intermediate representation of a circuit in tensorcircuit."
+msgstr ""
+
+#: of tensorcircuit.vis.qir2tex:14
+msgid "Initial state, default is an all zero state '000...000'."
+msgstr ""
+
+#: of tensorcircuit.vis.qir2tex:16
+msgid ""
+"Measurement Basis, default is None which means no measurement in the end "
+"of the circuit."
+msgstr ""
+
+#: of tensorcircuit.vis.qir2tex:19
+msgid ""
+"If true, a right compression of the circuit will be conducted. A right "
+"compression means we will try to shift gates from right to left if "
+"possible."
+msgstr ""
+
+#: of tensorcircuit.vis.qir2tex:21
+msgid ""
+"Default is false. :type rcompress: bool :param lcompress: If true, a left"
+" compression of the circuit will be conducted."
+msgstr ""
+
+#: of tensorcircuit.vis.qir2tex:24
+msgid ""
+"A left compression means we will try to shift gates from left to right if"
+" possible. Default is false."
+msgstr ""
+
+#: of tensorcircuit.vis.qir2tex:27
+msgid ""
+"If true, the tex code will be designed to generate a standalone document."
+" Default is false which means the generated tex code is just a quantikz "
+"code block."
+msgstr ""
+
+#: of tensorcircuit.vis.qir2tex:30
+msgid ""
+"If true, a string table of tex code will also be returned. Default is "
+"false."
+msgstr ""
+
+#: of tensorcircuit.vis.qir2tex:33
+msgid ""
+"Tex code of circuit visualization based on quantikz package. If "
+"return_string_table is true, a string table of tex code will also be "
+"returned."
+msgstr ""
+
+#: of tensorcircuit.vis.render_pdf:1
+msgid ""
+"Generate the PDF file with given latex string and filename. Latex command"
+" and file path can be specified. When notebook is True, convert the "
+"output PDF file to image and return a Image object."
+msgstr ""
+
+#: of tensorcircuit.vis.render_pdf:15
+msgid "String of latex content"
+msgstr ""
+
+#: of tensorcircuit.vis.render_pdf:17
+msgid "File name, defaults to random UUID `str(uuid4())`"
+msgstr ""
+
+#: of tensorcircuit.vis.render_pdf:19
+msgid "Executable Latex command, defaults to `pdflatex`"
+msgstr ""
+
+#: of tensorcircuit.vis.render_pdf:21
+msgid "File path, defaults to current working place `os.getcwd()`"
+msgstr ""
+
+#: of tensorcircuit.vis.render_pdf:25
+msgid "if notebook is True, return `Image` object; otherwise return `None`"
+msgstr ""
+
+#~ msgid ""
+#~ "This is a method that implementers "
+#~ "of subclasses of `Layer` or `Model` "
+#~ "can override if they need a "
+#~ "state-creation step in-between layer "
+#~ "instantiation and layer call."
+#~ msgstr ""
+
+#~ msgid "This is typically used to create the weights of `Layer` subclasses."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Note here that `call()` method in "
+#~ "`tf.keras` is little bit different from"
+#~ " `keras` API. In `keras` API, you "
+#~ "can pass support masking for layers "
+#~ "as additional arguments. Whereas `tf.keras`"
+#~ " has `compute_mask()` method to support "
+#~ "masking."
+#~ msgstr ""
+
+#~ msgid "Modules for DQAS framework"
+#~ msgstr ""
+
+#~ msgid "DQAS framework entrypoint"
+#~ msgstr ""
+
+#~ msgid "Parameters"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "function with input of data instance,"
+#~ " circuit parameters theta and structural"
+#~ " paramter k, return tuple of "
+#~ "objective value and gradient with "
+#~ "respect to theta"
+#~ msgstr ""
+
+#~ msgid "data generator as dataset"
+#~ msgstr ""
+
+#~ msgid "list of operations as primitive operator pool"
+#~ msgstr ""
+
+#~ msgid "the default layer number of the circuit ansatz"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "shape of circuit parameter pool, in "
+#~ "general p_stp*l, where l is the "
+#~ "max number of circuit parameters for "
+#~ "op in the operator pool"
+#~ msgstr ""
+
+#~ msgid "the same as p in the most times"
+#~ msgstr ""
+
+#~ msgid "batch size of one epoch"
+#~ msgstr ""
+
+#~ msgid "prethermal update times"
+#~ msgstr ""
+
+#~ msgid "training epochs"
+#~ msgstr ""
+
+#~ msgid "parallel thread number, 0 to disable multiprocessing model by default"
+#~ msgstr ""
+
+#~ msgid "set verbose log to print"
+#~ msgstr ""
+
+#~ msgid "function to output verbose information"
+#~ msgstr ""
+
+#~ msgid "function return intermiediate result for final history list"
+#~ msgstr ""
+
+#~ msgid "cutoff probability to avoid peak distribution"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "function accepting list of objective "
+#~ "values and return the baseline value "
+#~ "used in the next round"
+#~ msgstr ""
+
+#~ msgid "return noise with the same shape as circuit parameter pool"
+#~ msgstr ""
+
+#~ msgid "initial values for circuit parameter pool"
+#~ msgstr ""
+
+#~ msgid "initial values for probabilistic model parameters"
+#~ msgstr ""
+
+#~ msgid "optimizer for circuit parameters theta"
+#~ msgstr ""
+
+#~ msgid "optimizer for model parameters alpha"
+#~ msgstr ""
+
+#~ msgid "optimizer for circuit parameters in prethermal stage"
+#~ msgstr ""
+
+#~ msgid "fixed structural parameters for prethermal training"
+#~ msgstr ""
+
+#~ msgid "regularization function for model parameters alpha"
+#~ msgstr ""
+
+#~ msgid "regularization function for circuit parameters theta"
+#~ msgstr ""
+
+#~ msgid "Returns"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "The probabilistic model based DQAS, can"
+#~ " use extensively for DQAS case for"
+#~ " ``NMF`` probabilistic model."
+#~ msgstr ""
+
+#~ msgid "vag func, return loss and nabla lnp"
+#~ msgstr ""
+
+#~ msgid "keras model"
+#~ msgstr ""
+
+#~ msgid "sample func of logic with keras model input"
+#~ msgstr ""
+
+#~ msgid "input data pipeline generator"
+#~ msgstr ""
+
+#~ msgid "operation pool"
+#~ msgstr ""
+
+#~ msgid "depth for DQAS"
+#~ msgstr ""
+
+#~ msgid "parallel kernels"
+#~ msgstr ""
+
+#~ msgid "final loss function in terms of average of sub loss for each circuit"
+#~ msgstr ""
+
+#~ msgid "derivative function for ``loss_func``"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Call in customized functions and grab"
+#~ " variables within DQAS framework function"
+#~ " by var name str."
+#~ msgstr ""
+
+#~ msgid "The DQAS framework function"
+#~ msgstr ""
+
+#~ msgid "Variables within the DQAS framework"
+#~ msgstr ""
+
+#~ msgid "Return type"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "This function works only when nnp "
+#~ "has the same shape as stp, i.e."
+#~ " one parameter for each op."
+#~ msgstr ""
+
+#~ msgid "The kernel for multiprocess to run parallel in DQAS function/"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "parallel variational parameter training and"
+#~ " search to avoid local minimum not"
+#~ " limited to qaoa setup as the "
+#~ "function name indicates, as long as "
+#~ "you provided suitable `vag_func`"
+#~ msgstr ""
+
+#~ msgid "data input generator for vag_func"
+#~ msgstr ""
+
+#~ msgid "vag_kernel"
+#~ msgstr ""
+
+#~ msgid "number of tries"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "for optimization problem the input is"
+#~ " in general fixed so batch is "
+#~ "often 1"
+#~ msgstr ""
+
+#~ msgid "number of parallel jobs"
+#~ msgstr ""
+
+#~ msgid "mean value of normal distribution for nnp"
+#~ msgstr ""
+
+#~ msgid "std deviation of normal distribution for nnp"
+#~ msgstr ""
+
+#~ msgid "Doesn't support prob model DQAS search."
+#~ msgstr ""
+
+#~ msgid "Modules for graph instance data and more"
+#~ msgstr ""
+
+#~ msgid "```python d = nx.to_dict_of_dicts(g) ```"
+#~ msgstr ""
+
+#~ msgid "1D PBC chain with n sites."
+#~ msgstr ""
+
+#~ msgid "The number of nodes"
+#~ msgstr ""
+
+#~ msgid "The resulted graph g"
+#~ msgstr ""
+
+#~ msgid "all graphs with m edge out from g"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Generate a reduced graph with given "
+#~ "ratio of edges compared to the "
+#~ "original graph g."
+#~ msgstr ""
+
+#~ msgid "The base graph"
+#~ msgstr ""
+
+#~ msgid "number of edges kept, default half of the edges"
+#~ msgstr ""
+
+#~ msgid "The resulted reduced graph"
+#~ msgstr ""
+
+#~ msgid "Split the graph in exactly ``split`` piece evenly."
+#~ msgstr ""
+
+#~ msgid "The mother graph"
+#~ msgstr ""
+
+#~ msgid "The number of the graph we want to divide into, defaults to 2"
+#~ msgstr ""
+
+#~ msgid "List of graph instance of size ``split``"
+#~ msgstr ""
+
+#~ msgid "Module for functions adding layers of circuits"
+#~ msgstr ""
+
+#~ msgid "Hlayer"
+#~ msgstr ""
+
+#~ msgid "anyrxlayer"
+#~ msgstr ""
+
+#~ msgid "anyrylayer"
+#~ msgstr ""
+
+#~ msgid "anyrzlayer"
+#~ msgstr ""
+
+#~ msgid "anyswaplayer"
+#~ msgstr ""
+
+#~ msgid "anyxxlayer"
+#~ msgstr ""
+
+#~ msgid "anyxylayer"
+#~ msgstr ""
+
+#~ msgid "anyxzlayer"
+#~ msgstr ""
+
+#~ msgid "anyyxlayer"
+#~ msgstr ""
+
+#~ msgid "anyyylayer"
+#~ msgstr ""
+
+#~ msgid "anyyzlayer"
+#~ msgstr ""
+
+#~ msgid "anyzxlayer"
+#~ msgstr ""
+
+#~ msgid "anyzylayer"
+#~ msgstr ""
+
+#~ msgid "anyzzlayer"
+#~ msgstr ""
+
+#~ msgid "cnotlayer"
+#~ msgstr ""
+
+#~ msgid "rxlayer"
+#~ msgstr ""
+
+#~ msgid "rylayer"
+#~ msgstr ""
+
+#~ msgid "rzlayer"
+#~ msgstr ""
+
+#~ msgid "swaplayer"
+#~ msgstr ""
+
+#~ msgid "xxgate"
+#~ msgstr ""
+
+#~ msgid "xxlayer"
+#~ msgstr ""
+
+#~ msgid "xygate"
+#~ msgstr ""
+
+#~ msgid "xylayer"
+#~ msgstr ""
+
+#~ msgid "xzgate"
+#~ msgstr ""
+
+#~ msgid "xzlayer"
+#~ msgstr ""
+
+#~ msgid "yxgate"
+#~ msgstr ""
+
+#~ msgid "yxlayer"
+#~ msgstr ""
+
+#~ msgid "yygate"
+#~ msgstr ""
+
+#~ msgid "yylayer"
+#~ msgstr ""
+
+#~ msgid "yzgate"
+#~ msgstr ""
+
+#~ msgid "yzlayer"
+#~ msgstr ""
+
+#~ msgid "zxgate"
+#~ msgstr ""
+
+#~ msgid "zxlayer"
+#~ msgstr ""
+
+#~ msgid "zygate"
+#~ msgstr ""
+
+#~ msgid "zylayer"
+#~ msgstr ""
+
+#~ msgid "zzgate"
+#~ msgstr ""
+
+#~ msgid "zzlayer"
+#~ msgstr ""
+
+#~ msgid "$$e^{-i heta_i \\sigma}$$"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "The following function should be used"
+#~ " to generate layers with special "
+#~ "case. As its soundness depends on "
+#~ "the nature of the task or problem,"
+#~ " it doesn't always make sense."
+#~ msgstr ""
+
+#~ msgid "$$e^{-i heta \\sigma}$$"
+#~ msgstr ""
+
+#~ msgid "$$e^{-i heta \\sigma}$$"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "A collection of useful function snippets"
+#~ " that irrelevant with the main "
+#~ "modules or await for furthere refactor"
+#~ msgstr ""
+
+#~ msgid "Bases: :py:class:`object`"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "color cirq circuit SVG for given "
+#~ "gates, a small tool to hack the"
+#~ " cirq SVG"
+#~ msgstr ""
+
+#~ msgid "integer coordinate which gate is colored"
+#~ msgstr ""
+
+#~ msgid "transform repr form of an array to real numpy array"
+#~ msgstr ""
+
+#~ msgid "DQAS application kernels as vag functions"
+#~ msgstr ""
+
+#~ msgid "1D array for full wavefunction, the basis is in lexcical order"
+#~ msgstr ""
+
+#~ msgid "nx.Graph"
+#~ msgstr ""
+
+#~ msgid "transformation functions before averaged"
+#~ msgstr ""
+
+#~ msgid "as f3"
+#~ msgstr ""
+
+#~ msgid "maxcut energy for n qubit wavefunction i-th basis"
+#~ msgstr ""
+
+#~ msgid "ranged from 0 to 2**n-1"
+#~ msgstr ""
+
+#~ msgid "number of qubits"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "deprecated as non tf and non "
+#~ "flexible, use the combination of "
+#~ "``reduced_density_matrix`` and ``entropy`` instead."
+#~ msgstr ""
+
+#~ msgid "deprecated, current version in tc.quantum"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "value and gradient, currently only "
+#~ "tensorflow backend is supported jax and"
+#~ " numpy seems to be slow in "
+#~ "circuit simulation anyhow. *deprecated*"
+#~ msgstr ""
+
+#~ msgid "if lbd=0, take energy as objective"
+#~ msgstr ""
+
+#~ msgid "if as default 0, overlap will not compute in the process"
+#~ msgstr ""
+
+#~ msgid "Fill single qubit gates according to placeholder on circuit"
+#~ msgstr ""
+
+#~ msgid "Hamiltonian measurements for Heisenberg model on graph lattice g"
+#~ msgstr ""
-#: of tensorcircuit.vis.render_pdf:15
-msgid "String of latex content"
-msgstr ""
+#~ msgid "short cut for ``cirq.LineQubit(i)``"
+#~ msgstr ""
-#: of tensorcircuit.vis.render_pdf:17
-msgid "File name, defaults to random UUID `str(uuid4())`"
-msgstr ""
+#~ msgid "QAOA block encoding kernel, support 2 params in one op"
+#~ msgstr ""
-#: of tensorcircuit.vis.render_pdf:19
-msgid "Executable Latex command, defaults to `pdflatex`"
-msgstr ""
+#~ msgid ""
+#~ "training QAOA with only optimizing "
+#~ "circuit parameters, can be well replaced"
+#~ " with more general function `DQAS_search`"
+#~ msgstr ""
-#: of tensorcircuit.vis.render_pdf:21
-msgid "File path, defaults to current working place `os.getcwd()`"
-msgstr ""
+#~ msgid "multi parameter for one layer"
+#~ msgstr ""
-#: of tensorcircuit.vis.render_pdf:23
-msgid "[description], defaults to False"
-msgstr ""
+#~ msgid "kw arguments for measurements_func"
+#~ msgstr ""
-#: of tensorcircuit.vis.render_pdf:25
-msgid "if notebook is True, return `Image` object; otherwise return `None`"
-msgstr ""
+#~ msgid "loss function, gradient of nnp"
+#~ msgstr ""
#~ msgid ""
-#~ "This is a method that implementers "
-#~ "of subclasses of `Layer` or `Model` "
-#~ "can override if they need a "
-#~ "state-creation step in-between layer "
-#~ "instantiation and layer call."
+#~ "tensorflow quantum backend compare to "
+#~ "qaoa_vag which is tensorcircuit backend"
#~ msgstr ""
-#~ msgid "This is typically used to create the weights of `Layer` subclasses."
+#~ msgid "Hamiltonian for tfim on lattice defined by graph g"
+#~ msgstr ""
+
+#~ msgid "cirq.PauliSum as operators for tfq expectation layer"
#~ msgstr ""
#~ msgid ""
-#~ "Note here that `call()` method in "
-#~ "`tf.keras` is little bit different from"
-#~ " `keras` API. In `keras` API, you "
-#~ "can pass support masking for layers "
-#~ "as additional arguments. Whereas `tf.keras`"
-#~ " has `compute_mask()` method to support "
-#~ "masking."
+#~ "generate random wavefunction from "
+#~ "approximately Haar measure, reference: "
+#~ "https://doi.org/10.1063/1.4983266"
#~ msgstr ""
-#~ msgid "Modules for DQAS framework"
+#~ msgid "repetition of the blocks"
#~ msgstr ""
-#~ msgid "DQAS framework entrypoint"
+#~ msgid "random Haar measure approximation"
#~ msgstr ""
-#~ msgid "Parameters"
+#~ msgid "cirq.Circuit, empty circuit"
+#~ msgstr ""
+
+#~ msgid "# of qubit"
#~ msgstr ""
#~ msgid ""
-#~ "function with input of data instance,"
-#~ " circuit parameters theta and structural"
-#~ " paramter k, return tuple of "
-#~ "objective value and gradient with "
-#~ "respect to theta"
+#~ "One-hot variational autoregressive models "
+#~ "for multiple categorical choices beyond "
+#~ "binary"
#~ msgstr ""
-#~ msgid "data generator as dataset"
+#~ msgid "Bases: :py:class:`keras.engine.training.Model`"
#~ msgstr ""
-#~ msgid "list of operations as primitive operator pool"
+#~ msgid "Calls the model on new inputs and returns the outputs as tensors."
#~ msgstr ""
-#~ msgid "the default layer number of the circuit ansatz"
+#~ msgid ""
+#~ "In this case `call()` just reapplies "
+#~ "all ops in the graph to the "
+#~ "new inputs (e.g. build a new "
+#~ "computational graph from the provided "
+#~ "inputs)."
#~ msgstr ""
#~ msgid ""
-#~ "shape of circuit parameter pool, in "
-#~ "general p_stp*l, where l is the "
-#~ "max number of circuit parameters for "
-#~ "op in the operator pool"
+#~ "Note: This method should not be "
+#~ "called directly. It is only meant "
+#~ "to be overridden when subclassing "
+#~ "`tf.keras.Model`. To call a model on "
+#~ "an input, always use the `__call__()`"
+#~ " method, i.e. `model(inputs)`, which relies"
+#~ " on the underlying `call()` method."
#~ msgstr ""
-#~ msgid "the same as p in the most times"
+#~ msgid "Args:"
#~ msgstr ""
-#~ msgid "batch size of one epoch"
+#~ msgid ""
+#~ "inputs: Input tensor, or dict/list/tuple "
+#~ "of input tensors. training: Boolean or"
+#~ " boolean scalar tensor, indicating whether"
+#~ " to run"
#~ msgstr ""
-#~ msgid "prethermal update times"
+#~ msgid "the `Network` in training mode or inference mode."
#~ msgstr ""
-#~ msgid "training epochs"
+#~ msgid "mask: A mask or list of masks. A mask can be either a boolean tensor or"
#~ msgstr ""
-#~ msgid "parallel thread number, 0 to disable multiprocessing model by default"
+#~ msgid "None (no mask). For more details, check the guide"
#~ msgstr ""
-#~ msgid "set verbose log to print"
+#~ msgid "[here](https://www.tensorflow.org/guide/keras/masking_and_padding)."
#~ msgstr ""
-#~ msgid "function to output verbose information"
+#~ msgid "Returns:"
#~ msgstr ""
-#~ msgid "function return intermiediate result for final history list"
+#~ msgid ""
+#~ "A tensor if there is a single "
+#~ "output, or a list of tensors if"
+#~ " there are more than one outputs."
#~ msgstr ""
-#~ msgid "cutoff probability to avoid peak distribution"
+#~ msgid "Bases: :py:class:`keras.engine.base_layer.Layer`"
#~ msgstr ""
#~ msgid ""
-#~ "function accepting list of objective "
-#~ "values and return the baseline value "
-#~ "used in the next round"
+#~ "Creates the variables of the layer "
+#~ "(optional, for subclass implementers)."
#~ msgstr ""
-#~ msgid "return noise with the same shape as circuit parameter pool"
+#~ msgid ""
+#~ "This is a method that implementers "
+#~ "of subclasses of `Layer` or `Model` "
+#~ "can override if they need a "
+#~ "state-creation step in-between layer "
+#~ "instantiation and layer call. It is "
+#~ "invoked automatically before the first "
+#~ "execution of `call()`."
#~ msgstr ""
-#~ msgid "initial values for circuit parameter pool"
+#~ msgid ""
+#~ "This is typically used to create "
+#~ "the weights of `Layer` subclasses (at"
+#~ " the discretion of the subclass "
+#~ "implementer)."
#~ msgstr ""
-#~ msgid "initial values for probabilistic model parameters"
+#~ msgid "input_shape: Instance of `TensorShape`, or list of instances of"
#~ msgstr ""
-#~ msgid "optimizer for circuit parameters theta"
+#~ msgid ""
+#~ "`TensorShape` if the layer expects a "
+#~ "list of inputs (one instance per "
+#~ "input)."
#~ msgstr ""
-#~ msgid "optimizer for model parameters alpha"
+#~ msgid "This is where the layer's logic lives."
#~ msgstr ""
-#~ msgid "optimizer for circuit parameters in prethermal stage"
+#~ msgid ""
+#~ "The `call()` method may not create "
+#~ "state (except in its first invocation,"
+#~ " wrapping the creation of variables "
+#~ "or other resources in `tf.init_scope()`). "
+#~ "It is recommended to create state "
+#~ "in `__init__()`, or the `build()` method"
+#~ " that is called automatically before "
+#~ "`call()` executes the first time."
#~ msgstr ""
-#~ msgid "fixed structural parameters for prethermal training"
+#~ msgid "inputs: Input tensor, or dict/list/tuple of input tensors."
#~ msgstr ""
-#~ msgid "regularization function for model parameters alpha"
+#~ msgid ""
+#~ "The first positional `inputs` argument "
+#~ "is subject to special rules: - "
+#~ "`inputs` must be explicitly passed. A"
+#~ " layer cannot have zero"
#~ msgstr ""
-#~ msgid "regularization function for circuit parameters theta"
+#~ msgid ""
+#~ "arguments, and `inputs` cannot be "
+#~ "provided via the default value of "
+#~ "a keyword argument."
#~ msgstr ""
-#~ msgid "Returns"
+#~ msgid "NumPy array or Python scalar values in `inputs` get cast as tensors."
+#~ msgstr ""
+
+#~ msgid "Keras mask metadata is only collected from `inputs`."
#~ msgstr ""
#~ msgid ""
-#~ "The probabilistic model based DQAS, can"
-#~ " use extensively for DQAS case for"
-#~ " ``NMF`` probabilistic model."
+#~ "Layers are built (`build(input_shape)` method)"
+#~ " using shape info from `inputs` only."
#~ msgstr ""
-#~ msgid "vag func, return loss and nabla lnp"
+#~ msgid "`input_spec` compatibility is only checked against `inputs`."
#~ msgstr ""
-#~ msgid "keras model"
+#~ msgid ""
+#~ "Mixed precision input casting is only"
+#~ " applied to `inputs`. If a layer "
+#~ "has tensor arguments in `*args` or "
+#~ "`**kwargs`, their casting behavior in "
+#~ "mixed precision should be handled "
+#~ "manually."
#~ msgstr ""
-#~ msgid "sample func of logic with keras model input"
+#~ msgid "The SavedModel input specification is generated using `inputs` only."
#~ msgstr ""
-#~ msgid "input data pipeline generator"
+#~ msgid ""
+#~ "Integration with various ecosystem packages"
+#~ " like TFMOT, TFLite, TF.js, etc is"
+#~ " only supported for `inputs` and not"
+#~ " for tensors in positional and "
+#~ "keyword arguments."
#~ msgstr ""
-#~ msgid "operation pool"
+#~ msgid "*args: Additional positional arguments. May contain tensors, although"
#~ msgstr ""
-#~ msgid "depth for DQAS"
+#~ msgid "this is not recommended, for the reasons above."
#~ msgstr ""
-#~ msgid "parallel kernels"
+#~ msgid "**kwargs: Additional keyword arguments. May contain tensors, although"
#~ msgstr ""
-#~ msgid "final loss function in terms of average of sub loss for each circuit"
+#~ msgid ""
+#~ "this is not recommended, for the "
+#~ "reasons above. The following optional "
+#~ "keyword arguments are reserved: - "
+#~ "`training`: Boolean scalar tensor of "
+#~ "Python boolean indicating"
#~ msgstr ""
-#~ msgid "derivative function for ``loss_func``"
+#~ msgid "whether the `call` is meant for training or inference."
#~ msgstr ""
#~ msgid ""
-#~ "Call in customized functions and grab"
-#~ " variables within DQAS framework function"
-#~ " by var name str."
+#~ "`mask`: Boolean input mask. If the "
+#~ "layer's `call()` method takes a `mask`"
+#~ " argument, its default value will be"
+#~ " set to the mask generated for "
+#~ "`inputs` by the previous layer (if "
+#~ "`input` did come from a layer that"
+#~ " generated a corresponding mask, i.e. "
+#~ "if it came from a Keras layer "
+#~ "with masking support)."
#~ msgstr ""
-#~ msgid "The DQAS framework function"
+#~ msgid "A tensor or list/tuple of tensors."
#~ msgstr ""
-#~ msgid "Variables within the DQAS framework"
+#~ msgid "Relevant classes for VQNHE"
#~ msgstr ""
-#~ msgid "Return type"
+#~ msgid ""
+#~ "Bases: "
+#~ ":py:class:`keras.optimizer_v2.learning_rate_schedule.LearningRateSchedule`"
#~ msgstr ""
-#~ msgid ""
-#~ "This function works only when nnp "
-#~ "has the same shape as stp, i.e."
-#~ " one parameter for each op."
+#~ msgid "Dense layer but with complex weights, used for building complex RBM"
#~ msgstr ""
-#~ msgid "The kernel for multiprocess to run parallel in DQAS function/"
+#~ msgid "VQNHE"
#~ msgstr ""
-#~ msgid ""
-#~ "parallel variational parameter training and"
-#~ " search to avoid local minimum not"
-#~ " limited to qaoa setup as the "
-#~ "function name indicates, as long as "
-#~ "you provided suitable `vag_func`"
+#~ msgid "[description]"
#~ msgstr ""
-#~ msgid "data input generator for vag_func"
+#~ msgid "VQE"
#~ msgstr ""
-#~ msgid "vag_kernel"
+#~ msgid "Backend register"
#~ msgstr ""
-#~ msgid "number of tries"
+#~ msgid "Get the `tc.backend` object."
#~ msgstr ""
-#~ msgid ""
-#~ "for optimization problem the input is"
-#~ " in general fixed so batch is "
-#~ "often 1"
+#~ msgid "\"numpy\", \"tensorflow\", \"jax\", \"pytorch\""
#~ msgstr ""
-#~ msgid "number of parallel jobs"
+#~ msgid "Raises"
#~ msgstr ""
-#~ msgid "mean value of normal distribution for nnp"
+#~ msgid "Backend doesn't exist for `backend` argument."
#~ msgstr ""
-#~ msgid "std deviation of normal distribution for nnp"
+#~ msgid "The `tc.backend` object that with all registered universal functions."
#~ msgstr ""
-#~ msgid "Doesn't support prob model DQAS search."
+#~ msgid "Backend magic inherited from tensornetwork: jax backend"
#~ msgstr ""
-#~ msgid "Modules for graph instance data and more"
+#~ msgid "Bases: :py:class:`tensornetwork.backends.jax.jax_backend.JaxBackend`"
#~ msgstr ""
-#~ msgid "```python d = nx.to_dict_of_dicts(g) ```"
+#~ msgid ""
+#~ "See the original backend API at "
+#~ "``jax backend``. "
+#~ "`_"
#~ msgstr ""
-#~ msgid "1D PBC chain with n sites."
+#~ msgid "Returns the elementwise absolute value of tensor. Args:"
#~ msgstr ""
-#~ msgid "The number of nodes"
+#~ msgid "tensor: An input tensor."
#~ msgstr ""
-#~ msgid "The resulted graph g"
+#~ msgid "tensor: Its elementwise absolute value."
#~ msgstr ""
-#~ msgid "all graphs with m edge out from g"
+#~ msgid "Return the index of maximum of an array an axis."
#~ msgstr ""
-#~ msgid ""
-#~ "Generate a reduced graph with given "
-#~ "ratio of edges compared to the "
-#~ "original graph g."
+#~ msgid "[description], defaults to 0, different behavior from numpy defaults!"
#~ msgstr ""
-#~ msgid "The base graph"
+#~ msgid "Return the index of minimum of an array an axis."
#~ msgstr ""
-#~ msgid "number of edges kept, default half of the edges"
+#~ msgid "Cast the tensor dtype of a ``a``."
#~ msgstr ""
-#~ msgid "The resulted reduced graph"
+#~ msgid "tensor"
#~ msgstr ""
-#~ msgid "Split the graph in exactly ``split`` piece evenly."
+#~ msgid "\"float32\", \"float64\", \"complex64\", \"complex128\""
#~ msgstr ""
-#~ msgid "The mother graph"
+#~ msgid "``a`` of new dtype"
#~ msgstr ""
-#~ msgid "The number of the graph we want to divide into, defaults to 2"
+#~ msgid "Join a sequence of arrays along an existing axis."
#~ msgstr ""
-#~ msgid "List of graph instance of size ``split``"
+#~ msgid "[description], defaults to 0"
#~ msgstr ""
-#~ msgid "Module for functions adding layers of circuits"
+#~ msgid ""
+#~ "The native cond for XLA compiling, "
+#~ "wrapper for ``tf.cond`` and limited "
+#~ "functionality of ``jax.lax.cond``."
#~ msgstr ""
-#~ msgid "Hlayer"
+#~ msgid "Convert a np.array or a tensor to a tensor type for the backend."
#~ msgstr ""
-#~ msgid "anyrxlayer"
+#~ msgid ""
+#~ "Generate the coo format sparse matrix"
+#~ " from indices and values, which is"
+#~ " the only sparse format supported in"
+#~ " different ML backends."
#~ msgstr ""
-#~ msgid "anyrylayer"
+#~ msgid "shape [n, 2] for n non zero values in the returned matrix"
#~ msgstr ""
-#~ msgid "anyrzlayer"
+#~ msgid "shape [n]"
#~ msgstr ""
-#~ msgid "anyswaplayer"
+#~ msgid "Tuple[int, ...]"
#~ msgstr ""
-#~ msgid "anyxxlayer"
+#~ msgid "Return the expm of ``a``, matrix exponential."
#~ msgstr ""
-#~ msgid "anyxylayer"
+#~ msgid "tensor in matrix form"
#~ msgstr ""
-#~ msgid "anyxzlayer"
+#~ msgid "matrix exponential of matrix ``a``"
#~ msgstr ""
-#~ msgid "anyyxlayer"
+#~ msgid "Return the cosine of a tensor ``a``."
#~ msgstr ""
-#~ msgid "anyyylayer"
+#~ msgid "cosine of ``a``"
#~ msgstr ""
-#~ msgid "anyyzlayer"
+#~ msgid "Return the cumulative sum of the elements along a given axis."
#~ msgstr ""
-#~ msgid "anyzxlayer"
+#~ msgid ""
+#~ "The default behavior is the same "
+#~ "as numpy, different from tf/torch as "
+#~ "cumsum of the flatten 1D array, "
+#~ "defaults to None"
#~ msgstr ""
-#~ msgid "anyzylayer"
+#~ msgid "Return the copy of tensor ''a''."
#~ msgstr ""
-#~ msgid "anyzzlayer"
+#~ msgid "Return an identity matrix of dimension `dim`"
#~ msgstr ""
-#~ msgid "cnotlayer"
+#~ msgid ""
+#~ "Depending on specific backends, `dim` "
+#~ "has to be either an int (numpy,"
+#~ " torch, tensorflow) or a `ShapeType` "
+#~ "object (for block-sparse backends). "
+#~ "Block-sparse behavior is currently not "
+#~ "supported"
#~ msgstr ""
-#~ msgid "rxlayer"
+#~ msgid ""
+#~ "N (int): The dimension of the "
+#~ "returned matrix. dtype: The dtype of "
+#~ "the returned matrix. M (int): The "
+#~ "dimension of the returned matrix."
#~ msgstr ""
-#~ msgid "rylayer"
+#~ msgid "Return the function which is the grad function of input ``f``."
#~ msgstr ""
-#~ msgid "rzlayer"
+#~ msgid "Example"
#~ msgstr ""
-#~ msgid "swaplayer"
+#~ msgid "the function to be differentiated"
#~ msgstr ""
-#~ msgid "xxgate"
+#~ msgid ""
+#~ "the position of args in ``f`` that"
+#~ " are to be differentiated, defaults "
+#~ "to be 0"
#~ msgstr ""
-#~ msgid "xxlayer"
+#~ msgid "the grad function of ``f`` with the same set of arguments as ``f``"
#~ msgstr ""
-#~ msgid "xygate"
+#~ msgid "Return 1.j in as a tensor compatible with the backend."
#~ msgstr ""
-#~ msgid "xylayer"
+#~ msgid "\"complex64\" or \"complex128\""
#~ msgstr ""
-#~ msgid "xzgate"
+#~ msgid "1.j tensor"
#~ msgstr ""
-#~ msgid "xzlayer"
+#~ msgid "Return the elementwise imaginary value of a tensor ``a``."
#~ msgstr ""
-#~ msgid "yxgate"
+#~ msgid "imaginary value of ``a``"
#~ msgstr ""
-#~ msgid "yxlayer"
+#~ msgid "[summary]"
#~ msgstr ""
-#~ msgid "yygate"
+#~ msgid "The possible options"
#~ msgstr ""
-#~ msgid "yylayer"
+#~ msgid "Sampling output shape"
#~ msgstr ""
-#~ msgid "yzgate"
+#~ msgid ""
+#~ "probability for each option in a, "
+#~ "defaults to None, as equal probability"
+#~ " distribution"
#~ msgstr ""
-#~ msgid "yzlayer"
+#~ msgid ""
+#~ "Call the random normal function with "
+#~ "the random state management behind the"
+#~ " scene."
#~ msgstr ""
-#~ msgid "zxgate"
+#~ msgid "[description], defaults to 1"
#~ msgstr ""
-#~ msgid "zxlayer"
+#~ msgid "[description], defaults to \"32\""
#~ msgstr ""
-#~ msgid "zygate"
+#~ msgid "Determine whether the type of input ``a`` is ``sparse``."
#~ msgstr ""
-#~ msgid "zylayer"
+#~ msgid "input matrix ``a``"
#~ msgstr ""
-#~ msgid "zzgate"
+#~ msgid "a bool indicating whether the matrix ``a`` is sparse"
#~ msgstr ""
-#~ msgid "zzlayer"
+#~ msgid "Return a boolean on whether ``a`` is a tensor in backend package."
#~ msgstr ""
-#~ msgid "$$e^{-i heta_i \\sigma}$$"
+#~ msgid "a tensor to be determined"
#~ msgstr ""
-#~ msgid ""
-#~ "The following function should be used"
-#~ " to generate layers with special "
-#~ "case. As its soundness depends on "
-#~ "the nature of the task or problem,"
-#~ " it doesn't always make sense."
+#~ msgid "whether ``a`` is a tensor"
#~ msgstr ""
-#~ msgid "$$e^{-i heta \\sigma}$$"
+#~ msgid "Return the jitted version of function ``f``."
#~ msgstr ""
-#~ msgid "$$e^{-i heta \\sigma}$$"
+#~ msgid "function to be jitted"
#~ msgstr ""
#~ msgid ""
-#~ "A collection of useful function snippets"
-#~ " that irrelevant with the main "
-#~ "modules or await for furthere refactor"
+#~ "index of args that doesn't regarded "
+#~ "as tensor, only work for jax "
+#~ "backend"
#~ msgstr ""
-#~ msgid "Bases: :py:class:`object`"
+#~ msgid ""
+#~ "whether open XLA compilation, only works"
+#~ " for tensorflow backend, defaults False "
+#~ "since several ops has no XLA "
+#~ "correspondence"
#~ msgstr ""
-#~ msgid ""
-#~ "color cirq circuit SVG for given "
-#~ "gates, a small tool to hack the"
-#~ " cirq SVG"
+#~ msgid "jitted version of ``f``"
#~ msgstr ""
-#~ msgid "integer coordinate which gate is colored"
+#~ msgid ""
+#~ "Function that computes a (forward-mode)"
+#~ " Jacobian-vector product of ``f``. "
+#~ "Strictly speaking, this function is "
+#~ "value_and_jvp."
#~ msgstr ""
-#~ msgid "transform repr form of an array to real numpy array"
+#~ msgid "The function to compute jvp"
#~ msgstr ""
-#~ msgid "DQAS application kernels as vag functions"
+#~ msgid "input for ``f``"
#~ msgstr ""
-#~ msgid "1D array for full wavefunction, the basis is in lexcical order"
+#~ msgid "tangents"
#~ msgstr ""
-#~ msgid "nx.Graph"
+#~ msgid ""
+#~ "(``f(*inputs)``, jvp_tensor), where jvp_tensor "
+#~ "is the same shape as the output"
+#~ " of ``f``"
#~ msgstr ""
-#~ msgid "transformation functions before averaged"
+#~ msgid "Return the kronecker product of two matrices ``a`` and ``b``."
#~ msgstr ""
-#~ msgid "as f3"
+#~ msgid "kronecker product of ``a`` and ``b``"
#~ msgstr ""
-#~ msgid "maxcut energy for n qubit wavefunction i-th basis"
+#~ msgid "Return the maximum of an array or maximum along an axis."
#~ msgstr ""
-#~ msgid "ranged from 0 to 2**n-1"
+#~ msgid "[description], defaults to None"
#~ msgstr ""
-#~ msgid "number of qubits"
+#~ msgid "Return the minimum of an array or minimum along an axis."
#~ msgstr ""
#~ msgid ""
-#~ "deprecated as non tf and non "
-#~ "flexible, use the combination of "
-#~ "``reduced_density_matrix`` and ``entropy`` instead."
+#~ "Return the numpy array of a tensor"
+#~ " ``a``, but may not work in a"
+#~ " jitted function."
#~ msgstr ""
-#~ msgid "deprecated, current version in tc.quantum"
+#~ msgid "numpy array of ``a``"
#~ msgstr ""
#~ msgid ""
-#~ "value and gradient, currently only "
-#~ "tensorflow backend is supported jax and"
-#~ " numpy seems to be slow in "
-#~ "circuit simulation anyhow. *deprecated*"
-#~ msgstr ""
-
-#~ msgid "if lbd=0, take energy as objective"
+#~ "One-hot encodes the given ``a``. "
+#~ "Each index in the input ``a`` is"
+#~ " encoded as a vector of zeros "
+#~ "of length ``num`` with the element "
+#~ "at index set to one:"
#~ msgstr ""
-#~ msgid "if as default 0, overlap will not compute in the process"
+#~ msgid "input tensor"
#~ msgstr ""
-#~ msgid "Fill single qubit gates according to placeholder on circuit"
+#~ msgid "number of features in onehot dimension"
#~ msgstr ""
-#~ msgid "Hamiltonian measurements for Heisenberg model on graph lattice g"
+#~ msgid "onehot tensor with the last extra dimension"
#~ msgstr ""
-#~ msgid "short cut for ``cirq.LineQubit(i)``"
+#~ msgid ""
+#~ "Return an ones-matrix of dimension "
+#~ "`dim` Depending on specific backends, "
+#~ "`dim` has to be either an int "
+#~ "(numpy, torch, tensorflow) or a "
+#~ "`ShapeType` object (for block-sparse "
+#~ "backends). Block-sparse behavior is "
+#~ "currently not supported Args:"
#~ msgstr ""
-#~ msgid "QAOA block encoding kernel, support 2 params in one op"
+#~ msgid ""
+#~ "shape (int): The dimension of the "
+#~ "returned matrix. dtype: The dtype of "
+#~ "the returned matrix."
#~ msgstr ""
#~ msgid ""
-#~ "training QAOA with only optimizing "
-#~ "circuit parameters, can be well replaced"
-#~ " with more general function `DQAS_search`"
+#~ "A jax like split API, but it "
+#~ "doesn't split the key generator for "
+#~ "other backends. It is just for a"
+#~ " consistent interface of random code; "
+#~ "make sure you know what the "
+#~ "function actually does. This function is"
+#~ " mainly a utility to write backend"
+#~ " agnostic code instead of doing magic"
+#~ " things."
#~ msgstr ""
-#~ msgid "multi parameter for one layer"
+#~ msgid "Return the elementwise real value of a tensor ``a``."
#~ msgstr ""
-#~ msgid "kw arguments for measurements_func"
+#~ msgid "real value of ``a``"
#~ msgstr ""
-#~ msgid "loss function, gradient of nnp"
+#~ msgid ""
+#~ "Rectified linear unit activation function. "
+#~ "Computes the element-wise function:"
#~ msgstr ""
-#~ msgid ""
-#~ "tensorflow quantum backend compare to "
-#~ "qaoa_vag which is tensorcircuit backend"
+#~ msgid "\\mathrm{relu}(x)=\\max(x,0)"
#~ msgstr ""
-#~ msgid "Hamiltonian for tfim on lattice defined by graph g"
+#~ msgid "Input tensor"
#~ msgstr ""
-#~ msgid "cirq.PauliSum as operators for tfq expectation layer"
+#~ msgid "Tensor after relu"
#~ msgstr ""
#~ msgid ""
-#~ "generate random wavefunction from "
-#~ "approximately Haar measure, reference: "
-#~ "https://doi.org/10.1063/1.4983266"
+#~ "Roughly equivalent to operand[indices] = "
+#~ "updates, indices only support shape with"
+#~ " rank 2 for now."
#~ msgstr ""
-#~ msgid "repetition of the blocks"
+#~ msgid "Set the random state attached to the backend."
#~ msgstr ""
-#~ msgid "random Haar measure approximation"
+#~ msgid "the random seed, defaults to be None"
#~ msgstr ""
-#~ msgid "cirq.Circuit, empty circuit"
+#~ msgid ""
+#~ "If set to be true, only get "
+#~ "the random state in return instead "
+#~ "of setting the state on the "
+#~ "backend"
#~ msgstr ""
-#~ msgid "# of qubit"
+#~ msgid "Return the elementwise sine of a tensor ``a``."
#~ msgstr ""
-#~ msgid ""
-#~ "One-hot variational autoregressive models "
-#~ "for multiple categorical choices beyond "
-#~ "binary"
+#~ msgid "sine of ``a``"
#~ msgstr ""
-#~ msgid "Bases: :py:class:`keras.engine.training.Model`"
+#~ msgid "Return the total number of elements in ``a`` in tensor form."
#~ msgstr ""
-#~ msgid "Calls the model on new inputs and returns the outputs as tensors."
+#~ msgid "the total number of elements in ``a``"
#~ msgstr ""
#~ msgid ""
-#~ "In this case `call()` just reapplies "
-#~ "all ops in the graph to the "
-#~ "new inputs (e.g. build a new "
-#~ "computational graph from the provided "
-#~ "inputs)."
+#~ "Softmax function. Computes the function "
+#~ "which rescales elements to the range "
+#~ "[0,1] such that the elements along "
+#~ "axis sum to 1."
#~ msgstr ""
-#~ msgid ""
-#~ "Note: This method should not be "
-#~ "called directly. It is only meant "
-#~ "to be overridden when subclassing "
-#~ "`tf.keras.Model`. To call a model on "
-#~ "an input, always use the `__call__()`"
-#~ " method, i.e. `model(inputs)`, which relies"
-#~ " on the underlying `call()` method."
+#~ msgid "\\mathrm{softmax}(x) = \\frac{\\exp(x_i)}{\\sum_j \\exp(x_j)}"
#~ msgstr ""
-#~ msgid "Args:"
+#~ msgid "Tensor"
#~ msgstr ""
#~ msgid ""
-#~ "inputs: Input tensor, or dict/list/tuple "
-#~ "of input tensors. training: Boolean or"
-#~ " boolean scalar tensor, indicating whether"
-#~ " to run"
+#~ "A dimension along which Softmax will "
+#~ "be computed , defaults to None for"
+#~ " all axis sum."
#~ msgstr ""
-#~ msgid "the `Network` in training mode or inference mode."
+#~ msgid "concatenated tensor"
#~ msgstr ""
-#~ msgid "mask: A mask or list of masks. A mask can be either a boolean tensor or"
+#~ msgid "Solve the linear system Ax=b and return the solution x."
#~ msgstr ""
-#~ msgid "None (no mask). For more details, check the guide"
+#~ msgid "The multiplied matrix."
#~ msgstr ""
-#~ msgid "[here](https://www.tensorflow.org/guide/keras/masking_and_padding)."
+#~ msgid "The resulted matrix."
#~ msgstr ""
-#~ msgid "Returns:"
+#~ msgid "The solution of the linear system."
#~ msgstr ""
-#~ msgid ""
-#~ "A tensor if there is a single "
-#~ "output, or a list of tensors if"
-#~ " there are more than one outputs."
+#~ msgid "A sparse matrix multiplies a dense matrix."
#~ msgstr ""
-#~ msgid "Bases: :py:class:`keras.engine.base_layer.Layer`"
+#~ msgid "a sparse matrix"
#~ msgstr ""
-#~ msgid ""
-#~ "Creates the variables of the layer "
-#~ "(optional, for subclass implementers)."
+#~ msgid "a dense matrix"
#~ msgstr ""
-#~ msgid ""
-#~ "This is a method that implementers "
-#~ "of subclasses of `Layer` or `Model` "
-#~ "can override if they need a "
-#~ "state-creation step in-between layer "
-#~ "instantiation and layer call. It is "
-#~ "invoked automatically before the first "
-#~ "execution of `call()`."
+#~ msgid "dense matrix"
#~ msgstr ""
#~ msgid ""
-#~ "This is typically used to create "
-#~ "the weights of `Layer` subclasses (at"
-#~ " the discretion of the subclass "
-#~ "implementer)."
+#~ "Concatenates a sequence of tensors ``a``"
+#~ " along a new dimension ``axis``."
#~ msgstr ""
-#~ msgid "input_shape: Instance of `TensorShape`, or list of instances of"
+#~ msgid "List of tensors in the same shape"
#~ msgstr ""
-#~ msgid ""
-#~ "`TensorShape` if the layer expects a "
-#~ "list of inputs (one instance per "
-#~ "input)."
+#~ msgid "the stack axis, defaults to 0"
#~ msgstr ""
-#~ msgid "This is where the layer's logic lives."
+#~ msgid "stateful register for each package"
#~ msgstr ""
-#~ msgid ""
-#~ "The `call()` method may not create "
-#~ "state (except in its first invocation,"
-#~ " wrapping the creation of variables "
-#~ "or other resources in `tf.init_scope()`). "
-#~ "It is recommended to create state "
-#~ "in `__init__()`, or the `build()` method"
-#~ " that is called automatically before "
-#~ "`call()` executes the first time."
+#~ msgid "shape of output sampling tensor"
#~ msgstr ""
-#~ msgid "inputs: Input tensor, or dict/list/tuple of input tensors."
+#~ msgid "only real data type is supported, \"32\" or \"64\", defaults to \"32\""
#~ msgstr ""
-#~ msgid ""
-#~ "The first positional `inputs` argument "
-#~ "is subject to special rules: - "
-#~ "`inputs` must be explicitly passed. A"
-#~ " layer cannot have zero"
+#~ msgid "Uniform random sampler from ``low`` to ``high``."
#~ msgstr ""
-#~ msgid ""
-#~ "arguments, and `inputs` cannot be "
-#~ "provided via the default value of "
-#~ "a keyword argument."
+#~ msgid "shape of output sampling tensor, defaults to 1"
#~ msgstr ""
-#~ msgid "NumPy array or Python scalar values in `inputs` get cast as tensors."
+#~ msgid "Stop backpropagation from ``a``."
#~ msgstr ""
-#~ msgid "Keras mask metadata is only collected from `inputs`."
+#~ msgid "``branches[index]()``"
#~ msgstr ""
-#~ msgid ""
-#~ "Layers are built (`build(input_shape)` method)"
-#~ " using shape info from `inputs` only."
+#~ msgid "Constructs a tensor by tiling a given tensor."
#~ msgstr ""
-#~ msgid "`input_spec` compatibility is only checked against `inputs`."
+#~ msgid "1d tensor with length the same as the rank of ``a``"
#~ msgstr ""
-#~ msgid ""
-#~ "Mixed precision input casting is only"
-#~ " applied to `inputs`. If a layer "
-#~ "has tensor arguments in `*args` or "
-#~ "`**kwargs`, their casting behavior in "
-#~ "mixed precision should be handled "
-#~ "manually."
+#~ msgid "Convert a sparse matrix to dense tensor."
#~ msgstr ""
-#~ msgid "The SavedModel input specification is generated using `inputs` only."
+#~ msgid "the resulted dense matrix"
#~ msgstr ""
#~ msgid ""
-#~ "Integration with various ecosystem packages"
-#~ " like TFMOT, TFLite, TF.js, etc is"
-#~ " only supported for `inputs` and not"
-#~ " for tensors in positional and "
-#~ "keyword arguments."
-#~ msgstr ""
-
-#~ msgid "*args: Additional positional arguments. May contain tensors, although"
+#~ "Find the unique elements and their "
+#~ "corresponding counts of the given tensor"
+#~ " ``a``."
#~ msgstr ""
-#~ msgid "this is not recommended, for the reasons above."
+#~ msgid "Unique elements, corresponding counts"
#~ msgstr ""
-#~ msgid "**kwargs: Additional keyword arguments. May contain tensors, although"
+#~ msgid "Return the function which returns the value and grad of ``f``."
#~ msgstr ""
#~ msgid ""
-#~ "this is not recommended, for the "
-#~ "reasons above. The following optional "
-#~ "keyword arguments are reserved: - "
-#~ "`training`: Boolean scalar tensor of "
-#~ "Python boolean indicating"
+#~ "the value and grad function of "
+#~ "``f`` with the same set of "
+#~ "arguments as ``f``"
#~ msgstr ""
-#~ msgid "whether the `call` is meant for training or inference."
+#~ msgid ""
+#~ "Return the VVAG function of ``f``. "
+#~ "The inputs for ``f`` is (args[0], "
+#~ "args[1], args[2], ...), and the output"
+#~ " of ``f`` is a scalar. Suppose "
+#~ "VVAG(f) is a function with inputs "
+#~ "in the form (vargs[0], args[1], args[2],"
+#~ " ...), where vagrs[0] has one extra"
+#~ " dimension than args[0] in the first"
+#~ " axis and consistent with args[0] in"
+#~ " shape for remaining dimensions, i.e. "
+#~ "shape(vargs[0]) = [batch] + shape(args[0])."
+#~ " (We only cover cases where "
+#~ "``vectorized_argnums`` defaults to 0 here "
+#~ "for demonstration). VVAG(f) returns a "
+#~ "tuple as a value tensor with shape"
+#~ " [batch, 1] and a gradient tuple "
+#~ "with shape: ([batch]+shape(args[argnum]) for "
+#~ "argnum in argnums). The gradient for "
+#~ "argnums=k is defined as"
#~ msgstr ""
#~ msgid ""
-#~ "`mask`: Boolean input mask. If the "
-#~ "layer's `call()` method takes a `mask`"
-#~ " argument, its default value will be"
-#~ " set to the mask generated for "
-#~ "`inputs` by the previous layer (if "
-#~ "`input` did come from a layer that"
-#~ " generated a corresponding mask, i.e. "
-#~ "if it came from a Keras layer "
-#~ "with masking support)."
+#~ "g^k = \\frac{\\partial \\sum_{i\\in batch} "
+#~ "f(vargs[0][i], args[1], ...)}{\\partial args[k]}"
#~ msgstr ""
-#~ msgid "A tensor or list/tuple of tensors."
+#~ msgid "Therefore, if argnums=0, the gradient is reduced to"
#~ msgstr ""
-#~ msgid "Relevant classes for VQNHE"
+#~ msgid "g^0_i = \\frac{\\partial f(vargs[0][i])}{\\partial vargs[0][i]}"
#~ msgstr ""
#~ msgid ""
-#~ "Bases: "
-#~ ":py:class:`keras.optimizer_v2.learning_rate_schedule.LearningRateSchedule`"
-#~ msgstr ""
-
-#~ msgid "Dense layer but with complex weights, used for building complex RBM"
+#~ ", which is specifically suitable for "
+#~ "batched VQE optimization, where args[0] "
+#~ "is the circuit parameters."
#~ msgstr ""
-#~ msgid "VQNHE"
+#~ msgid "And if argnums=1, the gradient is like"
#~ msgstr ""
-#~ msgid "[description]"
+#~ msgid ""
+#~ "g^1_i = \\frac{\\partial \\sum_j "
+#~ "f(vargs[0][j], args[1])}{\\partial args[1][i]}\n"
+#~ "\n"
#~ msgstr ""
-#~ msgid "VQE"
+#~ msgid ""
+#~ ", which is suitable for quantum "
+#~ "machine learning scenarios, where ``f`` "
+#~ "is the loss function, args[0] "
+#~ "corresponds to the input data and "
+#~ "args[1] corresponds to the weights in"
+#~ " the QML model."
#~ msgstr ""
-#~ msgid "Backend register"
+#~ msgid ""
+#~ "the args to be vectorized, these "
+#~ "arguments should share the same batch"
+#~ " shape in the fist dimension"
#~ msgstr ""
-#~ msgid "Get the `tc.backend` object."
+#~ msgid ""
+#~ "Function that computes the dot product"
+#~ " between a vector v and the "
+#~ "Jacobian of the given function at "
+#~ "the point given by the inputs. "
+#~ "(reverse mode AD relevant) Strictly "
+#~ "speaking, this function is value_and_vjp."
#~ msgstr ""
-#~ msgid "\"numpy\", \"tensorflow\", \"jax\", \"pytorch\""
+#~ msgid "the function to carry out vjp calculation"
#~ msgstr ""
-#~ msgid "Raises"
+#~ msgid ""
+#~ "value vector or gradient from downstream"
+#~ " in reverse mode AD the same "
+#~ "shape as return of function ``f``"
#~ msgstr ""
-#~ msgid "Backend doesn't exist for `backend` argument."
+#~ msgid ""
+#~ "(``f(*inputs)``, vjp_tensor), where vjp_tensor "
+#~ "is the same shape as inputs"
#~ msgstr ""
-#~ msgid "The `tc.backend` object that with all registered universal functions."
+#~ msgid ""
+#~ "Return the vectorized map or batched "
+#~ "version of ``f`` on the first "
+#~ "extra axis. The general interface "
+#~ "supports ``f`` with multiple arguments "
+#~ "and broadcast in the fist dimension."
#~ msgstr ""
-#~ msgid "Backend magic inherited from tensornetwork: jax backend"
+#~ msgid "function to be broadcasted."
#~ msgstr ""
-#~ msgid "Bases: :py:class:`tensornetwork.backends.jax.jax_backend.JaxBackend`"
+#~ msgid "vmap version of ``f``"
#~ msgstr ""
#~ msgid ""
-#~ "See the original backend API at "
-#~ "``jax backend``. "
-#~ "`_"
+#~ "Return a zeros-matrix of dimension "
+#~ "`dim` Depending on specific backends, "
+#~ "`dim` has to be either an int "
+#~ "(numpy, torch, tensorflow) or a "
+#~ "`ShapeType` object (for block-sparse "
+#~ "backends)."
#~ msgstr ""
-#~ msgid "Returns the elementwise absolute value of tensor. Args:"
+#~ msgid "Block-sparse behavior is currently not supported Args:"
#~ msgstr ""
-#~ msgid "tensor: An input tensor."
+#~ msgid "Backend magic inherited from tensornetwork: numpy backend"
#~ msgstr ""
-#~ msgid "tensor: Its elementwise absolute value."
+#~ msgid ""
+#~ "Bases: "
+#~ ":py:class:`tensornetwork.backends.numpy.numpy_backend.NumPyBackend`"
#~ msgstr ""
-#~ msgid "Return the index of maximum of an array an axis."
+#~ msgid ""
+#~ "see the original backend API at "
+#~ "`numpy backend "
+#~ "`_"
#~ msgstr ""
-#~ msgid "[description], defaults to 0, different behavior from numpy defaults!"
+#~ msgid "Backend magic inherited from tensornetwork: pytorch backend"
#~ msgstr ""
-#~ msgid "Return the index of minimum of an array an axis."
+#~ msgid ""
+#~ "Bases: "
+#~ ":py:class:`tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend`"
#~ msgstr ""
-#~ msgid "Cast the tensor dtype of a ``a``."
+#~ msgid ""
+#~ "See the original backend API at "
+#~ "``pytorch backend``. "
+#~ "``_"
#~ msgstr ""
-#~ msgid "tensor"
+#~ msgid ""
+#~ "Note the functionality provided by "
+#~ "pytorch backend is incomplete, it "
+#~ "currenly lacks native efficicent jit and"
+#~ " vmap support."
#~ msgstr ""
-#~ msgid "\"float32\", \"float64\", \"complex64\", \"complex128\""
+#~ msgid "Backend magic inherited from tensornetwork: tensorflow backend"
#~ msgstr ""
-#~ msgid "``a`` of new dtype"
+#~ msgid ""
+#~ "Bases: "
+#~ ":py:class:`tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend`"
#~ msgstr ""
-#~ msgid "Join a sequence of arrays along an existing axis."
+#~ msgid ""
+#~ "See the original backend API at "
+#~ "`'tensorflow backend''. "
+#~ "`_"
#~ msgstr ""
-#~ msgid "[description], defaults to 0"
+#~ msgid "Some common noise quantum channels."
#~ msgstr ""
#~ msgid ""
-#~ "The native cond for XLA compiling, "
-#~ "wrapper for ``tf.cond`` and limited "
-#~ "functionality of ``jax.lax.cond``."
+#~ "Return an amplitude damping channel. "
+#~ "Notice: Amplitude damping corrspondings to "
+#~ "p = 1."
#~ msgstr ""
-#~ msgid "Convert a np.array or a tensor to a tensor type for the backend."
+#~ msgid ""
+#~ "\\sqrt{p}\n"
+#~ "\\begin{bmatrix}\n"
+#~ " 1 & 0\\\\\n"
+#~ " 0 & \\sqrt{1-\\gamma}\\\\\n"
+#~ "\\end{bmatrix}\\qquad\n"
+#~ "\\sqrt{p}\n"
+#~ "\\begin{bmatrix}\n"
+#~ " 0 & \\sqrt{\\gamma}\\\\\n"
+#~ " 0 & 0\\\\\n"
+#~ "\\end{bmatrix}\\qquad\n"
+#~ "\\sqrt{1-p}\n"
+#~ "\\begin{bmatrix}\n"
+#~ " \\sqrt{1-\\gamma} & 0\\\\\n"
+#~ " 0 & 1\\\\\n"
+#~ "\\end{bmatrix}\\qquad\n"
+#~ "\\sqrt{1-p}\n"
+#~ "\\begin{bmatrix}\n"
+#~ " 0 & 0\\\\\n"
+#~ " \\sqrt{\\gamma} & 0\\\\\n"
+#~ "\\end{bmatrix}\n"
+#~ "\n"
#~ msgstr ""
-#~ msgid ""
-#~ "Generate the coo format sparse matrix"
-#~ " from indices and values, which is"
-#~ " the only sparse format supported in"
-#~ " different ML backends."
+#~ msgid "the damping parameter of amplitude (:math:`\\gamma`)"
#~ msgstr ""
-#~ msgid "shape [n, 2] for n non zero values in the returned matrix"
+#~ msgid ":math:`p`"
#~ msgstr ""
-#~ msgid "shape [n]"
+#~ msgid "An amplitude damping channel with given :math:`\\gamma` and :math:`p`"
#~ msgstr ""
-#~ msgid "Tuple[int, ...]"
+#~ msgid "Return a Depolarizing Channel"
#~ msgstr ""
-#~ msgid "Return the expm of ``a``, matrix exponential."
+#~ msgid ""
+#~ "\\sqrt{1-p_x-p_y-p_z}\n"
+#~ "\\begin{bmatrix}\n"
+#~ " 1 & 0\\\\\n"
+#~ " 0 & 1\\\\\n"
+#~ "\\end{bmatrix}\\qquad\n"
+#~ "\\sqrt{p_x}\n"
+#~ "\\begin{bmatrix}\n"
+#~ " 0 & 1\\\\\n"
+#~ " 1 & 0\\\\\n"
+#~ "\\end{bmatrix}\\qquad\n"
+#~ "\\sqrt{p_y}\n"
+#~ "\\begin{bmatrix}\n"
+#~ " 0 & -1j\\\\\n"
+#~ " 1j & 0\\\\\n"
+#~ "\\end{bmatrix}\\qquad\n"
+#~ "\\sqrt{p_z}\n"
+#~ "\\begin{bmatrix}\n"
+#~ " 1 & 0\\\\\n"
+#~ " 0 & -1\\\\\n"
+#~ "\\end{bmatrix}\n"
+#~ "\n"
#~ msgstr ""
-#~ msgid "tensor in matrix form"
+#~ msgid ":math:`p_x`"
#~ msgstr ""
-#~ msgid "matrix exponential of matrix ``a``"
+#~ msgid ":math:`p_y`"
#~ msgstr ""
-#~ msgid "Return the cosine of a tensor ``a``."
+#~ msgid ":math:`p_z`"
#~ msgstr ""
-#~ msgid "cosine of ``a``"
+#~ msgid "Sequences of Gates"
#~ msgstr ""
-#~ msgid "Return the cumulative sum of the elements along a given axis."
+#~ msgid "Convert Kraus operators to one Tensor (as one Super Gate)."
#~ msgstr ""
#~ msgid ""
-#~ "The default behavior is the same "
-#~ "as numpy, different from tf/torch as "
-#~ "cumsum of the flatten 1D array, "
-#~ "defaults to None"
+#~ "\\sum_{k}^{} K_k \\otimes K_k^{\\dagger}\n"
+#~ "\n"
#~ msgstr ""
-#~ msgid "Return the copy of tensor ''a''."
+#~ msgid "A sequence of Gate"
#~ msgstr ""
-#~ msgid "Return an identity matrix of dimension `dim`"
+#~ msgid "The corresponding Tensor of the list of Kraus operators"
#~ msgstr ""
-#~ msgid ""
-#~ "Depending on specific backends, `dim` "
-#~ "has to be either an int (numpy,"
-#~ " torch, tensorflow) or a `ShapeType` "
-#~ "object (for block-sparse backends). "
-#~ "Block-sparse behavior is currently not "
-#~ "supported"
+#~ msgid "Return a phase damping channel with given :math:`\\gamma`"
#~ msgstr ""
#~ msgid ""
-#~ "N (int): The dimension of the "
-#~ "returned matrix. dtype: The dtype of "
-#~ "the returned matrix. M (int): The "
-#~ "dimension of the returned matrix."
+#~ "\\begin{bmatrix}\n"
+#~ " 1 & 0\\\\\n"
+#~ " 0 & \\sqrt{1-\\gamma}\\\\\n"
+#~ "\\end{bmatrix}\\qquad\n"
+#~ "\\begin{bmatrix}\n"
+#~ " 0 & 0\\\\\n"
+#~ " 0 & \\sqrt{\\gamma}\\\\\n"
+#~ "\\end{bmatrix}\n"
+#~ "\n"
#~ msgstr ""
-#~ msgid "Return the function which is the grad function of input ``f``."
+#~ msgid "The damping parameter of phase (:math:`\\gamma`)"
#~ msgstr ""
-#~ msgid "Example"
+#~ msgid "A phase damping channel with given :math:`\\gamma`"
#~ msgstr ""
-#~ msgid "the function to be differentiated"
+#~ msgid "Reset channel"
#~ msgstr ""
#~ msgid ""
-#~ "the position of args in ``f`` that"
-#~ " are to be differentiated, defaults "
-#~ "to be 0"
+#~ "\\begin{bmatrix}\n"
+#~ " 1 & 0\\\\\n"
+#~ " 0 & 0\\\\\n"
+#~ "\\end{bmatrix}\\qquad\n"
+#~ "\\begin{bmatrix}\n"
+#~ " 0 & 1\\\\\n"
+#~ " 0 & 0\\\\\n"
+#~ "\\end{bmatrix}\n"
+#~ "\n"
#~ msgstr ""
-#~ msgid "the grad function of ``f`` with the same set of arguments as ``f``"
+#~ msgid "Check identity of a single qubit Kraus operators."
#~ msgstr ""
-#~ msgid "Return 1.j in as a tensor compatible with the backend."
+#~ msgid "Examples:"
#~ msgstr ""
-#~ msgid "\"complex64\" or \"complex128\""
+#~ msgid ""
+#~ "\\sum_{k}^{} K_k^{\\dagger} K_k = I\n"
+#~ "\n"
#~ msgstr ""
-#~ msgid "1.j tensor"
+#~ msgid "List of Kraus operators."
#~ msgstr ""
-#~ msgid "Return the elementwise imaginary value of a tensor ``a``."
+#~ msgid "Quantum circuit: state simulator"
#~ msgstr ""
-#~ msgid "imaginary value of ``a``"
+#~ msgid "``Circuit`` class. Simple usage demo below."
#~ msgstr ""
-#~ msgid "[summary]"
+#~ msgid "Apply any gate with parameters on the circuit."
#~ msgstr ""
-#~ msgid "The possible options"
+#~ msgid "Qubit number than the gate applies on."
#~ msgstr ""
-#~ msgid "Sampling output shape"
+#~ msgid "Parameters for the gate"
#~ msgstr ""
-#~ msgid ""
-#~ "probability for each option in a, "
-#~ "defaults to None, as equal probability"
-#~ " distribution"
+#~ msgid "Apply cnot gate on the circuit."
#~ msgstr ""
#~ msgid ""
-#~ "Call the random normal function with "
-#~ "the random state management behind the"
-#~ " scene."
-#~ msgstr ""
-
-#~ msgid "[description], defaults to 1"
+#~ "Qubit number than the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 1.+0.j "
+#~ "& 0.+0.j & 0.+0.j & 0.+0.j\\\\ "
+#~ "0.+0.j & 1.+0.j & 0.+0.j & "
+#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 0.+0.j"
+#~ " & 1.+0.j\\\\ 0.+0.j & 0.+0.j &"
+#~ " 1.+0.j & 0.+0.j \\end{bmatrix}"
#~ msgstr ""
-#~ msgid "[description], defaults to \"32\""
+#~ msgid "Qubit number than the gate applies on. The matrix for the gate is"
#~ msgstr ""
-#~ msgid "Determine whether the type of input ``a`` is ``sparse``."
+#~ msgid ""
+#~ "\\begin{bmatrix} 1.+0.j & 0.+0.j & "
+#~ "0.+0.j & 0.+0.j\\\\ 0.+0.j & 1.+0.j"
+#~ " & 0.+0.j & 0.+0.j\\\\ 0.+0.j &"
+#~ " 0.+0.j & 0.+0.j & 1.+0.j\\\\ "
+#~ "0.+0.j & 0.+0.j & 1.+0.j & 0.+0.j"
+#~ " \\end{bmatrix}"
#~ msgstr ""
-#~ msgid "input matrix ``a``"
+#~ msgid "Apply cr gate with parameters on the circuit."
#~ msgstr ""
-#~ msgid "a bool indicating whether the matrix ``a`` is sparse"
+#~ msgid "Apply crx gate with parameters on the circuit."
#~ msgstr ""
-#~ msgid "Return a boolean on whether ``a`` is a tensor in backend package."
+#~ msgid "Apply cry gate with parameters on the circuit."
#~ msgstr ""
-#~ msgid "a tensor to be determined"
+#~ msgid "Apply crz gate with parameters on the circuit."
#~ msgstr ""
-#~ msgid "whether ``a`` is a tensor"
+#~ msgid "Apply cy gate on the circuit."
#~ msgstr ""
-#~ msgid "Return the jitted version of function ``f``."
+#~ msgid ""
+#~ "Qubit number than the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 1.+0.j "
+#~ "& 0.+0.j & 0.+0.j & 0.+0.j\\\\ "
+#~ "0.+0.j & 1.+0.j & 0.+0.j & "
+#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 0.+0.j"
+#~ " & 0.-1.j\\\\ 0.+0.j & 0.+0.j &"
+#~ " 0.+1.j & 0.+0.j \\end{bmatrix}"
#~ msgstr ""
-#~ msgid "function to be jitted"
+#~ msgid ""
+#~ "\\begin{bmatrix} 1.+0.j & 0.+0.j & "
+#~ "0.+0.j & 0.+0.j\\\\ 0.+0.j & 1.+0.j"
+#~ " & 0.+0.j & 0.+0.j\\\\ 0.+0.j &"
+#~ " 0.+0.j & 0.+0.j & 0.-1.j\\\\ "
+#~ "0.+0.j & 0.+0.j & 0.+1.j & 0.+0.j"
+#~ " \\end{bmatrix}"
#~ msgstr ""
-#~ msgid ""
-#~ "index of args that doesn't regarded "
-#~ "as tensor, only work for jax "
-#~ "backend"
+#~ msgid "Apply cz gate on the circuit."
#~ msgstr ""
#~ msgid ""
-#~ "whether open XLA compilation, only works"
-#~ " for tensorflow backend, defaults False "
-#~ "since several ops has no XLA "
-#~ "correspondence"
+#~ "Qubit number than the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 1.+0.j "
+#~ "& 0.+0.j & 0.+0.j & 0.+0.j\\\\ "
+#~ "0.+0.j & 1.+0.j & 0.+0.j & "
+#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 1.+0.j"
+#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
+#~ " 0.+0.j & -1.+0.j \\end{bmatrix}"
#~ msgstr ""
-#~ msgid "jitted version of ``f``"
+#~ msgid ""
+#~ "\\begin{bmatrix} 1.+0.j & 0.+0.j & "
+#~ "0.+0.j & 0.+0.j\\\\ 0.+0.j & 1.+0.j"
+#~ " & 0.+0.j & 0.+0.j\\\\ 0.+0.j &"
+#~ " 0.+0.j & 1.+0.j & 0.+0.j\\\\ "
+#~ "0.+0.j & 0.+0.j & 0.+0.j & -1.+0.j"
+#~ " \\end{bmatrix}"
#~ msgstr ""
-#~ msgid ""
-#~ "Function that computes a (forward-mode)"
-#~ " Jacobian-vector product of ``f``. "
-#~ "Strictly speaking, this function is "
-#~ "value_and_jvp."
+#~ msgid "Apply exp gate with parameters on the circuit."
#~ msgstr ""
-#~ msgid "The function to compute jvp"
+#~ msgid "Apply exp1 gate with parameters on the circuit."
#~ msgstr ""
-#~ msgid "input for ``f``"
+#~ msgid "Apply h gate on the circuit."
#~ msgstr ""
-#~ msgid "tangents"
+#~ msgid ""
+#~ "Qubit number than the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} "
+#~ "0.70710677+0.j & 0.70710677+0.j\\\\ "
+#~ "0.70710677+0.j & -0.70710677+0.j \\end{bmatrix}"
#~ msgstr ""
#~ msgid ""
-#~ "(``f(*inputs)``, jvp_tensor), where jvp_tensor "
-#~ "is the same shape as the output"
-#~ " of ``f``"
+#~ "\\begin{bmatrix} 0.70710677+0.j & "
+#~ "0.70710677+0.j\\\\ 0.70710677+0.j & "
+#~ "-0.70710677+0.j \\end{bmatrix}"
#~ msgstr ""
-#~ msgid "Return the kronecker product of two matrices ``a`` and ``b``."
+#~ msgid "Apply i gate on the circuit."
#~ msgstr ""
-#~ msgid "kronecker product of ``a`` and ``b``"
+#~ msgid ""
+#~ "Qubit number than the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 1.+0.j "
+#~ "& 0.+0.j\\\\ 0.+0.j & 1.+0.j "
+#~ "\\end{bmatrix}"
#~ msgstr ""
-#~ msgid "Return the maximum of an array or maximum along an axis."
+#~ msgid ""
+#~ "\\begin{bmatrix} 1.+0.j & 0.+0.j\\\\ "
+#~ "0.+0.j & 1.+0.j \\end{bmatrix}"
#~ msgstr ""
-#~ msgid "[description], defaults to None"
+#~ msgid "Apply iswap gate on the circuit."
#~ msgstr ""
-#~ msgid "Return the minimum of an array or minimum along an axis."
+#~ msgid ""
+#~ "Qubit number than the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 1.+0.j "
+#~ "& 0.+0.j & 0.+0.j & 0.+0.j\\\\ "
+#~ "0.+0.j & 0.+0.j & 0.+1.j & "
+#~ "0.+0.j\\\\ 0.+0.j & 0.+1.j & 0.+0.j"
+#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
+#~ " 0.+0.j & 1.+0.j \\end{bmatrix}"
#~ msgstr ""
#~ msgid ""
-#~ "Return the numpy array of a tensor"
-#~ " ``a``, but may not work in a"
-#~ " jitted function."
+#~ "\\begin{bmatrix} 1.+0.j & 0.+0.j & "
+#~ "0.+0.j & 0.+0.j\\\\ 0.+0.j & 0.+0.j"
+#~ " & 0.+1.j & 0.+0.j\\\\ 0.+0.j &"
+#~ " 0.+1.j & 0.+0.j & 0.+0.j\\\\ "
+#~ "0.+0.j & 0.+0.j & 0.+0.j & 1.+0.j"
+#~ " \\end{bmatrix}"
#~ msgstr ""
-#~ msgid "numpy array of ``a``"
+#~ msgid "Apply r gate with parameters on the circuit."
#~ msgstr ""
-#~ msgid ""
-#~ "One-hot encodes the given ``a``. "
-#~ "Each index in the input ``a`` is"
-#~ " encoded as a vector of zeros "
-#~ "of length ``num`` with the element "
-#~ "at index set to one:"
+#~ msgid "Apply rx gate with parameters on the circuit."
#~ msgstr ""
-#~ msgid "input tensor"
+#~ msgid "Apply ry gate with parameters on the circuit."
#~ msgstr ""
-#~ msgid "number of features in onehot dimension"
+#~ msgid "Apply rz gate with parameters on the circuit."
#~ msgstr ""
-#~ msgid "onehot tensor with the last extra dimension"
+#~ msgid "Apply s gate on the circuit."
#~ msgstr ""
#~ msgid ""
-#~ "Return an ones-matrix of dimension "
-#~ "`dim` Depending on specific backends, "
-#~ "`dim` has to be either an int "
-#~ "(numpy, torch, tensorflow) or a "
-#~ "`ShapeType` object (for block-sparse "
-#~ "backends). Block-sparse behavior is "
-#~ "currently not supported Args:"
+#~ "Qubit number than the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 1.+0.j "
+#~ "& 0.+0.j\\\\ 0.+0.j & 0.+1.j "
+#~ "\\end{bmatrix}"
#~ msgstr ""
#~ msgid ""
-#~ "shape (int): The dimension of the "
-#~ "returned matrix. dtype: The dtype of "
-#~ "the returned matrix."
+#~ "\\begin{bmatrix} 1.+0.j & 0.+0.j\\\\ "
+#~ "0.+0.j & 0.+1.j \\end{bmatrix}"
+#~ msgstr ""
+
+#~ msgid "Apply sd gate on the circuit."
#~ msgstr ""
#~ msgid ""
-#~ "A jax like split API, but it "
-#~ "doesn't split the key generator for "
-#~ "other backends. It is just for a"
-#~ " consistent interface of random code; "
-#~ "make sure you know what the "
-#~ "function actually does. This function is"
-#~ " mainly a utility to write backend"
-#~ " agnostic code instead of doing magic"
-#~ " things."
+#~ "Qubit number than the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 1.+0.j "
+#~ "& 0.+0.j\\\\ 0.+0.j & 0.-1.j "
+#~ "\\end{bmatrix}"
#~ msgstr ""
-#~ msgid "Return the elementwise real value of a tensor ``a``."
+#~ msgid ""
+#~ "\\begin{bmatrix} 1.+0.j & 0.+0.j\\\\ "
+#~ "0.+0.j & 0.-1.j \\end{bmatrix}"
#~ msgstr ""
-#~ msgid "real value of ``a``"
+#~ msgid "Apply swap gate on the circuit."
#~ msgstr ""
#~ msgid ""
-#~ "Rectified linear unit activation function. "
-#~ "Computes the element-wise function:"
+#~ "Qubit number than the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 1.+0.j "
+#~ "& 0.+0.j & 0.+0.j & 0.+0.j\\\\ "
+#~ "0.+0.j & 0.+0.j & 1.+0.j & "
+#~ "0.+0.j\\\\ 0.+0.j & 1.+0.j & 0.+0.j"
+#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
+#~ " 0.+0.j & 1.+0.j \\end{bmatrix}"
#~ msgstr ""
-#~ msgid "\\mathrm{relu}(x)=\\max(x,0)"
+#~ msgid ""
+#~ "\\begin{bmatrix} 1.+0.j & 0.+0.j & "
+#~ "0.+0.j & 0.+0.j\\\\ 0.+0.j & 0.+0.j"
+#~ " & 1.+0.j & 0.+0.j\\\\ 0.+0.j &"
+#~ " 1.+0.j & 0.+0.j & 0.+0.j\\\\ "
+#~ "0.+0.j & 0.+0.j & 0.+0.j & 1.+0.j"
+#~ " \\end{bmatrix}"
#~ msgstr ""
-#~ msgid "Input tensor"
+#~ msgid "Apply t gate on the circuit."
#~ msgstr ""
-#~ msgid "Tensor after relu"
+#~ msgid ""
+#~ "Qubit number than the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 1. &"
+#~ " +0.j & 0. & +0.j\\\\ 0. &"
+#~ " +0.j & 0.70710677+0.70710677j \\end{bmatrix}"
#~ msgstr ""
#~ msgid ""
-#~ "Roughly equivalent to operand[indices] = "
-#~ "updates, indices only support shape with"
-#~ " rank 2 for now."
+#~ "\\begin{bmatrix} 1. & +0.j & 0. "
+#~ "& +0.j\\\\ 0. & +0.j & "
+#~ "0.70710677+0.70710677j \\end{bmatrix}"
#~ msgstr ""
-#~ msgid "Set the random state attached to the backend."
+#~ msgid "Apply td gate on the circuit."
#~ msgstr ""
-#~ msgid "the random seed, defaults to be None"
+#~ msgid ""
+#~ "Qubit number than the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 1. &"
+#~ " +0.j & 0. & +0.j\\\\ 0. &"
+#~ " +0.j & 0.70710677-0.70710677j \\end{bmatrix}"
#~ msgstr ""
#~ msgid ""
-#~ "If set to be true, only get "
-#~ "the random state in return instead "
-#~ "of setting the state on the "
-#~ "backend"
+#~ "\\begin{bmatrix} 1. & +0.j & 0. "
+#~ "& +0.j\\\\ 0. & +0.j & "
+#~ "0.70710677-0.70710677j \\end{bmatrix}"
#~ msgstr ""
-#~ msgid "Return the elementwise sine of a tensor ``a``."
+#~ msgid "Apply toffoli gate on the circuit."
#~ msgstr ""
-#~ msgid "sine of ``a``"
+#~ msgid ""
+#~ "Qubit number than the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 1.+0.j "
+#~ "& 0.+0.j & 0.+0.j & 0.+0.j & "
+#~ "0.+0.j & 0.+0.j & 0.+0.j & "
+#~ "0.+0.j\\\\ 0.+0.j & 1.+0.j & 0.+0.j"
+#~ " & 0.+0.j & 0.+0.j & 0.+0.j &"
+#~ " 0.+0.j & 0.+0.j\\\\ 0.+0.j & "
+#~ "0.+0.j & 1.+0.j & 0.+0.j & 0.+0.j"
+#~ " & 0.+0.j & 0.+0.j & 0.+0.j\\\\"
+#~ " 0.+0.j & 0.+0.j & 0.+0.j & "
+#~ "1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j"
+#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
+#~ " 0.+0.j & 0.+0.j & 1.+0.j & "
+#~ "0.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j"
+#~ " & 0.+0.j & 0.+0.j & 0.+0.j &"
+#~ " 0.+0.j & 1.+0.j & 0.+0.j & "
+#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 0.+0.j"
+#~ " & 0.+0.j & 0.+0.j & 0.+0.j &"
+#~ " 0.+0.j & 1.+0.j\\\\ 0.+0.j & "
+#~ "0.+0.j & 0.+0.j & 0.+0.j & 0.+0.j"
+#~ " & 0.+0.j & 1.+0.j & 0.+0.j "
+#~ "\\end{bmatrix}"
#~ msgstr ""
-#~ msgid "Return the total number of elements in ``a`` in tensor form."
+#~ msgid ""
+#~ "\\begin{bmatrix} 1.+0.j & 0.+0.j & "
+#~ "0.+0.j & 0.+0.j & 0.+0.j & 0.+0.j"
+#~ " & 0.+0.j & 0.+0.j\\\\ 0.+0.j &"
+#~ " 1.+0.j & 0.+0.j & 0.+0.j & "
+#~ "0.+0.j & 0.+0.j & 0.+0.j & "
+#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 1.+0.j"
+#~ " & 0.+0.j & 0.+0.j & 0.+0.j &"
+#~ " 0.+0.j & 0.+0.j\\\\ 0.+0.j & "
+#~ "0.+0.j & 0.+0.j & 1.+0.j & 0.+0.j"
+#~ " & 0.+0.j & 0.+0.j & 0.+0.j\\\\"
+#~ " 0.+0.j & 0.+0.j & 0.+0.j & "
+#~ "0.+0.j & 1.+0.j & 0.+0.j & 0.+0.j"
+#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
+#~ " 0.+0.j & 0.+0.j & 0.+0.j & "
+#~ "1.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j"
+#~ " & 0.+0.j & 0.+0.j & 0.+0.j &"
+#~ " 0.+0.j & 0.+0.j & 0.+0.j & "
+#~ "1.+0.j\\\\ 0.+0.j & 0.+0.j & 0.+0.j"
+#~ " & 0.+0.j & 0.+0.j & 0.+0.j &"
+#~ " 1.+0.j & 0.+0.j \\end{bmatrix}"
#~ msgstr ""
-#~ msgid "the total number of elements in ``a``"
+#~ msgid "Apply wroot gate on the circuit."
#~ msgstr ""
#~ msgid ""
-#~ "Softmax function. Computes the function "
-#~ "which rescales elements to the range "
-#~ "[0,1] such that the elements along "
-#~ "axis sum to 1."
+#~ "Qubit number than the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} "
+#~ "0.70710677+0.j & -0.5 & -0.5j\\\\ 0.5"
+#~ " & -0.5j & 0.70710677+0.j \\end{bmatrix}"
#~ msgstr ""
-#~ msgid "\\mathrm{softmax}(x) = \\frac{\\exp(x_i)}{\\sum_j \\exp(x_j)}"
+#~ msgid ""
+#~ "\\begin{bmatrix} 0.70710677+0.j & -0.5 &"
+#~ " -0.5j\\\\ 0.5 & -0.5j & "
+#~ "0.70710677+0.j \\end{bmatrix}"
#~ msgstr ""
-#~ msgid "Tensor"
+#~ msgid "Apply x gate on the circuit."
#~ msgstr ""
#~ msgid ""
-#~ "A dimension along which Softmax will "
-#~ "be computed , defaults to None for"
-#~ " all axis sum."
+#~ "Qubit number than the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 0.+0.j "
+#~ "& 1.+0.j\\\\ 1.+0.j & 0.+0.j "
+#~ "\\end{bmatrix}"
#~ msgstr ""
-#~ msgid "concatenated tensor"
+#~ msgid ""
+#~ "\\begin{bmatrix} 0.+0.j & 1.+0.j\\\\ "
+#~ "1.+0.j & 0.+0.j \\end{bmatrix}"
#~ msgstr ""
-#~ msgid "Solve the linear system Ax=b and return the solution x."
+#~ msgid "Apply y gate on the circuit."
#~ msgstr ""
-#~ msgid "The multiplied matrix."
+#~ msgid ""
+#~ "Qubit number than the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 0.+0.j "
+#~ "& 0.-1.j\\\\ 0.+1.j & 0.+0.j "
+#~ "\\end{bmatrix}"
#~ msgstr ""
-#~ msgid "The resulted matrix."
+#~ msgid ""
+#~ "\\begin{bmatrix} 0.+0.j & 0.-1.j\\\\ "
+#~ "0.+1.j & 0.+0.j \\end{bmatrix}"
#~ msgstr ""
-#~ msgid "The solution of the linear system."
+#~ msgid "Apply z gate on the circuit."
#~ msgstr ""
-#~ msgid "A sparse matrix multiplies a dense matrix."
+#~ msgid ""
+#~ "Qubit number than the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 1.+0.j "
+#~ "& 0.+0.j\\\\ 0.+0.j & -1.+0.j "
+#~ "\\end{bmatrix}"
#~ msgstr ""
-#~ msgid "a sparse matrix"
+#~ msgid ""
+#~ "\\begin{bmatrix} 1.+0.j & 0.+0.j\\\\ "
+#~ "0.+0.j & -1.+0.j \\end{bmatrix}"
#~ msgstr ""
-#~ msgid "a dense matrix"
+#~ msgid "Circuit object based on state simulator."
#~ msgstr ""
-#~ msgid "dense matrix"
+#~ msgid "The number of qubits in the circuit."
#~ msgstr ""
#~ msgid ""
-#~ "Concatenates a sequence of tensors ``a``"
-#~ " along a new dimension ``axis``."
+#~ "If not None, the initial state of"
+#~ " the circuit is taken as ``inputs``"
+#~ " instead of :math:`\\vert 0\\rangle^n` "
+#~ "qubits, defaults to None"
#~ msgstr ""
-#~ msgid "List of tensors in the same shape"
+#~ msgid "(Nodes, dangling Edges) for a MPS like initial wavefunction"
#~ msgstr ""
-#~ msgid "the stack axis, defaults to 0"
+#~ msgid ""
+#~ "dict if two qubit gate is ready"
+#~ " for split, including parameters for "
+#~ "at least one of ``max_singular_values`` "
+#~ "and ``max_truncation_err``."
#~ msgstr ""
-#~ msgid "stateful register for each package"
+#~ msgid ""
+#~ "Monte Carlo trajectory simulation of "
+#~ "general Kraus channel whose Kraus "
+#~ "operators cannot be amplified to unitary"
+#~ " operators. For unitary operators composed"
+#~ " Kraus channel, :py:meth:`unitary_kraus` is "
+#~ "much faster."
#~ msgstr ""
-#~ msgid "shape of output sampling tensor"
+#~ msgid ""
+#~ "This function is jittable in theory. "
+#~ "But only jax+GPU combination is "
+#~ "recommended for jit since the graph "
+#~ "building time is too long for "
+#~ "other backend options; though the "
+#~ "running time of the function is "
+#~ "very fast for every case."
#~ msgstr ""
-#~ msgid "only real data type is supported, \"32\" or \"64\", defaults to \"32\""
+#~ msgid "list of ``tn.Node`` for Kraus operators"
#~ msgstr ""
-#~ msgid "Uniform random sampler from ``low`` to ``high``."
+#~ msgid "the qubits index that Kraus channel is applied on"
#~ msgstr ""
-#~ msgid "shape of output sampling tensor, defaults to 1"
+#~ msgid ""
+#~ "random tensor between 0 or 1, "
+#~ "defaults to be None, the random "
+#~ "number will be generated automatically"
#~ msgstr ""
-#~ msgid "Stop backpropagation from ``a``."
+#~ msgid "Compute the expectation of corresponding operators."
#~ msgstr ""
-#~ msgid "``branches[index]()``"
+#~ msgid ""
+#~ "operator and its position on the "
+#~ "circuit, eg. ``(tc.gates.z(), [1, ]), "
+#~ "(tc.gates.x(), [2, ])`` is for operator"
+#~ " :math:`Z_1X_2`"
#~ msgstr ""
-#~ msgid "Constructs a tensor by tiling a given tensor."
+#~ msgid ""
+#~ "if True, then the wavefunction tensor"
+#~ " is cached for further expectation "
+#~ "evaluation, defaults to be true"
#~ msgstr ""
-#~ msgid "1d tensor with length the same as the rank of ``a``"
+#~ msgid "Tensor with one element"
#~ msgstr ""
-#~ msgid "Convert a sparse matrix to dense tensor."
+#~ msgid "[WIP], check whether the circuit is legal."
#~ msgstr ""
-#~ msgid "the resulted dense matrix"
+#~ msgid "the bool indicating whether the circuit is legal"
#~ msgstr ""
-#~ msgid ""
-#~ "Find the unique elements and their "
-#~ "corresponding counts of the given tensor"
-#~ " ``a``."
+#~ msgid "Take measurement to the given quantum lines."
#~ msgstr ""
-#~ msgid "Unique elements, corresponding counts"
+#~ msgid "measure on which quantum line"
#~ msgstr ""
-#~ msgid "Return the function which returns the value and grad of ``f``."
+#~ msgid "if true, theoretical probability is also returned"
#~ msgstr ""
#~ msgid ""
-#~ "the value and grad function of "
-#~ "``f`` with the same set of "
-#~ "arguments as ``f``"
+#~ "Middle measurement in z-basis on the "
+#~ "circuit, note the wavefunction output is"
+#~ " not normalized with ``mid_measurement`` "
+#~ "involved, one should normalize the state"
+#~ " manually if needed."
#~ msgstr ""
-#~ msgid ""
-#~ "Return the VVAG function of ``f``. "
-#~ "The inputs for ``f`` is (args[0], "
-#~ "args[1], args[2], ...), and the output"
-#~ " of ``f`` is a scalar. Suppose "
-#~ "VVAG(f) is a function with inputs "
-#~ "in the form (vargs[0], args[1], args[2],"
-#~ " ...), where vagrs[0] has one extra"
-#~ " dimension than args[0] in the first"
-#~ " axis and consistent with args[0] in"
-#~ " shape for remaining dimensions, i.e. "
-#~ "shape(vargs[0]) = [batch] + shape(args[0])."
-#~ " (We only cover cases where "
-#~ "``vectorized_argnums`` defaults to 0 here "
-#~ "for demonstration). VVAG(f) returns a "
-#~ "tuple as a value tensor with shape"
-#~ " [batch, 1] and a gradient tuple "
-#~ "with shape: ([batch]+shape(args[argnum]) for "
-#~ "argnum in argnums). The gradient for "
-#~ "argnums=k is defined as"
+#~ msgid "the index of qubit that the Z direction postselection applied on"
#~ msgstr ""
-#~ msgid ""
-#~ "g^k = \\frac{\\partial \\sum_{i\\in batch} "
-#~ "f(vargs[0][i], args[1], ...)}{\\partial args[k]}"
+#~ msgid "0 for spin up, 1 for spin down, defaults to be 0"
#~ msgstr ""
-#~ msgid "Therefore, if argnums=0, the gradient is reduced to"
+#~ msgid "Reference: arXiv:1201.3974."
#~ msgstr ""
-#~ msgid "g^0_i = \\frac{\\partial f(vargs[0][i])}{\\partial vargs[0][i]}"
+#~ msgid "sampled bit string and the corresponding theoretical probability"
#~ msgstr ""
-#~ msgid ""
-#~ ", which is specifically suitable for "
-#~ "batched VQE optimization, where args[0] "
-#~ "is the circuit parameters."
+#~ msgid "Replace the input state with the circuit structure unchanged."
#~ msgstr ""
-#~ msgid "And if argnums=1, the gradient is like"
+#~ msgid "Input wavefunction."
#~ msgstr ""
#~ msgid ""
-#~ "g^1_i = \\frac{\\partial \\sum_j "
-#~ "f(vargs[0][j], args[1])}{\\partial args[1][i]}\n"
-#~ "\n"
+#~ "Replace the input state in MPS "
+#~ "representation while keep the circuit "
+#~ "structure unchanged."
#~ msgstr ""
-#~ msgid ""
-#~ ", which is suitable for quantum "
-#~ "machine learning scenarios, where ``f`` "
-#~ "is the loss function, args[0] "
-#~ "corresponds to the input data and "
-#~ "args[1] corresponds to the weights in"
-#~ " the QML model."
+#~ msgid "Compute the output wavefunction from the circuit."
#~ msgstr ""
-#~ msgid ""
-#~ "the args to be vectorized, these "
-#~ "arguments should share the same batch"
-#~ " shape in the fist dimension"
+#~ msgid "the str indicating the form of the output wavefunction"
#~ msgstr ""
-#~ msgid ""
-#~ "Function that computes the dot product"
-#~ " between a vector v and the "
-#~ "Jacobian of the given function at "
-#~ "the point given by the inputs. "
-#~ "(reverse mode AD relevant) Strictly "
-#~ "speaking, this function is value_and_vjp."
+#~ msgid "Tensor with the corresponding shape"
#~ msgstr ""
-#~ msgid "the function to carry out vjp calculation"
+#~ msgid "Compute :math:`\\langle bra\\vert ops \\vert ket\\rangle`"
#~ msgstr ""
-#~ msgid ""
-#~ "value vector or gradient from downstream"
-#~ " in reverse mode AD the same "
-#~ "shape as return of function ``f``"
+#~ msgid "Example 1 (:math:`bra` is same as :math:`ket`)"
#~ msgstr ""
-#~ msgid ""
-#~ "(``f(*inputs)``, vjp_tensor), where vjp_tensor "
-#~ "is the same shape as inputs"
+#~ msgid "Example 2 (:math:`bra` is different from :math:`ket`)"
#~ msgstr ""
-#~ msgid ""
-#~ "Return the vectorized map or batched "
-#~ "version of ``f`` on the first "
-#~ "extra axis. The general interface "
-#~ "supports ``f`` with multiple arguments "
-#~ "and broadcast in the fist dimension."
+#~ msgid "[description], defaults to None, which is the same as ``ket``"
#~ msgstr ""
-#~ msgid "function to be broadcasted."
+#~ msgid "[description], defaults to True"
#~ msgstr ""
-#~ msgid "vmap version of ``f``"
+#~ msgid "[description], defaults to False"
#~ msgstr ""
#~ msgid ""
-#~ "Return a zeros-matrix of dimension "
-#~ "`dim` Depending on specific backends, "
-#~ "`dim` has to be either an int "
-#~ "(numpy, torch, tensorflow) or a "
-#~ "`ShapeType` object (for block-sparse "
-#~ "backends)."
-#~ msgstr ""
-
-#~ msgid "Block-sparse behavior is currently not supported Args:"
-#~ msgstr ""
-
-#~ msgid "Backend magic inherited from tensornetwork: numpy backend"
+#~ "Not an ideal visualization for quantum"
+#~ " circuit, but reserve here as a "
+#~ "general approch to show tensornetwork "
+#~ "[Deperacted, use ``qir2tex instead``]"
#~ msgstr ""
-#~ msgid ""
-#~ "Bases: "
-#~ ":py:class:`tensornetwork.backends.numpy.numpy_backend.NumPyBackend`"
+#~ msgid "Constants and setups"
#~ msgstr ""
#~ msgid ""
-#~ "see the original backend API at "
-#~ "`numpy backend "
-#~ "`_"
-#~ msgstr ""
-
-#~ msgid "Backend magic inherited from tensornetwork: pytorch backend"
+#~ "To set runtime contractor of the "
+#~ "tensornetwork for a better contraction "
+#~ "path."
#~ msgstr ""
#~ msgid ""
-#~ "Bases: "
-#~ ":py:class:`tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend`"
+#~ "\"auto\", \"greedy\", \"branch\", \"plain\", "
+#~ "\"tng\", \"custom\", \"custom_stateful\". defaults"
+#~ " to None (\"auto\")"
#~ msgstr ""
-#~ msgid ""
-#~ "See the original backend API at "
-#~ "``pytorch backend``. "
-#~ "``_"
+#~ msgid "Valid for \"custom\" or \"custom_stateful\" as method, defaults to None"
#~ msgstr ""
#~ msgid ""
-#~ "Note the functionality provided by "
-#~ "pytorch backend is incomplete, it "
-#~ "currenly lacks native efficicent jit and"
-#~ " vmap support."
+#~ "It is not very useful, as "
+#~ "``memory_limit`` leads to ``branch`` "
+#~ "contraction instead of ``greedy`` which "
+#~ "is rather slow, defaults to None"
#~ msgstr ""
-#~ msgid "Backend magic inherited from tensornetwork: tensorflow backend"
+#~ msgid "Tensornetwork version is too low to support some of the contractors."
#~ msgstr ""
-#~ msgid ""
-#~ "Bases: "
-#~ ":py:class:`tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend`"
+#~ msgid "Unknown method options."
#~ msgstr ""
-#~ msgid ""
-#~ "See the original backend API at "
-#~ "`'tensorflow backend''. "
-#~ "`_"
+#~ msgid "The new tensornetwork with its contractor set."
#~ msgstr ""
-#~ msgid "Some common noise quantum channels."
+#~ msgid "To set the runtime numerical dtype of tensors."
#~ msgstr ""
#~ msgid ""
-#~ "Return an amplitude damping channel. "
-#~ "Notice: Amplitude damping corrspondings to "
-#~ "p = 1."
+#~ "\"complex64\" or \"complex128\", defaults to"
+#~ " None, which is equivalent to "
+#~ "\"complex64\"."
#~ msgstr ""
-#~ msgid ""
-#~ "\\sqrt{p}\n"
-#~ "\\begin{bmatrix}\n"
-#~ " 1 & 0\\\\\n"
-#~ " 0 & \\sqrt{1-\\gamma}\\\\\n"
-#~ "\\end{bmatrix}\\qquad\n"
-#~ "\\sqrt{p}\n"
-#~ "\\begin{bmatrix}\n"
-#~ " 0 & \\sqrt{\\gamma}\\\\\n"
-#~ " 0 & 0\\\\\n"
-#~ "\\end{bmatrix}\\qquad\n"
-#~ "\\sqrt{1-p}\n"
-#~ "\\begin{bmatrix}\n"
-#~ " \\sqrt{1-\\gamma} & 0\\\\\n"
-#~ " 0 & 1\\\\\n"
-#~ "\\end{bmatrix}\\qquad\n"
-#~ "\\sqrt{1-p}\n"
-#~ "\\begin{bmatrix}\n"
-#~ " 0 & 0\\\\\n"
-#~ " \\sqrt{\\gamma} & 0\\\\\n"
-#~ "\\end{bmatrix}\n"
-#~ "\n"
+#~ msgid "The naive state-vector simulator contraction path."
#~ msgstr ""
-#~ msgid "the damping parameter of amplitude (:math:`\\gamma`)"
+#~ msgid "The list of ``tn.Node``."
#~ msgstr ""
-#~ msgid ":math:`p`"
+#~ msgid "The list of dangling node edges, defaults to be None."
#~ msgstr ""
-#~ msgid "An amplitude damping channel with given :math:`\\gamma` and :math:`p`"
+#~ msgid "The ``tn.Node`` after contraction"
#~ msgstr ""
-#~ msgid "Return a Depolarizing Channel"
+#~ msgid "To set the runtime backend of tensorcircuit."
#~ msgstr ""
#~ msgid ""
-#~ "\\sqrt{1-p_x-p_y-p_z}\n"
-#~ "\\begin{bmatrix}\n"
-#~ " 1 & 0\\\\\n"
-#~ " 0 & 1\\\\\n"
-#~ "\\end{bmatrix}\\qquad\n"
-#~ "\\sqrt{p_x}\n"
-#~ "\\begin{bmatrix}\n"
-#~ " 0 & 1\\\\\n"
-#~ " 1 & 0\\\\\n"
-#~ "\\end{bmatrix}\\qquad\n"
-#~ "\\sqrt{p_y}\n"
-#~ "\\begin{bmatrix}\n"
-#~ " 0 & -1j\\\\\n"
-#~ " 1j & 0\\\\\n"
-#~ "\\end{bmatrix}\\qquad\n"
-#~ "\\sqrt{p_z}\n"
-#~ "\\begin{bmatrix}\n"
-#~ " 1 & 0\\\\\n"
-#~ " 0 & -1\\\\\n"
-#~ "\\end{bmatrix}\n"
-#~ "\n"
+#~ "Note: ``tc.set_backend`` and "
+#~ "``tc.cons.set_tensornetwork_backend`` are the same."
#~ msgstr ""
-#~ msgid ":math:`p_x`"
+#~ msgid ""
+#~ "\"numpy\", \"tensorflow\", \"jax\", \"pytorch\". "
+#~ "defaults to None, which gives the "
+#~ "same behavior as "
+#~ "``tensornetwork.backend_contextmanager.get_default_backend()``."
#~ msgstr ""
-#~ msgid ":math:`p_y`"
+#~ msgid "Whether the object should be set as global."
#~ msgstr ""
-#~ msgid ":math:`p_z`"
+#~ msgid "Quantum circuit class but with density matrix simulator"
#~ msgstr ""
-#~ msgid "Sequences of Gates"
+#~ msgid "Quantum circuit class but with density matrix simulator: v2"
#~ msgstr ""
-#~ msgid "Convert Kraus operators to one Tensor (as one Super Gate)."
+#~ msgid "Bases: :py:class:`tensorcircuit.densitymatrix.DMCircuit`"
#~ msgstr ""
-#~ msgid ""
-#~ "\\sum_{k}^{} K_k \\otimes K_k^{\\dagger}\n"
-#~ "\n"
+#~ msgid "Experimental features"
#~ msgstr ""
-#~ msgid "A sequence of Gate"
+#~ msgid ""
+#~ "Declarations of single-qubit and two-"
+#~ "qubit gates and their corresponding "
+#~ "matrix."
#~ msgstr ""
-#~ msgid "The corresponding Tensor of the list of Kraus operators"
+#~ msgid "Bases: :py:class:`tensornetwork.network_components.Node`"
#~ msgstr ""
-#~ msgid "Return a phase damping channel with given :math:`\\gamma`"
+#~ msgid "Wrapper of tn.Node, quantum gate"
#~ msgstr ""
-#~ msgid ""
-#~ "\\begin{bmatrix}\n"
-#~ " 1 & 0\\\\\n"
-#~ " 0 & \\sqrt{1-\\gamma}\\\\\n"
-#~ "\\end{bmatrix}\\qquad\n"
-#~ "\\begin{bmatrix}\n"
-#~ " 0 & 0\\\\\n"
-#~ " 0 & \\sqrt{\\gamma}\\\\\n"
-#~ "\\end{bmatrix}\n"
-#~ "\n"
+#~ msgid "Bases: :py:class:`tensorcircuit.gates.GateF`"
#~ msgstr ""
-#~ msgid "The damping parameter of phase (:math:`\\gamma`)"
+#~ msgid "Note one should provide the gate with properly reshaped."
#~ msgstr ""
-#~ msgid "A phase damping channel with given :math:`\\gamma`"
+#~ msgid "corresponding gate"
#~ msgstr ""
-#~ msgid "Reset channel"
+#~ msgid "The name of the gate."
#~ msgstr ""
-#~ msgid ""
-#~ "\\begin{bmatrix}\n"
-#~ " 1 & 0\\\\\n"
-#~ " 0 & 0\\\\\n"
-#~ "\\end{bmatrix}\\qquad\n"
-#~ "\\begin{bmatrix}\n"
-#~ " 0 & 1\\\\\n"
-#~ " 0 & 0\\\\\n"
-#~ "\\end{bmatrix}\n"
-#~ "\n"
+#~ msgid "the resulted gate"
#~ msgstr ""
-#~ msgid "Check identity of a single qubit Kraus operators."
+#~ msgid "Convert the inputs to Tensor with specified dtype."
#~ msgstr ""
-#~ msgid "Examples:"
+#~ msgid "inputs"
#~ msgstr ""
-#~ msgid ""
-#~ "\\sum_{k}^{} K_k^{\\dagger} K_k = I\n"
-#~ "\n"
+#~ msgid "dtype of the output Tensors"
#~ msgstr ""
-#~ msgid "List of Kraus operators."
+#~ msgid "List of Tensors"
#~ msgstr ""
-#~ msgid "Quantum circuit: state simulator"
+#~ msgid "Returns a LaTeX bmatrix."
#~ msgstr ""
-#~ msgid "``Circuit`` class. Simple usage demo below."
+#~ msgid "Formatted Display:"
#~ msgstr ""
-#~ msgid "Apply any gate with parameters on the circuit."
+#~ msgid ""
+#~ "\\begin{bmatrix} 1.+0.j & 0.+0.j\\\\ "
+#~ "0.+0.j & 1.+0.j \\end{bmatrix}\n"
+#~ "\n"
#~ msgstr ""
-#~ msgid "Qubit number than the gate applies on."
+#~ msgid "2D numpy array"
#~ msgstr ""
-#~ msgid "Parameters for the gate"
+#~ msgid "ValueError(\"bmatrix can at most display two dimensions\")"
#~ msgstr ""
-#~ msgid "Apply cnot gate on the circuit."
+#~ msgid "latex str for bmatrix of array a"
#~ msgstr ""
#~ msgid ""
-#~ "Qubit number than the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 1.+0.j "
-#~ "& 0.+0.j & 0.+0.j & 0.+0.j\\\\ "
-#~ "0.+0.j & 1.+0.j & 0.+0.j & "
-#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 0.+0.j"
-#~ " & 1.+0.j\\\\ 0.+0.j & 0.+0.j &"
-#~ " 1.+0.j & 0.+0.j \\end{bmatrix}"
+#~ "Controlled rotation gate, when the "
+#~ "control bit is 1, `rgate` is "
+#~ "applied on the target gate."
#~ msgstr ""
-#~ msgid "Qubit number than the gate applies on. The matrix for the gate is"
+#~ msgid "angle in radians"
#~ msgstr ""
-#~ msgid ""
-#~ "\\begin{bmatrix} 1.+0.j & 0.+0.j & "
-#~ "0.+0.j & 0.+0.j\\\\ 0.+0.j & 1.+0.j"
-#~ " & 0.+0.j & 0.+0.j\\\\ 0.+0.j &"
-#~ " 0.+0.j & 0.+0.j & 1.+0.j\\\\ "
-#~ "0.+0.j & 0.+0.j & 1.+0.j & 0.+0.j"
-#~ " \\end{bmatrix}"
+#~ msgid "CR Gate"
#~ msgstr ""
-#~ msgid "Apply cr gate with parameters on the circuit."
+#~ msgid ""
+#~ "Faster exponential gate, directly implemented"
+#~ " based on RHS, only work when: "
+#~ ":math:`U^2` is identity matrix."
#~ msgstr ""
-#~ msgid "Apply crx gate with parameters on the circuit."
+#~ msgid ""
+#~ "\\rm{exp}(U) &= e^{-i \\theta U} \\\\\n"
+#~ " &= \\cos(\\theta) I - j \\sin(\\theta) U \\\\\n"
+#~ "\n"
#~ msgstr ""
-#~ msgid "Apply cry gate with parameters on the circuit."
+#~ msgid "input unitary (U)"
#~ msgstr ""
-#~ msgid "Apply crz gate with parameters on the circuit."
+#~ msgid "suffix of Gate name"
#~ msgstr ""
-#~ msgid "Apply cy gate on the circuit."
+#~ msgid "Exponential Gate"
#~ msgstr ""
-#~ msgid ""
-#~ "Qubit number than the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 1.+0.j "
-#~ "& 0.+0.j & 0.+0.j & 0.+0.j\\\\ "
-#~ "0.+0.j & 1.+0.j & 0.+0.j & "
-#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 0.+0.j"
-#~ " & 0.-1.j\\\\ 0.+0.j & 0.+0.j &"
-#~ " 0.+1.j & 0.+0.j \\end{bmatrix}"
+#~ msgid "Exponential gate."
#~ msgstr ""
#~ msgid ""
-#~ "\\begin{bmatrix} 1.+0.j & 0.+0.j & "
-#~ "0.+0.j & 0.+0.j\\\\ 0.+0.j & 1.+0.j"
-#~ " & 0.+0.j & 0.+0.j\\\\ 0.+0.j &"
-#~ " 0.+0.j & 0.+0.j & 0.-1.j\\\\ "
-#~ "0.+0.j & 0.+0.j & 0.+1.j & 0.+0.j"
-#~ " \\end{bmatrix}"
-#~ msgstr ""
-
-#~ msgid "Apply cz gate on the circuit."
+#~ "\\rm{exp}(U) = e^{-i \\theta U}\n"
+#~ "\n"
#~ msgstr ""
-#~ msgid ""
-#~ "Qubit number than the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 1.+0.j "
-#~ "& 0.+0.j & 0.+0.j & 0.+0.j\\\\ "
-#~ "0.+0.j & 1.+0.j & 0.+0.j & "
-#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 1.+0.j"
-#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
-#~ " 0.+0.j & -1.+0.j \\end{bmatrix}"
+#~ msgid "iSwap gate."
#~ msgstr ""
#~ msgid ""
-#~ "\\begin{bmatrix} 1.+0.j & 0.+0.j & "
-#~ "0.+0.j & 0.+0.j\\\\ 0.+0.j & 1.+0.j"
-#~ " & 0.+0.j & 0.+0.j\\\\ 0.+0.j &"
-#~ " 0.+0.j & 1.+0.j & 0.+0.j\\\\ "
-#~ "0.+0.j & 0.+0.j & 0.+0.j & -1.+0.j"
-#~ " \\end{bmatrix}"
+#~ "iSwap(\\theta) =\n"
+#~ "\\begin{pmatrix}\n"
+#~ " 1 & 0 & 0 & 0\\\\\n"
+#~ " 0 & \\cos(\\frac{\\pi}{2} \\theta )"
+#~ " & j \\sin(\\frac{\\pi}{2} \\theta ) "
+#~ "& 0\\\\\n"
+#~ " 0 & j \\sin(\\frac{\\pi}{2} \\theta"
+#~ " ) & \\cos(\\frac{\\pi}{2} \\theta ) "
+#~ "& 0\\\\\n"
+#~ " 0 & 0 & 0 & 1\\\\\n"
+#~ "\\end{pmatrix}\n"
+#~ "\n"
#~ msgstr ""
-#~ msgid "Apply exp gate with parameters on the circuit."
+#~ msgid "iSwap Gate"
#~ msgstr ""
-#~ msgid "Apply exp1 gate with parameters on the circuit."
+#~ msgid "Convert Gate to numpy array."
#~ msgstr ""
-#~ msgid "Apply h gate on the circuit."
+#~ msgid "input Gate"
#~ msgstr ""
-#~ msgid ""
-#~ "Qubit number than the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} "
-#~ "0.70710677+0.j & 0.70710677+0.j\\\\ "
-#~ "0.70710677+0.j & -0.70710677+0.j \\end{bmatrix}"
+#~ msgid "corresponding Tensor"
#~ msgstr ""
#~ msgid ""
-#~ "\\begin{bmatrix} 0.70710677+0.j & "
-#~ "0.70710677+0.j\\\\ 0.70710677+0.j & "
-#~ "-0.70710677+0.j \\end{bmatrix}"
+#~ "Inner helper function to generate gate"
+#~ " functions, such as ``z()`` from "
+#~ "``_z_matrix``"
#~ msgstr ""
-#~ msgid "Apply i gate on the circuit."
+#~ msgid "General single qubit rotation gate"
#~ msgstr ""
#~ msgid ""
-#~ "Qubit number than the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 1.+0.j "
-#~ "& 0.+0.j\\\\ 0.+0.j & 1.+0.j "
-#~ "\\end{bmatrix}"
+#~ "R(\\theta, \\phi, \\alpha) = i \\cos(\\theta) I\n"
+#~ "\n"
#~ msgstr ""
#~ msgid ""
-#~ "\\begin{bmatrix} 1.+0.j & 0.+0.j\\\\ "
-#~ "0.+0.j & 1.+0.j \\end{bmatrix}"
-#~ msgstr ""
-
-#~ msgid "Apply iswap gate on the circuit."
+#~ "- i \\cos(\\phi) \\sin(\\alpha) \\sin(\\theta) X\n"
+#~ "\n"
#~ msgstr ""
#~ msgid ""
-#~ "Qubit number than the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 1.+0.j "
-#~ "& 0.+0.j & 0.+0.j & 0.+0.j\\\\ "
-#~ "0.+0.j & 0.+0.j & 0.+1.j & "
-#~ "0.+0.j\\\\ 0.+0.j & 0.+1.j & 0.+0.j"
-#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
-#~ " 0.+0.j & 1.+0.j \\end{bmatrix}"
+#~ "- i \\sin(\\phi) \\sin(\\alpha) \\sin(\\theta) Y\n"
+#~ "\n"
#~ msgstr ""
#~ msgid ""
-#~ "\\begin{bmatrix} 1.+0.j & 0.+0.j & "
-#~ "0.+0.j & 0.+0.j\\\\ 0.+0.j & 0.+0.j"
-#~ " & 0.+1.j & 0.+0.j\\\\ 0.+0.j &"
-#~ " 0.+1.j & 0.+0.j & 0.+0.j\\\\ "
-#~ "0.+0.j & 0.+0.j & 0.+0.j & 1.+0.j"
-#~ " \\end{bmatrix}"
+#~ "- i \\sin(\\theta) \\cos(\\alpha) Z\n"
+#~ "\n"
#~ msgstr ""
-#~ msgid "Apply r gate with parameters on the circuit."
+#~ msgid "R Gate"
#~ msgstr ""
-#~ msgid "Apply rx gate with parameters on the circuit."
+#~ msgid "Random single qubit gate described in https://arxiv.org/abs/2002.07730."
#~ msgstr ""
-#~ msgid "Apply ry gate with parameters on the circuit."
+#~ msgid "A random single qubit gate"
#~ msgstr ""
-#~ msgid "Apply rz gate with parameters on the circuit."
+#~ msgid "Returns a random two-qubit gate."
#~ msgstr ""
-#~ msgid "Apply s gate on the circuit."
+#~ msgid "a random two-qubit gate"
#~ msgstr ""
#~ msgid ""
-#~ "Qubit number than the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 1.+0.j "
-#~ "& 0.+0.j\\\\ 0.+0.j & 0.+1.j "
-#~ "\\end{bmatrix}"
+#~ "Rotation gate, which is in matrix "
+#~ "exponential form, shall give the same"
+#~ " result as `rgate`."
#~ msgstr ""
#~ msgid ""
-#~ "\\begin{bmatrix} 1.+0.j & 0.+0.j\\\\ "
-#~ "0.+0.j & 0.+1.j \\end{bmatrix}"
-#~ msgstr ""
-
-#~ msgid "Apply sd gate on the circuit."
+#~ "mx = \\sin(\\alpha) \\cos(\\phi) X\n"
+#~ "\n"
#~ msgstr ""
#~ msgid ""
-#~ "Qubit number than the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 1.+0.j "
-#~ "& 0.+0.j\\\\ 0.+0.j & 0.-1.j "
-#~ "\\end{bmatrix}"
+#~ "my = \\sin(\\alpha) \\sin(\\phi) Y\n"
+#~ "\n"
#~ msgstr ""
#~ msgid ""
-#~ "\\begin{bmatrix} 1.+0.j & 0.+0.j\\\\ "
-#~ "0.+0.j & 0.-1.j \\end{bmatrix}"
-#~ msgstr ""
-
-#~ msgid "Apply swap gate on the circuit."
+#~ "mz = \\cos(\\alpha) Z\n"
+#~ "\n"
#~ msgstr ""
#~ msgid ""
-#~ "Qubit number than the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 1.+0.j "
-#~ "& 0.+0.j & 0.+0.j & 0.+0.j\\\\ "
-#~ "0.+0.j & 0.+0.j & 1.+0.j & "
-#~ "0.+0.j\\\\ 0.+0.j & 1.+0.j & 0.+0.j"
-#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
-#~ " 0.+0.j & 1.+0.j \\end{bmatrix}"
+#~ "R(\\theta, \\alpha, \\phi) = e^{-i\\theta (mx+my+mz)}\n"
+#~ "\n"
#~ msgstr ""
-#~ msgid ""
-#~ "\\begin{bmatrix} 1.+0.j & 0.+0.j & "
-#~ "0.+0.j & 0.+0.j\\\\ 0.+0.j & 0.+0.j"
-#~ " & 1.+0.j & 0.+0.j\\\\ 0.+0.j &"
-#~ " 1.+0.j & 0.+0.j & 0.+0.j\\\\ "
-#~ "0.+0.j & 0.+0.j & 0.+0.j & 1.+0.j"
-#~ " \\end{bmatrix}"
+#~ msgid "Rotation Gate"
#~ msgstr ""
-#~ msgid "Apply t gate on the circuit."
+#~ msgid "Rotation gate along X axis."
#~ msgstr ""
#~ msgid ""
-#~ "Qubit number than the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 1. &"
-#~ " +0.j & 0. & +0.j\\\\ 0. &"
-#~ " +0.j & 0.70710677+0.70710677j \\end{bmatrix}"
+#~ "RX(\\theta) = e^{-i\\frac{\\theta}{2}X}\n"
+#~ "\n"
#~ msgstr ""
-#~ msgid ""
-#~ "\\begin{bmatrix} 1. & +0.j & 0. "
-#~ "& +0.j\\\\ 0. & +0.j & "
-#~ "0.70710677+0.70710677j \\end{bmatrix}"
+#~ msgid "RX Gate"
#~ msgstr ""
-#~ msgid "Apply td gate on the circuit."
+#~ msgid "Rotation gate along Y axis."
#~ msgstr ""
#~ msgid ""
-#~ "Qubit number than the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 1. &"
-#~ " +0.j & 0. & +0.j\\\\ 0. &"
-#~ " +0.j & 0.70710677-0.70710677j \\end{bmatrix}"
+#~ "RY(\\theta) = e^{-i\\frac{\\theta}{2}Y}\n"
+#~ "\n"
#~ msgstr ""
-#~ msgid ""
-#~ "\\begin{bmatrix} 1. & +0.j & 0. "
-#~ "& +0.j\\\\ 0. & +0.j & "
-#~ "0.70710677-0.70710677j \\end{bmatrix}"
+#~ msgid "RY Gate"
#~ msgstr ""
-#~ msgid "Apply toffoli gate on the circuit."
+#~ msgid "Rotation gate along Z axis."
#~ msgstr ""
#~ msgid ""
-#~ "Qubit number than the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 1.+0.j "
-#~ "& 0.+0.j & 0.+0.j & 0.+0.j & "
-#~ "0.+0.j & 0.+0.j & 0.+0.j & "
-#~ "0.+0.j\\\\ 0.+0.j & 1.+0.j & 0.+0.j"
-#~ " & 0.+0.j & 0.+0.j & 0.+0.j &"
-#~ " 0.+0.j & 0.+0.j\\\\ 0.+0.j & "
-#~ "0.+0.j & 1.+0.j & 0.+0.j & 0.+0.j"
-#~ " & 0.+0.j & 0.+0.j & 0.+0.j\\\\"
-#~ " 0.+0.j & 0.+0.j & 0.+0.j & "
-#~ "1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j"
-#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
-#~ " 0.+0.j & 0.+0.j & 1.+0.j & "
-#~ "0.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j"
-#~ " & 0.+0.j & 0.+0.j & 0.+0.j &"
-#~ " 0.+0.j & 1.+0.j & 0.+0.j & "
-#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 0.+0.j"
-#~ " & 0.+0.j & 0.+0.j & 0.+0.j &"
-#~ " 0.+0.j & 1.+0.j\\\\ 0.+0.j & "
-#~ "0.+0.j & 0.+0.j & 0.+0.j & 0.+0.j"
-#~ " & 0.+0.j & 1.+0.j & 0.+0.j "
-#~ "\\end{bmatrix}"
+#~ "RZ(\\theta) = e^{-i\\frac{\\theta}{2}Z}\n"
+#~ "\n"
#~ msgstr ""
-#~ msgid ""
-#~ "\\begin{bmatrix} 1.+0.j & 0.+0.j & "
-#~ "0.+0.j & 0.+0.j & 0.+0.j & 0.+0.j"
-#~ " & 0.+0.j & 0.+0.j\\\\ 0.+0.j &"
-#~ " 1.+0.j & 0.+0.j & 0.+0.j & "
-#~ "0.+0.j & 0.+0.j & 0.+0.j & "
-#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 1.+0.j"
-#~ " & 0.+0.j & 0.+0.j & 0.+0.j &"
-#~ " 0.+0.j & 0.+0.j\\\\ 0.+0.j & "
-#~ "0.+0.j & 0.+0.j & 1.+0.j & 0.+0.j"
-#~ " & 0.+0.j & 0.+0.j & 0.+0.j\\\\"
-#~ " 0.+0.j & 0.+0.j & 0.+0.j & "
-#~ "0.+0.j & 1.+0.j & 0.+0.j & 0.+0.j"
-#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
-#~ " 0.+0.j & 0.+0.j & 0.+0.j & "
-#~ "1.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j"
-#~ " & 0.+0.j & 0.+0.j & 0.+0.j &"
-#~ " 0.+0.j & 0.+0.j & 0.+0.j & "
-#~ "1.+0.j\\\\ 0.+0.j & 0.+0.j & 0.+0.j"
-#~ " & 0.+0.j & 0.+0.j & 0.+0.j &"
-#~ " 1.+0.j & 0.+0.j \\end{bmatrix}"
+#~ msgid "RZ Gate"
#~ msgstr ""
-#~ msgid "Apply wroot gate on the circuit."
+#~ msgid "Interfaces bridging different backends"
#~ msgstr ""
-#~ msgid ""
-#~ "Qubit number than the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} "
-#~ "0.70710677+0.j & -0.5 & -0.5j\\\\ 0.5"
-#~ " & -0.5j & 0.70710677+0.j \\end{bmatrix}"
+#~ msgid "Keras layer for tc quantum function"
#~ msgstr ""
#~ msgid ""
-#~ "\\begin{bmatrix} 0.70710677+0.j & -0.5 &"
-#~ " -0.5j\\\\ 0.5 & -0.5j & "
-#~ "0.70710677+0.j \\end{bmatrix}"
-#~ msgstr ""
-
-#~ msgid "Apply x gate on the circuit."
+#~ "`QuantumLayer` wraps the quantum function "
+#~ "`f` as a `keras.Layer` so that "
+#~ "tensorcircuit is better integrated with "
+#~ "tensorflow."
#~ msgstr ""
-#~ msgid ""
-#~ "Qubit number than the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 0.+0.j "
-#~ "& 1.+0.j\\\\ 1.+0.j & 0.+0.j "
-#~ "\\end{bmatrix}"
+#~ msgid "[description], defaults to \"glorot_uniform\""
#~ msgstr ""
#~ msgid ""
-#~ "\\begin{bmatrix} 0.+0.j & 1.+0.j\\\\ "
-#~ "1.+0.j & 0.+0.j \\end{bmatrix}"
-#~ msgstr ""
-
-#~ msgid "Apply y gate on the circuit."
+#~ "Load function from the files in "
+#~ "the ``tf.savedmodel`` format. We can "
+#~ "load several functions at the same "
+#~ "time, as they can be the same "
+#~ "function of different input shapes."
#~ msgstr ""
#~ msgid ""
-#~ "Qubit number than the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 0.+0.j "
-#~ "& 0.-1.j\\\\ 0.+1.j & 0.+0.j "
-#~ "\\end{bmatrix}"
+#~ "The fallback function when all functions"
+#~ " loaded are failed, defaults to None"
#~ msgstr ""
#~ msgid ""
-#~ "\\begin{bmatrix} 0.+0.j & 0.-1.j\\\\ "
-#~ "0.+1.j & 0.+0.j \\end{bmatrix}"
-#~ msgstr ""
-
-#~ msgid "Apply z gate on the circuit."
+#~ "When there is not legal loaded "
+#~ "function of the input shape and no"
+#~ " fallback callable."
#~ msgstr ""
#~ msgid ""
-#~ "Qubit number than the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 1.+0.j "
-#~ "& 0.+0.j\\\\ 0.+0.j & -1.+0.j "
-#~ "\\end{bmatrix}"
+#~ "A function that tries all loaded "
+#~ "function against the input until the "
+#~ "first success one."
#~ msgstr ""
#~ msgid ""
-#~ "\\begin{bmatrix} 1.+0.j & 0.+0.j\\\\ "
-#~ "0.+0.j & -1.+0.j \\end{bmatrix}"
+#~ "The keras loss function that directly"
+#~ " taking the model output as the "
+#~ "loss."
#~ msgstr ""
-#~ msgid "Circuit object based on state simulator."
+#~ msgid "Save tf function in the file (``tf.savedmodel`` format)."
#~ msgstr ""
-#~ msgid "The number of qubits in the circuit."
+#~ msgid "``tf.function`` ed function with graph building"
#~ msgstr ""
-#~ msgid ""
-#~ "If not None, the initial state of"
-#~ " the circuit is taken as ``inputs``"
-#~ " instead of :math:`\\vert 0\\rangle^n` "
-#~ "qubits, defaults to None"
+#~ msgid "the dir path to save the function"
#~ msgstr ""
-#~ msgid "(Nodes, dangling Edges) for a MPS like initial wavefunction"
+#~ msgid "FiniteMPS from tensornetwork with bug fixed"
#~ msgstr ""
#~ msgid ""
-#~ "dict if two qubit gate is ready"
-#~ " for split, including parameters for "
-#~ "at least one of ``max_singular_values`` "
-#~ "and ``max_truncation_err``."
+#~ "Bases: "
+#~ ":py:class:`tensornetwork.matrixproductstates.finite_mps.FiniteMPS`"
#~ msgstr ""
#~ msgid ""
-#~ "Monte Carlo trajectory simulation of "
-#~ "general Kraus channel whose Kraus "
-#~ "operators cannot be amplified to unitary"
-#~ " operators. For unitary operators composed"
-#~ " Kraus channel, :py:meth:`unitary_kraus` is "
-#~ "much faster."
+#~ "Apply a two-site gate to an "
+#~ "MPS. This routine will in general "
+#~ "destroy any canonical form of the "
+#~ "state. If a canonical form is "
+#~ "needed, the user can restore it "
+#~ "using `FiniteMPS.position`."
#~ msgstr ""
-#~ msgid ""
-#~ "This function is jittable in theory. "
-#~ "But only jax+GPU combination is "
-#~ "recommended for jit since the graph "
-#~ "building time is too long for "
-#~ "other backend options; though the "
-#~ "running time of the function is "
-#~ "very fast for every case."
+#~ msgid "A two-body gate."
#~ msgstr ""
-#~ msgid "list of ``tn.Node`` for Kraus operators"
+#~ msgid "The first site where the gate acts."
#~ msgstr ""
-#~ msgid "the qubits index that Kraus channel is applied on"
+#~ msgid "The second site where the gate acts."
#~ msgstr ""
-#~ msgid ""
-#~ "random tensor between 0 or 1, "
-#~ "defaults to be None, the random "
-#~ "number will be generated automatically"
+#~ msgid "The maximum number of singular values to keep."
#~ msgstr ""
-#~ msgid "Compute the expectation of corresponding operators."
+#~ msgid "The maximum allowed truncation error."
#~ msgstr ""
#~ msgid ""
-#~ "operator and its position on the "
-#~ "circuit, eg. ``(tc.gates.z(), [1, ]), "
-#~ "(tc.gates.x(), [2, ])`` is for operator"
-#~ " :math:`Z_1X_2`"
+#~ "An optional value to choose the "
+#~ "MPS tensor at `center_position` to be"
+#~ " isometric after the application of "
+#~ "the gate. Defaults to `site1`. If "
+#~ "the MPS is canonical "
+#~ "(i.e.`BaseMPS.center_position != None`), and "
+#~ "if the orthogonality center coincides "
+#~ "with either `site1` or `site2`, the "
+#~ "orthogonality center will be shifted to"
+#~ " `center_position` (`site1` by default). If"
+#~ " the orthogonality center does not "
+#~ "coincide with `(site1, site2)` then "
+#~ "`MPS.center_position` is set to `None`."
#~ msgstr ""
-#~ msgid ""
-#~ "if True, then the wavefunction tensor"
-#~ " is cached for further expectation "
-#~ "evaluation, defaults to be true"
+#~ msgid "Multiply `max_truncation_err` with the largest singular value."
#~ msgstr ""
-#~ msgid "Tensor with one element"
+#~ msgid ""
+#~ "\"rank of gate is {} but has "
+#~ "to be 4\", \"site1 = {} is "
+#~ "not between 0 <= site < N -"
+#~ " 1 = {}\", \"site2 = {} is "
+#~ "not between 1 <= site < N ="
+#~ " {}\",\"Found site2 ={}, site1={}. Only "
+#~ "nearest neighbor gates are currently "
+#~ "supported\", \"f center_position = "
+#~ "{center_position} not f in {(site1, "
+#~ "site2)} \", or \"center_position = {},"
+#~ " but gate is applied at sites "
+#~ "{}, {}. Truncation should only be "
+#~ "done if the gate is applied at "
+#~ "the center position of the MPS.\""
#~ msgstr ""
-#~ msgid "[WIP], check whether the circuit is legal."
+#~ msgid "A scalar tensor containing the truncated weight of the truncation."
#~ msgstr ""
-#~ msgid "the bool indicating whether the circuit is legal"
+#~ msgid "Measure the expectation value of local operators `ops` site `sites`."
#~ msgstr ""
-#~ msgid "Take measurement to the given quantum lines."
+#~ msgid "A list Tensors of rank 2; the local operators to be measured."
#~ msgstr ""
-#~ msgid "measure on which quantum line"
+#~ msgid "Sites where `ops` act."
#~ msgstr ""
-#~ msgid "if true, theoretical probability is also returned"
+#~ msgid "measurements :math:`\\langle` `ops[n]`:math:`\\rangle` for n in `sites`"
#~ msgstr ""
#~ msgid ""
-#~ "Middle measurement in z-basis on the "
-#~ "circuit, note the wavefunction output is"
-#~ " not normalized with ``mid_measurement`` "
-#~ "involved, one should normalize the state"
-#~ " manually if needed."
+#~ "Compute the correlator :math:`\\langle` "
+#~ "`op1[site1], op2[s]`:math:`\\rangle` between `site1`"
+#~ " and all sites `s` in `sites2`. "
+#~ "If `s == site1`, `op2[s]` will be"
+#~ " applied first."
#~ msgstr ""
-#~ msgid "the index of qubit that the Z direction postselection applied on"
+#~ msgid "Tensor of rank 2; the local operator at `site1`."
#~ msgstr ""
-#~ msgid "0 for spin up, 1 for spin down, defaults to be 0"
+#~ msgid "Tensor of rank 2; the local operator at `sites2`."
#~ msgstr ""
-#~ msgid "Reference: arXiv:1201.3974."
+#~ msgid "The site where `op1` acts"
#~ msgstr ""
-#~ msgid "sampled bit string and the corresponding theoretical probability"
+#~ msgid "Sites where operator `op2` acts."
#~ msgstr ""
-#~ msgid "Replace the input state with the circuit structure unchanged."
+#~ msgid ""
+#~ "Correlator :math:`\\langle` `op1[site1], "
+#~ "op2[s]`:math:`\\rangle` for `s` :math:`\\in` "
+#~ "`sites2`."
#~ msgstr ""
-#~ msgid "Input wavefunction."
+#~ msgid "Quantum circuit: MPS state simulator"
#~ msgstr ""
-#~ msgid ""
-#~ "Replace the input state in MPS "
-#~ "representation while keep the circuit "
-#~ "structure unchanged."
+#~ msgid "``MPSCircuit`` class. Simple usage demo below."
#~ msgstr ""
-#~ msgid "Compute the output wavefunction from the circuit."
+#~ msgid "MPSCircuit object based on state simulator."
#~ msgstr ""
-#~ msgid "the str indicating the form of the output wavefunction"
+#~ msgid ""
+#~ "If not None, the initial state of"
+#~ " the circuit is taken as ``tensors``"
+#~ " instead of :math:`\\vert 0\\rangle^n` "
+#~ "qubits, defaults to None"
#~ msgstr ""
-#~ msgid "Tensor with the corresponding shape"
+#~ msgid "The center position of MPS, default to 0"
#~ msgstr ""
-#~ msgid "Compute :math:`\\langle bra\\vert ops \\vert ket\\rangle`"
+#~ msgid "Apply a general qubit gate on MPS."
#~ msgstr ""
-#~ msgid "Example 1 (:math:`bra` is same as :math:`ket`)"
+#~ msgid "The Gate to be applied"
#~ msgstr ""
-#~ msgid "Example 2 (:math:`bra` is different from :math:`ket`)"
+#~ msgid "Qubit indices of the gate"
#~ msgstr ""
-#~ msgid "[description], defaults to None, which is the same as ``ket``"
+#~ msgid "\"MPS does not support application of gate on > 2 qubits.\""
#~ msgstr ""
-#~ msgid "[description], defaults to True"
+#~ msgid ""
+#~ "Apply a double qubit gate on "
+#~ "adjacent qubits of Matrix Product States"
+#~ " (MPS). Truncation rule is specified "
+#~ "by `set_truncation_rule`."
#~ msgstr ""
-#~ msgid "[description], defaults to False"
+#~ msgid "The first qubit index of the gate"
#~ msgstr ""
-#~ msgid ""
-#~ "Not an ideal visualization for quantum"
-#~ " circuit, but reserve here as a "
-#~ "general approch to show tensornetwork "
-#~ "[Deperacted, use ``qir2tex instead``]"
+#~ msgid "The second qubit index of the gate"
#~ msgstr ""
-#~ msgid "Constants and setups"
+#~ msgid "Center position of MPS, default is None"
#~ msgstr ""
#~ msgid ""
-#~ "To set runtime contractor of the "
-#~ "tensornetwork for a better contraction "
-#~ "path."
+#~ "Apply a double qubit gate on MPS."
+#~ " Truncation rule is specified by "
+#~ "`set_truncation_rule`."
#~ msgstr ""
#~ msgid ""
-#~ "\"auto\", \"greedy\", \"branch\", \"plain\", "
-#~ "\"tng\", \"custom\", \"custom_stateful\". defaults"
-#~ " to None (\"auto\")"
+#~ "Apply a single qubit gate on MPS,"
+#~ " and the gate must be unitary; "
+#~ "no truncation is needed."
#~ msgstr ""
-#~ msgid "Valid for \"custom\" or \"custom_stateful\" as method, defaults to None"
+#~ msgid "gate to be applied"
#~ msgstr ""
-#~ msgid ""
-#~ "It is not very useful, as "
-#~ "``memory_limit`` leads to ``branch`` "
-#~ "contraction instead of ``greedy`` which "
-#~ "is rather slow, defaults to None"
+#~ msgid "Qubit index of the gate"
#~ msgstr ""
-#~ msgid "Tensornetwork version is too low to support some of the contractors."
+#~ msgid "Compute the conjugate of the current MPS."
#~ msgstr ""
-#~ msgid "Unknown method options."
+#~ msgid "The constructed MPS"
#~ msgstr ""
-#~ msgid "The new tensornetwork with its contractor set."
+#~ msgid "Copy the current MPS."
#~ msgstr ""
-#~ msgid "To set the runtime numerical dtype of tensors."
+#~ msgid "Copy the current MPS without the tensors."
#~ msgstr ""
-#~ msgid ""
-#~ "\"complex64\" or \"complex128\", defaults to"
-#~ " None, which is equivalent to "
-#~ "\"complex64\"."
+#~ msgid "Compute the expectation of the corresponding double qubit gate."
#~ msgstr ""
-#~ msgid "The naive state-vector simulator contraction path."
+#~ msgid "qubit index of the gate"
#~ msgstr ""
-#~ msgid "The list of ``tn.Node``."
+#~ msgid ""
+#~ "Compute the expectation of the "
+#~ "corresponding single qubit gate in the"
+#~ " form of tensor."
#~ msgstr ""
-#~ msgid "The list of dangling node edges, defaults to be None."
+#~ msgid "Gate to be applied"
#~ msgstr ""
-#~ msgid "The ``tn.Node`` after contraction"
+#~ msgid "The expectation of the corresponding single qubit gate"
#~ msgstr ""
-#~ msgid "To set the runtime backend of tensorcircuit."
+#~ msgid ""
+#~ "Compute the expectation of the direct"
+#~ " product of the corresponding two "
+#~ "gates."
#~ msgstr ""
-#~ msgid ""
-#~ "Note: ``tc.set_backend`` and "
-#~ "``tc.cons.set_tensornetwork_backend`` are the same."
+#~ msgid "First gate to be applied"
#~ msgstr ""
-#~ msgid ""
-#~ "\"numpy\", \"tensorflow\", \"jax\", \"pytorch\". "
-#~ "defaults to None, which gives the "
-#~ "same behavior as "
-#~ "``tensornetwork.backend_contextmanager.get_default_backend()``."
+#~ msgid "Second gate to be applied"
#~ msgstr ""
-#~ msgid "Whether the object should be set as global."
+#~ msgid "Qubit index of the first gate"
#~ msgstr ""
-#~ msgid "Quantum circuit class but with density matrix simulator"
+#~ msgid "Qubit index of the second gate"
#~ msgstr ""
-#~ msgid "Quantum circuit class but with density matrix simulator: v2"
+#~ msgid "The correlation of the corresponding two qubit gates"
#~ msgstr ""
-#~ msgid "Bases: :py:class:`tensorcircuit.densitymatrix.DMCircuit`"
+#~ msgid "Construct the MPS from a given wavefunction."
#~ msgstr ""
-#~ msgid "Experimental features"
+#~ msgid "The given wavefunction (any shape is OK)"
#~ msgstr ""
#~ msgid ""
-#~ "Declarations of single-qubit and two-"
-#~ "qubit gates and their corresponding "
-#~ "matrix."
+#~ "Compute the expectation of corresponding "
+#~ "operators in the form of tensor."
#~ msgstr ""
-#~ msgid "Bases: :py:class:`tensornetwork.network_components.Node`"
+#~ msgid ""
+#~ "Operator and its position on the "
+#~ "circuit, eg. ``(gates.Z(), [1]), (gates.X(),"
+#~ " [2])`` is for operator :math:`Z_1X_2`"
#~ msgstr ""
-#~ msgid "Wrapper of tn.Node, quantum gate"
+#~ msgid "The expectation of corresponding operators"
#~ msgstr ""
-#~ msgid "Bases: :py:class:`tensorcircuit.gates.GateF`"
+#~ msgid "Get the normalized Center Position."
#~ msgstr ""
-#~ msgid "Note one should provide the gate with properly reshaped."
+#~ msgid "Normalized Center Position."
#~ msgstr ""
-#~ msgid "corresponding gate"
+#~ msgid "Check whether the circuit is legal."
#~ msgstr ""
-#~ msgid "The name of the gate."
+#~ msgid "Whether the circuit is legal."
#~ msgstr ""
-#~ msgid "the resulted gate"
+#~ msgid "integer indicating the measure on which quantum line"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Middle measurement in the z-basis on "
+#~ "the circuit, note the wavefunction "
+#~ "output is not normalized with "
+#~ "``mid_measurement`` involved, one should "
+#~ "normalized the state manually if needed."
#~ msgstr ""
-#~ msgid "Convert the inputs to Tensor with specified dtype."
+#~ msgid "The index of qubit that the Z direction postselection applied on"
#~ msgstr ""
-#~ msgid "inputs"
+#~ msgid "0 for spin up, 1 for spin down, defaults to 0"
#~ msgstr ""
-#~ msgid "dtype of the output Tensors"
+#~ msgid "Normalize MPS Circuit according to the center position."
#~ msgstr ""
-#~ msgid "List of Tensors"
+#~ msgid "Wrapper of tn.FiniteMPS.position. Set orthogonality center."
#~ msgstr ""
-#~ msgid "Returns a LaTeX bmatrix."
+#~ msgid "The orthogonality center"
#~ msgstr ""
-#~ msgid "Formatted Display:"
+#~ msgid "Compute the projection between `other` as bra and `self` as ket."
#~ msgstr ""
-#~ msgid ""
-#~ "\\begin{bmatrix} 1.+0.j & 0.+0.j\\\\ "
-#~ "0.+0.j & 1.+0.j \\end{bmatrix}\n"
-#~ "\n"
+#~ msgid "ket of the other MPS, which will be converted to bra automatically"
#~ msgstr ""
-#~ msgid "2D numpy array"
+#~ msgid "The projection in form of tensor"
#~ msgstr ""
-#~ msgid "ValueError(\"bmatrix can at most display two dimensions\")"
+#~ msgid ""
+#~ "Set truncation rules when double qubit"
+#~ " gates are applied. If nothing is "
+#~ "specified, no truncation will take place"
+#~ " and the bond dimension will keep "
+#~ "growing. For more details, refer to "
+#~ "`split_tensor`."
#~ msgstr ""
-#~ msgid "latex str for bmatrix of array a"
+#~ msgid "Tensor with shape [1, -1]"
#~ msgstr ""
#~ msgid ""
-#~ "Controlled rotation gate, when the "
-#~ "control bit is 1, `rgate` is "
-#~ "applied on the target gate."
+#~ "Split the tensor by SVD or QR "
+#~ "depends on whether a truncation is "
+#~ "required."
#~ msgstr ""
-#~ msgid "angle in radians"
+#~ msgid "The input tensor to split."
#~ msgstr ""
-#~ msgid "CR Gate"
+#~ msgid ""
+#~ "Determine the orthogonal center is on"
+#~ " the left tensor or the right "
+#~ "tensor."
#~ msgstr ""
-#~ msgid ""
-#~ "Faster exponential gate, directly implemented"
-#~ " based on RHS, only work when: "
-#~ ":math:`U^2` is identity matrix."
+#~ msgid "Two tensors after splitting"
#~ msgstr ""
-#~ msgid ""
-#~ "\\rm{exp}(U) &= e^{-i \\theta U} \\\\\n"
-#~ " &= \\cos(\\theta) I - j \\sin(\\theta) U \\\\\n"
-#~ "\n"
+#~ msgid "Quantum state and operator class backend by tensornetwork"
#~ msgstr ""
-#~ msgid "input unitary (U)"
+#~ msgid "Bases: :py:class:`tensorcircuit.quantum.QuOperator`"
#~ msgstr ""
-#~ msgid "suffix of Gate name"
+#~ msgid "Represents an adjoint (row) vector via a tensor network."
#~ msgstr ""
-#~ msgid "Exponential Gate"
+#~ msgid ""
+#~ "Constructs a new `QuAdjointVector` from "
+#~ "a tensor network. This encapsulates an"
+#~ " existing tensor network, interpreting it"
+#~ " as an adjoint vector (row vector)."
#~ msgstr ""
-#~ msgid "Exponential gate."
+#~ msgid "The edges of the network to be used as the input edges."
#~ msgstr ""
#~ msgid ""
-#~ "\\rm{exp}(U) = e^{-i \\theta U}\n"
-#~ "\n"
+#~ "Nodes used to refer to parts of"
+#~ " the tensor network that are not "
+#~ "connected to any input or output "
+#~ "edges (for example: a scalar factor)."
#~ msgstr ""
-#~ msgid "iSwap gate."
+#~ msgid ""
+#~ "Optional collection of edges to ignore"
+#~ " when performing consistency checks."
#~ msgstr ""
#~ msgid ""
-#~ "iSwap(\\theta) =\n"
-#~ "\\begin{pmatrix}\n"
-#~ " 1 & 0 & 0 & 0\\\\\n"
-#~ " 0 & \\cos(\\frac{\\pi}{2} \\theta )"
-#~ " & j \\sin(\\frac{\\pi}{2} \\theta ) "
-#~ "& 0\\\\\n"
-#~ " 0 & j \\sin(\\frac{\\pi}{2} \\theta"
-#~ " ) & \\cos(\\frac{\\pi}{2} \\theta ) "
-#~ "& 0\\\\\n"
-#~ " 0 & 0 & 0 & 1\\\\\n"
-#~ "\\end{pmatrix}\n"
-#~ "\n"
+#~ "Construct a `QuAdjointVector` directly from"
+#~ " a single tensor. This first wraps"
+#~ " the tensor in a `Node`, then "
+#~ "constructs the `QuAdjointVector` from that "
+#~ "`Node`."
#~ msgstr ""
-#~ msgid "iSwap Gate"
+#~ msgid "The tensor for constructing an QuAdjointVector."
#~ msgstr ""
-#~ msgid "Convert Gate to numpy array."
+#~ msgid ""
+#~ "Sequence of integer indices specifying "
+#~ "the order in which to interpret "
+#~ "the axes as subsystems (input edges)."
+#~ " If not specified, the axes are "
+#~ "taken in ascending order."
#~ msgstr ""
-#~ msgid "input Gate"
+#~ msgid "The new constructed QuAdjointVector give from the given tensor."
#~ msgstr ""
-#~ msgid "corresponding Tensor"
+#~ msgid ""
+#~ "Represents a linear operator via a "
+#~ "tensor network. To interpret a tensor"
+#~ " network as a linear operator, some"
+#~ " of the dangling edges must be "
+#~ "designated as `out_edges` (output edges) "
+#~ "and the rest as `in_edges` (input "
+#~ "edges). Considered as a matrix, the "
+#~ "`out_edges` represent the row index and"
+#~ " the `in_edges` represent the column "
+#~ "index. The (right) action of the "
+#~ "operator on another then consists of "
+#~ "connecting the `in_edges` of the first"
+#~ " operator to the `out_edges` of the"
+#~ " second. Can be used to do "
+#~ "simple linear algebra with tensor "
+#~ "networks."
#~ msgstr ""
#~ msgid ""
-#~ "Inner helper function to generate gate"
-#~ " functions, such as ``z()`` from "
-#~ "``_z_matrix``"
+#~ "Creates a new `QuOperator` from a "
+#~ "tensor network. This encapsulates an "
+#~ "existing tensor network, interpreting it "
+#~ "as a linear operator. The network "
+#~ "is checked for consistency: All dangling"
+#~ " edges must either be in `out_edges`,"
+#~ " `in_edges`, or `ignore_edges`."
#~ msgstr ""
-#~ msgid "General single qubit rotation gate"
+#~ msgid "The edges of the network to be used as the output edges."
#~ msgstr ""
#~ msgid ""
-#~ "R(\\theta, \\phi, \\alpha) = i \\cos(\\theta) I\n"
-#~ "\n"
+#~ "Optional collection of dangling edges to"
+#~ " ignore when performing consistency checks."
#~ msgstr ""
#~ msgid ""
-#~ "- i \\cos(\\phi) \\sin(\\alpha) \\sin(\\theta) X\n"
-#~ "\n"
+#~ "At least one reference node is "
+#~ "required to specify a scalar. None "
+#~ "provided!"
#~ msgstr ""
#~ msgid ""
-#~ "- i \\sin(\\phi) \\sin(\\alpha) \\sin(\\theta) Y\n"
-#~ "\n"
+#~ "The adjoint of the operator. This "
+#~ "creates a new `QuOperator` with "
+#~ "complex-conjugate copies of all tensors "
+#~ "in the network and with the input"
+#~ " and output edges switched."
#~ msgstr ""
#~ msgid ""
-#~ "- i \\sin(\\theta) \\cos(\\alpha) Z\n"
-#~ "\n"
-#~ msgstr ""
-
-#~ msgid "R Gate"
-#~ msgstr ""
-
-#~ msgid "Random single qubit gate described in https://arxiv.org/abs/2002.07730."
+#~ "Check that the network has the "
+#~ "expected dimensionality. This checks that "
+#~ "all input and output edges are "
+#~ "dangling and that there are no "
+#~ "other dangling edges (except any "
+#~ "specified in `ignore_edges`). If not, an"
+#~ " exception is raised."
#~ msgstr ""
-#~ msgid "A random single qubit gate"
+#~ msgid ""
+#~ "Contract the tensor network in place."
+#~ " This modifies the tensor network "
+#~ "representation of the operator (or "
+#~ "vector, or scalar), reducing it to "
+#~ "a single tensor, without changing the"
+#~ " value."
#~ msgstr ""
-#~ msgid "Returns a random two-qubit gate."
+#~ msgid "Manually specify the axis ordering of the final tensor."
#~ msgstr ""
-#~ msgid "a random two-qubit gate"
+#~ msgid "The present object."
#~ msgstr ""
#~ msgid ""
-#~ "Rotation gate, which is in matrix "
-#~ "exponential form, shall give the same"
-#~ " result as `rgate`."
+#~ "Contracts the tensor network in place"
+#~ " and returns the final tensor. Note"
+#~ " that this modifies the tensor "
+#~ "network representing the operator. The "
+#~ "default ordering for the axes of "
+#~ "the final tensor is: `*out_edges, "
+#~ "*in_edges`. If there are any \"ignored\""
+#~ " edges, their axes come first: "
+#~ "`*ignored_edges, *out_edges, *in_edges`."
#~ msgstr ""
#~ msgid ""
-#~ "mx = \\sin(\\alpha) \\cos(\\phi) X\n"
-#~ "\n"
+#~ "Manually specify the axis ordering of"
+#~ " the final tensor. The default "
+#~ "ordering is determined by `out_edges` "
+#~ "and `in_edges` (see above)."
#~ msgstr ""
-#~ msgid ""
-#~ "my = \\sin(\\alpha) \\sin(\\phi) Y\n"
-#~ "\n"
+#~ msgid "Node count '{}' > 1 after contraction!"
#~ msgstr ""
-#~ msgid ""
-#~ "mz = \\cos(\\alpha) Z\n"
-#~ "\n"
+#~ msgid "The final tensor representing the operator."
#~ msgstr ""
#~ msgid ""
-#~ "R(\\theta, \\alpha, \\phi) = e^{-i\\theta (mx+my+mz)}\n"
-#~ "\n"
+#~ "Construct a `QuOperator` directly from a"
+#~ " single tensor. This first wraps the"
+#~ " tensor in a `Node`, then constructs"
+#~ " the `QuOperator` from that `Node`."
#~ msgstr ""
-#~ msgid "Rotation Gate"
+#~ msgid "The tensor."
#~ msgstr ""
-#~ msgid "Rotation gate along X axis."
+#~ msgid "The axis indices of `tensor` to use as `out_edges`."
#~ msgstr ""
-#~ msgid ""
-#~ "RX(\\theta) = e^{-i\\frac{\\theta}{2}X}\n"
-#~ "\n"
+#~ msgid "The axis indices of `tensor` to use as `in_edges`."
#~ msgstr ""
-#~ msgid "RX Gate"
+#~ msgid "The new operator."
#~ msgstr ""
-#~ msgid "Rotation gate along Y axis."
+#~ msgid "All tensor-network nodes involved in the operator."
#~ msgstr ""
#~ msgid ""
-#~ "RY(\\theta) = e^{-i\\frac{\\theta}{2}Y}\n"
-#~ "\n"
-#~ msgstr ""
-
-#~ msgid "RY Gate"
-#~ msgstr ""
-
-#~ msgid "Rotation gate along Z axis."
+#~ "The norm of the operator. This is"
+#~ " the 2-norm (also known as the "
+#~ "Frobenius or Hilbert-Schmidt norm)."
#~ msgstr ""
#~ msgid ""
-#~ "RZ(\\theta) = e^{-i\\frac{\\theta}{2}Z}\n"
-#~ "\n"
+#~ "The partial trace of the operator. "
+#~ "Subsystems to trace out are supplied "
+#~ "as indices, so that dangling edges "
+#~ "are connected to each other as: "
+#~ "`out_edges[i] ^ in_edges[i] for i in "
+#~ "subsystems_to_trace_out` This does not modify"
+#~ " the original network. The original "
+#~ "ordering of the remaining subsystems is"
+#~ " maintained."
#~ msgstr ""
-#~ msgid "RZ Gate"
+#~ msgid "Indices of subsystems to trace out."
#~ msgstr ""
-#~ msgid "Interfaces bridging different backends"
+#~ msgid "A new QuOperator or QuScalar representing the result."
#~ msgstr ""
-#~ msgid "Keras layer for tc quantum function"
+#~ msgid ""
+#~ "Tensor product with another operator. "
+#~ "Given two operators `A` and `B`, "
+#~ "produces a new operator `AB` "
+#~ "representing `A` ⊗ `B`. The `out_edges`"
+#~ " (`in_edges`) of `AB` is simply the"
+#~ " concatenation of the `out_edges` "
+#~ "(`in_edges`) of `A.copy()` with that of"
+#~ " `B.copy()`: `new_out_edges = [*out_edges_A_copy,"
+#~ " *out_edges_B_copy]` `new_in_edges = "
+#~ "[*in_edges_A_copy, *in_edges_B_copy]`"
#~ msgstr ""
-#~ msgid ""
-#~ "`QuantumLayer` wraps the quantum function "
-#~ "`f` as a `keras.Layer` so that "
-#~ "tensorcircuit is better integrated with "
-#~ "tensorflow."
+#~ msgid "The other operator (`B`)."
#~ msgstr ""
-#~ msgid "[description], defaults to \"glorot_uniform\""
+#~ msgid "The result (`AB`)."
#~ msgstr ""
-#~ msgid ""
-#~ "Load function from the files in "
-#~ "the ``tf.savedmodel`` format. We can "
-#~ "load several functions at the same "
-#~ "time, as they can be the same "
-#~ "function of different input shapes."
+#~ msgid "The trace of the operator."
#~ msgstr ""
-#~ msgid ""
-#~ "The fallback function when all functions"
-#~ " loaded are failed, defaults to None"
+#~ msgid "Represents a scalar via a tensor network."
#~ msgstr ""
#~ msgid ""
-#~ "When there is not legal loaded "
-#~ "function of the input shape and no"
-#~ " fallback callable."
+#~ "Constructs a new `QuScalar` from a "
+#~ "tensor network. This encapsulates an "
+#~ "existing tensor network, interpreting it "
+#~ "as a scalar."
#~ msgstr ""
#~ msgid ""
-#~ "A function that tries all loaded "
-#~ "function against the input until the "
-#~ "first success one."
+#~ "Nodes used to refer to the tensor"
+#~ " network (need not be exhaustive -"
+#~ " one node from each disconnected "
+#~ "subnetwork is sufficient)."
#~ msgstr ""
#~ msgid ""
-#~ "The keras loss function that directly"
-#~ " taking the model output as the "
-#~ "loss."
-#~ msgstr ""
-
-#~ msgid "Save tf function in the file (``tf.savedmodel`` format)."
+#~ "Construct a `QuScalar` directly from a"
+#~ " single tensor. This first wraps the"
+#~ " tensor in a `Node`, then constructs"
+#~ " the `QuScalar` from that `Node`."
#~ msgstr ""
-#~ msgid "``tf.function`` ed function with graph building"
+#~ msgid "The tensor for constructing a new QuScalar."
#~ msgstr ""
-#~ msgid "the dir path to save the function"
+#~ msgid "The new constructed QuScalar from the given tensor."
#~ msgstr ""
-#~ msgid "FiniteMPS from tensornetwork with bug fixed"
+#~ msgid "Represents a (column) vector via a tensor network."
#~ msgstr ""
#~ msgid ""
-#~ "Bases: "
-#~ ":py:class:`tensornetwork.matrixproductstates.finite_mps.FiniteMPS`"
+#~ "Constructs a new `QuVector` from a "
+#~ "tensor network. This encapsulates an "
+#~ "existing tensor network, interpreting it "
+#~ "as a (column) vector."
#~ msgstr ""
#~ msgid ""
-#~ "Apply a two-site gate to an "
-#~ "MPS. This routine will in general "
-#~ "destroy any canonical form of the "
-#~ "state. If a canonical form is "
-#~ "needed, the user can restore it "
-#~ "using `FiniteMPS.position`."
-#~ msgstr ""
-
-#~ msgid "A two-body gate."
-#~ msgstr ""
-
-#~ msgid "The first site where the gate acts."
+#~ "Construct a `QuVector` directly from a"
+#~ " single tensor. This first wraps the"
+#~ " tensor in a `Node`, then constructs"
+#~ " the `QuVector` from that `Node`."
#~ msgstr ""
-#~ msgid "The second site where the gate acts."
+#~ msgid "The tensor for constructing a \"QuVector\"."
#~ msgstr ""
-#~ msgid "The maximum number of singular values to keep."
+#~ msgid ""
+#~ "Sequence of integer indices specifying "
+#~ "the order in which to interpret "
+#~ "the axes as subsystems (output edges)."
+#~ " If not specified, the axes are "
+#~ "taken in ascending order."
#~ msgstr ""
-#~ msgid "The maximum allowed truncation error."
+#~ msgid "The new constructed QuVector from the given tensor."
#~ msgstr ""
#~ msgid ""
-#~ "An optional value to choose the "
-#~ "MPS tensor at `center_position` to be"
-#~ " isometric after the application of "
-#~ "the gate. Defaults to `site1`. If "
-#~ "the MPS is canonical "
-#~ "(i.e.`BaseMPS.center_position != None`), and "
-#~ "if the orthogonality center coincides "
-#~ "with either `site1` or `site2`, the "
-#~ "orthogonality center will be shifted to"
-#~ " `center_position` (`site1` by default). If"
-#~ " the orthogonality center does not "
-#~ "coincide with `(site1, site2)` then "
-#~ "`MPS.center_position` is set to `None`."
+#~ "Check the vector spaces represented by"
+#~ " two lists of edges are compatible."
+#~ " The number of edges must be "
+#~ "the same and the dimensions of "
+#~ "each pair of edges must match. "
+#~ "Otherwise, an exception is raised. "
+#~ ":param edges_1: List of edges "
+#~ "representing a many-body Hilbert space."
+#~ " :type edges_1: Sequence[Edge] :param "
+#~ "edges_2: List of edges representing a"
+#~ " many-body Hilbert space. :type "
+#~ "edges_2: Sequence[Edge]"
#~ msgstr ""
-#~ msgid "Multiply `max_truncation_err` with the largest singular value."
+#~ msgid ""
+#~ "Hilbert-space mismatch: \"Cannot connect "
+#~ "{} subsystems with {} subsystems\", or"
+#~ " \"Input dimension {} != output "
+#~ "dimension {}.\""
#~ msgstr ""
#~ msgid ""
-#~ "\"rank of gate is {} but has "
-#~ "to be 4\", \"site1 = {} is "
-#~ "not between 0 <= site < N -"
-#~ " 1 = {}\", \"site2 = {} is "
-#~ "not between 1 <= site < N ="
-#~ " {}\",\"Found site2 ={}, site1={}. Only "
-#~ "nearest neighbor gates are currently "
-#~ "supported\", \"f center_position = "
-#~ "{center_position} not f in {(site1, "
-#~ "site2)} \", or \"center_position = {},"
-#~ " but gate is applied at sites "
-#~ "{}, {}. Truncation should only be "
-#~ "done if the gate is applied at "
-#~ "the center position of the MPS.\""
+#~ "Eliminates any connected CopyNodes that "
+#~ "are identity matrices. This will modify"
+#~ " the network represented by `nodes`. "
+#~ "Only identities that are connected to"
+#~ " other nodes are eliminated."
#~ msgstr ""
-#~ msgid "A scalar tensor containing the truncated weight of the truncation."
+#~ msgid "Collection of nodes to search."
#~ msgstr ""
-#~ msgid "Measure the expectation value of local operators `ops` site `sites`."
+#~ msgid ""
+#~ "The Dictionary mapping remaining Nodes "
+#~ "to any replacements, Dictionary specifying "
+#~ "all dangling-edge replacements."
#~ msgstr ""
-#~ msgid "A list Tensors of rank 2; the local operators to be measured."
+#~ msgid "Compute the entropy from the given density matrix ``rho``."
#~ msgstr ""
-#~ msgid "Sites where `ops` act."
+#~ msgid "[description], defaults to 1e-12"
#~ msgstr ""
-#~ msgid "measurements :math:`\\langle` `ops[n]`:math:`\\rangle` for n in `sites`"
+#~ msgid ""
+#~ "Note: further jit is recommended. For"
+#~ " large Hilbert space, sparse Hamiltonian"
+#~ " is recommended"
#~ msgstr ""
#~ msgid ""
-#~ "Compute the correlator :math:`\\langle` "
-#~ "`op1[site1], op2[s]`:math:`\\rangle` between `site1`"
-#~ " and all sites `s` in `sites2`. "
-#~ "If `s == site1`, `op2[s]` will be"
-#~ " applied first."
+#~ "Construct a 'QuOperator' representing the "
+#~ "identity on a given space. Internally,"
+#~ " this is done by constructing "
+#~ "'CopyNode's for each edge, with "
+#~ "dimension according to 'space'."
#~ msgstr ""
-#~ msgid "Tensor of rank 2; the local operator at `site1`."
+#~ msgid ""
+#~ "A sequence of integers for the "
+#~ "dimensions of the tensor product factors"
+#~ " of the space (the edges in the"
+#~ " tensor network)."
#~ msgstr ""
-#~ msgid "Tensor of rank 2; the local operator at `sites2`."
+#~ msgid "The data type (for conversion to dense)."
#~ msgstr ""
-#~ msgid "The site where `op1` acts"
+#~ msgid "The desired identity operator."
#~ msgstr ""
-#~ msgid "Sites where operator `op2` acts."
+#~ msgid ""
+#~ "Simulate the measuring of each qubit "
+#~ "of ``p`` in the computational basis, "
+#~ "thus producing output like that of "
+#~ "``qiskit``."
#~ msgstr ""
#~ msgid ""
-#~ "Correlator :math:`\\langle` `op1[site1], "
-#~ "op2[s]`:math:`\\rangle` for `s` :math:`\\in` "
-#~ "`sites2`."
+#~ "The quantum state, assumed to be "
+#~ "normalized, as either a ket or "
+#~ "density operator."
#~ msgstr ""
-#~ msgid "Quantum circuit: MPS state simulator"
+#~ msgid "The number of counts to perform."
#~ msgstr ""
-#~ msgid "``MPSCircuit`` class. Simple usage demo below."
+#~ msgid ""
+#~ "Defaults True. The bool indicating "
+#~ "whether the return form is in the"
+#~ " form of two array or one of"
+#~ " the same length as the ``state`` "
+#~ "(if ``sparse=False``)."
#~ msgstr ""
-#~ msgid "MPSCircuit object based on state simulator."
+#~ msgid "The counts for each bit string measured."
#~ msgstr ""
#~ msgid ""
-#~ "If not None, the initial state of"
-#~ " the circuit is taken as ``tensors``"
-#~ " instead of :math:`\\vert 0\\rangle^n` "
-#~ "qubits, defaults to None"
+#~ "Constructs an appropriately specialized "
+#~ "QuOperator. If there are no edges, "
+#~ "creates a QuScalar. If the are "
+#~ "only output (input) edges, creates a "
+#~ "QuVector (QuAdjointVector). Otherwise creates "
+#~ "a QuOperator."
#~ msgstr ""
-#~ msgid "The center position of MPS, default to 0"
+#~ msgid ""
+#~ "op = qu.quantum_constructor([], [psi_node[0], "
+#~ "psi_node[1]]) >>> show_attributes(op) op.is_scalar()"
+#~ " -> False op.is_vector() -> "
+#~ "False op.is_adjoint_vector() -> True "
+#~ "len(op.out_edges) -> 0 len(op.in_edges)"
+#~ " -> 2 >>> # psi_node[0] -> "
+#~ "op.in_edges[0] >>> # psi_node[1] -> "
+#~ "op.in_edges[1]"
#~ msgstr ""
-#~ msgid "Apply a general qubit gate on MPS."
+#~ msgid "output edges."
#~ msgstr ""
-#~ msgid "The Gate to be applied"
+#~ msgid "in edges."
#~ msgstr ""
-#~ msgid "Qubit indices of the gate"
+#~ msgid ""
+#~ "reference nodes for the tensor network"
+#~ " (needed if there is a scalar "
+#~ "component)."
#~ msgstr ""
-#~ msgid "\"MPS does not support application of gate on > 2 qubits.\""
+#~ msgid "edges to ignore when checking the dimensionality of the tensor network."
#~ msgstr ""
-#~ msgid ""
-#~ "Apply a double qubit gate on "
-#~ "adjacent qubits of Matrix Product States"
-#~ " (MPS). Truncation rule is specified "
-#~ "by `set_truncation_rule`."
+#~ msgid "The new created QuOperator object."
#~ msgstr ""
-#~ msgid "The first qubit index of the gate"
+#~ msgid "Compute the reduced density matrix from the quantum state ``state``."
#~ msgstr ""
-#~ msgid "The second qubit index of the gate"
+#~ msgid "Compute the trace of several inputs ``o`` as tensor or ``QuOperator``."
#~ msgstr ""
-#~ msgid "Center position of MPS, default is None"
+#~ msgid "\\mathrm{Tr}(\\prod_i O_i)"
#~ msgstr ""
-#~ msgid ""
-#~ "Apply a double qubit gate on MPS."
-#~ " Truncation rule is specified by "
-#~ "`set_truncation_rule`."
+#~ msgid "the trace of several inputs"
#~ msgstr ""
-#~ msgid ""
-#~ "Apply a single qubit gate on MPS,"
-#~ " and the gate must be unitary; "
-#~ "no truncation is needed."
+#~ msgid "Tensornetwork Simplification"
#~ msgstr ""
-#~ msgid "gate to be applied"
+#~ msgid ""
+#~ "Get the new shape of two nodes,"
+#~ " also supporting to return original "
+#~ "shapes of two nodes."
#~ msgstr ""
-#~ msgid "Qubit index of the gate"
+#~ msgid "node one"
#~ msgstr ""
-#~ msgid "Compute the conjugate of the current MPS."
+#~ msgid "node two"
#~ msgstr ""
-#~ msgid "The constructed MPS"
+#~ msgid "Whether to include original shape of two nodes, default is True."
#~ msgstr ""
-#~ msgid "Copy the current MPS."
+#~ msgid "The new shape of the two nodes."
#~ msgstr ""
-#~ msgid "Copy the current MPS without the tensors."
+#~ msgid ""
+#~ "Contract between Node ``a`` and ``b``,"
+#~ " with correct shape only and no "
+#~ "calculation"
#~ msgstr ""
-#~ msgid "Compute the expectation of the corresponding double qubit gate."
+#~ msgid "Shortcuts for measurement patterns on circuit"
#~ msgstr ""
-#~ msgid "qubit index of the gate"
+#~ msgid "Some common graphs and lattices"
#~ msgstr ""
-#~ msgid ""
-#~ "Compute the expectation of the "
-#~ "corresponding single qubit gate in the"
-#~ " form of tensor."
+#~ msgid "1D chain with ``n`` sites"
#~ msgstr ""
-#~ msgid "Gate to be applied"
+#~ msgid ""
+#~ "This measurements pattern is specifically "
+#~ "suitable for vmap. Parameterize the "
+#~ "Pauli string to be measured."
#~ msgstr ""
-#~ msgid "The expectation of the corresponding single qubit gate"
+#~ msgid ""
+#~ "parameter tensors determines what Pauli "
+#~ "string to be measured, shape is "
+#~ "[nwires, 4] if onehot is False."
#~ msgstr ""
#~ msgid ""
-#~ "Compute the expectation of the direct"
-#~ " product of the corresponding two "
-#~ "gates."
+#~ "[description], defaults to False. If set"
+#~ " to be True, structures will first"
+#~ " go through onehot procedure."
#~ msgstr ""
-#~ msgid "First gate to be applied"
+#~ msgid "COO_sparse_matrix"
#~ msgstr ""
-#~ msgid "Second gate to be applied"
+#~ msgid "a real and scalar tensor of shape []"
#~ msgstr ""
-#~ msgid "Qubit index of the first gate"
+#~ msgid "Helper functions"
#~ msgstr ""
-#~ msgid "Qubit index of the second gate"
+#~ msgid ""
+#~ "Return a callable function for output"
+#~ " ith parts of the original output "
+#~ "along the first axis. Original output"
+#~ " supports List and Tensor."
#~ msgstr ""
-#~ msgid "The correlation of the corresponding two qubit gates"
+#~ msgid "The function to be applied this method"
#~ msgstr ""
-#~ msgid "Construct the MPS from a given wavefunction."
+#~ msgid "The ith parts of original output along the first axis (axis=0 or dim=0)"
#~ msgstr ""
-#~ msgid "The given wavefunction (any shape is OK)"
+#~ msgid "The modified callable function"
#~ msgstr ""
-#~ msgid ""
-#~ "Compute the expectation of corresponding "
-#~ "operators in the form of tensor."
+#~ msgid "Visualization on circuits"
#~ msgstr ""
-#~ msgid ""
-#~ "Operator and its position on the "
-#~ "circuit, eg. ``(gates.Z(), [1]), (gates.X(),"
-#~ " [2])`` is for operator :math:`Z_1X_2`"
+#~ msgid "# TODO(@YHPeter): add examples"
#~ msgstr ""
-#~ msgid "The expectation of corresponding operators"
+#~ msgid ""
+#~ "Generate the PDF file with given "
+#~ "latex string and filename. Latex command"
+#~ " and file path can be specified. "
+#~ "When notebook is True, convert the "
+#~ "output PDF file to image and "
+#~ "return a Image object."
#~ msgstr ""
-#~ msgid "Get the normalized Center Position."
+#~ msgid "String of latex content"
#~ msgstr ""
-#~ msgid "Normalized Center Position."
+#~ msgid "File name, defaults to random UUID `str(uuid4())`"
#~ msgstr ""
-#~ msgid "Check whether the circuit is legal."
+#~ msgid "Executable Latex command, defaults to `pdflatex`"
#~ msgstr ""
-#~ msgid "Whether the circuit is legal."
+#~ msgid "File path, defaults to current working place `os.getcwd()`"
#~ msgstr ""
-#~ msgid "integer indicating the measure on which quantum line"
+#~ msgid "if notebook is True, return `Image` object; otherwise return `None`"
#~ msgstr ""
-#~ msgid ""
-#~ "Middle measurement in the z-basis on "
-#~ "the circuit, note the wavefunction "
-#~ "output is not normalized with "
-#~ "``mid_measurement`` involved, one should "
-#~ "normalized the state manually if needed."
+#~ msgid "_summary_"
#~ msgstr ""
-#~ msgid "The index of qubit that the Z direction postselection applied on"
+#~ msgid "_description_"
#~ msgstr ""
-#~ msgid "0 for spin up, 1 for spin down, defaults to 0"
+#~ msgid "[description], default is None."
#~ msgstr ""
-#~ msgid "Normalize MPS Circuit according to the center position."
+#~ msgid "_description_, default is (1, -1)."
#~ msgstr ""
-#~ msgid "Wrapper of tn.FiniteMPS.position. Set orthogonality center."
+#~ msgid "Apply fredkin gate on the circuit."
#~ msgstr ""
-#~ msgid "The orthogonality center"
+#~ msgid "Apply orx gate with parameters on the circuit."
#~ msgstr ""
-#~ msgid "Compute the projection between `other` as bra and `self` as ket."
+#~ msgid "Apply ory gate with parameters on the circuit."
#~ msgstr ""
-#~ msgid "ket of the other MPS, which will be converted to bra automatically"
+#~ msgid "Apply orz gate with parameters on the circuit."
#~ msgstr ""
-#~ msgid "The projection in form of tensor"
+#~ msgid "Apply ox gate on the circuit."
#~ msgstr ""
-#~ msgid ""
-#~ "Set truncation rules when double qubit"
-#~ " gates are applied. If nothing is "
-#~ "specified, no truncation will take place"
-#~ " and the bond dimension will keep "
-#~ "growing. For more details, refer to "
-#~ "`split_tensor`."
+#~ msgid "Apply oy gate on the circuit."
#~ msgstr ""
-#~ msgid "Tensor with shape [1, -1]"
+#~ msgid "Apply oz gate on the circuit."
#~ msgstr ""
#~ msgid ""
-#~ "Split the tensor by SVD or QR "
-#~ "depends on whether a truncation is "
-#~ "required."
+#~ "Random tensor between 0 or 1, "
+#~ "defaults to be None, the random "
+#~ "number will be generated automatically"
#~ msgstr ""
-#~ msgid "The input tensor to split."
+#~ msgid "The str indicating the form of the output wavefunction."
#~ msgstr ""
#~ msgid ""
-#~ "Determine the orthogonal center is on"
-#~ " the left tensor or the right "
-#~ "tensor."
-#~ msgstr ""
-
-#~ msgid "Two tensors after splitting"
+#~ "A collection of useful function snippets"
+#~ " that irrelevant with the main "
+#~ "modules or await for further refactor"
#~ msgstr ""
-#~ msgid "Quantum state and operator class backend by tensornetwork"
+#~ msgid "VQNHE application"
#~ msgstr ""
-#~ msgid "Bases: :py:class:`tensorcircuit.quantum.QuOperator`"
+#~ msgid "Apply **ANY** gate with parameters on the circuit."
#~ msgstr ""
-#~ msgid "Represents an adjoint (row) vector via a tensor network."
+#~ msgid "Apply **CR** gate with parameters on the circuit."
#~ msgstr ""
-#~ msgid ""
-#~ "Constructs a new `QuAdjointVector` from "
-#~ "a tensor network. This encapsulates an"
-#~ " existing tensor network, interpreting it"
-#~ " as an adjoint vector (row vector)."
+#~ msgid "Apply **CRX** gate with parameters on the circuit."
#~ msgstr ""
-#~ msgid "The edges of the network to be used as the input edges."
+#~ msgid "Apply **CRY** gate with parameters on the circuit."
#~ msgstr ""
-#~ msgid ""
-#~ "Nodes used to refer to parts of"
-#~ " the tensor network that are not "
-#~ "connected to any input or output "
-#~ "edges (for example: a scalar factor)."
+#~ msgid "Apply **CRZ** gate with parameters on the circuit."
#~ msgstr ""
-#~ msgid ""
-#~ "Optional collection of edges to ignore"
-#~ " when performing consistency checks."
+#~ msgid "Apply **EXP** gate with parameters on the circuit."
#~ msgstr ""
-#~ msgid ""
-#~ "Construct a `QuAdjointVector` directly from"
-#~ " a single tensor. This first wraps"
-#~ " the tensor in a `Node`, then "
-#~ "constructs the `QuAdjointVector` from that "
-#~ "`Node`."
+#~ msgid "Apply **EXP1** gate with parameters on the circuit."
#~ msgstr ""
-#~ msgid "The tensor for constructing an QuAdjointVector."
+#~ msgid "Apply **FREDKIN** gate on the circuit."
#~ msgstr ""
#~ msgid ""
-#~ "Sequence of integer indices specifying "
-#~ "the order in which to interpret "
-#~ "the axes as subsystems (input edges)."
-#~ " If not specified, the axes are "
-#~ "taken in ascending order."
+#~ "Qubit number than the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 1.+0.j "
+#~ "& 0.+0.j & 0.+0.j & 0.+0.j & "
+#~ "0.+0.j & 0.+0.j & 0.+0.j & "
+#~ "0.+0.j\\\\ 0.+0.j & 1.+0.j & 0.+0.j"
+#~ " & 0.+0.j & 0.+0.j & 0.+0.j &"
+#~ " 0.+0.j & 0.+0.j\\\\ 0.+0.j & "
+#~ "0.+0.j & 1.+0.j & 0.+0.j & 0.+0.j"
+#~ " & 0.+0.j & 0.+0.j & 0.+0.j\\\\"
+#~ " 0.+0.j & 0.+0.j & 0.+0.j & "
+#~ "1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j"
+#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
+#~ " 0.+0.j & 0.+0.j & 1.+0.j & "
+#~ "0.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j"
+#~ " & 0.+0.j & 0.+0.j & 0.+0.j &"
+#~ " 0.+0.j & 0.+0.j & 1.+0.j & "
+#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 0.+0.j"
+#~ " & 0.+0.j & 0.+0.j & 1.+0.j &"
+#~ " 0.+0.j & 0.+0.j\\\\ 0.+0.j & "
+#~ "0.+0.j & 0.+0.j & 0.+0.j & 0.+0.j"
+#~ " & 0.+0.j & 0.+0.j & 1.+0.j "
+#~ "\\end{bmatrix}"
#~ msgstr ""
-#~ msgid "The new constructed QuAdjointVector give from the given tensor."
+#~ msgid "Apply **ISWAP** gate on the circuit."
#~ msgstr ""
-#~ msgid ""
-#~ "Represents a linear operator via a "
-#~ "tensor network. To interpret a tensor"
-#~ " network as a linear operator, some"
-#~ " of the dangling edges must be "
-#~ "designated as `out_edges` (output edges) "
-#~ "and the rest as `in_edges` (input "
-#~ "edges). Considered as a matrix, the "
-#~ "`out_edges` represent the row index and"
-#~ " the `in_edges` represent the column "
-#~ "index. The (right) action of the "
-#~ "operator on another then consists of "
-#~ "connecting the `in_edges` of the first"
-#~ " operator to the `out_edges` of the"
-#~ " second. Can be used to do "
-#~ "simple linear algebra with tensor "
-#~ "networks."
+#~ msgid "Apply **ORX** gate with parameters on the circuit."
#~ msgstr ""
-#~ msgid ""
-#~ "Creates a new `QuOperator` from a "
-#~ "tensor network. This encapsulates an "
-#~ "existing tensor network, interpreting it "
-#~ "as a linear operator. The network "
-#~ "is checked for consistency: All dangling"
-#~ " edges must either be in `out_edges`,"
-#~ " `in_edges`, or `ignore_edges`."
+#~ msgid "Apply **ORY** gate with parameters on the circuit."
#~ msgstr ""
-#~ msgid "The edges of the network to be used as the output edges."
+#~ msgid "Apply **ORZ** gate with parameters on the circuit."
#~ msgstr ""
-#~ msgid ""
-#~ "Optional collection of dangling edges to"
-#~ " ignore when performing consistency checks."
+#~ msgid "Apply **OX** gate on the circuit."
#~ msgstr ""
#~ msgid ""
-#~ "At least one reference node is "
-#~ "required to specify a scalar. None "
-#~ "provided!"
+#~ "Qubit number than the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 0.+0.j "
+#~ "& 1.+0.j & 0.+0.j & 0.+0.j\\\\ "
+#~ "1.+0.j & 0.+0.j & 0.+0.j & "
+#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 1.+0.j"
+#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
+#~ " 0.+0.j & 1.+0.j \\end{bmatrix}"
#~ msgstr ""
-#~ msgid ""
-#~ "The adjoint of the operator. This "
-#~ "creates a new `QuOperator` with "
-#~ "complex-conjugate copies of all tensors "
-#~ "in the network and with the input"
-#~ " and output edges switched."
+#~ msgid "Apply **OY** gate on the circuit."
#~ msgstr ""
#~ msgid ""
-#~ "Check that the network has the "
-#~ "expected dimensionality. This checks that "
-#~ "all input and output edges are "
-#~ "dangling and that there are no "
-#~ "other dangling edges (except any "
-#~ "specified in `ignore_edges`). If not, an"
-#~ " exception is raised."
+#~ "Qubit number than the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 0.+0.j "
+#~ "& 0.-1.j & 0.+0.j & 0.+0.j\\\\ "
+#~ "0.+1.j & 0.+0.j & 0.+0.j & "
+#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 1.+0.j"
+#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
+#~ " 0.+0.j & 1.+0.j \\end{bmatrix}"
+#~ msgstr ""
+
+#~ msgid "Apply **OZ** gate on the circuit."
#~ msgstr ""
#~ msgid ""
-#~ "Contract the tensor network in place."
-#~ " This modifies the tensor network "
-#~ "representation of the operator (or "
-#~ "vector, or scalar), reducing it to "
-#~ "a single tensor, without changing the"
-#~ " value."
+#~ "Qubit number than the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 1.+0.j "
+#~ "& 0.+0.j & 0.+0.j & 0.+0.j\\\\ "
+#~ "0.+0.j & -1.+0.j & 0.+0.j & "
+#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 1.+0.j"
+#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
+#~ " 0.+0.j & 1.+0.j \\end{bmatrix}"
#~ msgstr ""
-#~ msgid "Manually specify the axis ordering of the final tensor."
+#~ msgid "Apply **R** gate with parameters on the circuit."
#~ msgstr ""
-#~ msgid "The present object."
+#~ msgid "Apply **RX** gate with parameters on the circuit."
#~ msgstr ""
-#~ msgid ""
-#~ "Contracts the tensor network in place"
-#~ " and returns the final tensor. Note"
-#~ " that this modifies the tensor "
-#~ "network representing the operator. The "
-#~ "default ordering for the axes of "
-#~ "the final tensor is: `*out_edges, "
-#~ "*in_edges`. If there are any \"ignored\""
-#~ " edges, their axes come first: "
-#~ "`*ignored_edges, *out_edges, *in_edges`."
+#~ msgid "Apply **RY** gate with parameters on the circuit."
#~ msgstr ""
-#~ msgid ""
-#~ "Manually specify the axis ordering of"
-#~ " the final tensor. The default "
-#~ "ordering is determined by `out_edges` "
-#~ "and `in_edges` (see above)."
+#~ msgid "Apply **RZ** gate with parameters on the circuit."
#~ msgstr ""
-#~ msgid "Node count '{}' > 1 after contraction!"
+#~ msgid "Apply **SD** gate on the circuit."
#~ msgstr ""
-#~ msgid "The final tensor representing the operator."
+#~ msgid "Apply **TD** gate on the circuit."
#~ msgstr ""
-#~ msgid ""
-#~ "Construct a `QuOperator` directly from a"
-#~ " single tensor. This first wraps the"
-#~ " tensor in a `Node`, then constructs"
-#~ " the `QuOperator` from that `Node`."
+#~ msgid "Apply **TOFFOLI** gate on the circuit."
#~ msgstr ""
-#~ msgid "The tensor."
+#~ msgid ""
+#~ "Apply unitary gates in ``kraus`` "
+#~ "randomly based on corresponding ``prob``."
#~ msgstr ""
-#~ msgid "The axis indices of `tensor` to use as `out_edges`."
+#~ msgid "Get the eigenvalues of matrix ``a``."
#~ msgstr ""
-#~ msgid "The axis indices of `tensor` to use as `in_edges`."
+#~ msgid "eigenvalues of ``a``"
#~ msgstr ""
-#~ msgid "The new operator."
+#~ msgid ""
+#~ "Apply **ANY** gate with parameters on"
+#~ " the circuit. See "
+#~ ":py:meth:`tensorcircuit.gates.any_gate`."
#~ msgstr ""
-#~ msgid "All tensor-network nodes involved in the operator."
+#~ msgid "Qubit number that the gate applies on."
#~ msgstr ""
#~ msgid ""
-#~ "The norm of the operator. This is"
-#~ " the 2-norm (also known as the "
-#~ "Frobenius or Hilbert-Schmidt norm)."
+#~ "Apply **CNOT** gate on the circuit. "
+#~ "See :py:meth:`tensorcircuit.gates.cnot_gate`."
#~ msgstr ""
#~ msgid ""
-#~ "The partial trace of the operator. "
-#~ "Subsystems to trace out are supplied "
-#~ "as indices, so that dangling edges "
-#~ "are connected to each other as: "
-#~ "`out_edges[i] ^ in_edges[i] for i in "
-#~ "subsystems_to_trace_out` This does not modify"
-#~ " the original network. The original "
-#~ "ordering of the remaining subsystems is"
-#~ " maintained."
+#~ "Qubit number that the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 1.+0.j "
+#~ "& 0.+0.j & 0.+0.j & 0.+0.j\\\\ "
+#~ "0.+0.j & 1.+0.j & 0.+0.j & "
+#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 0.+0.j"
+#~ " & 1.+0.j\\\\ 0.+0.j & 0.+0.j &"
+#~ " 1.+0.j & 0.+0.j \\end{bmatrix}"
#~ msgstr ""
-#~ msgid "Indices of subsystems to trace out."
+#~ msgid "Qubit number that the gate applies on. The matrix for the gate is"
#~ msgstr ""
-#~ msgid "A new QuOperator or QuScalar representing the result."
+#~ msgid ""
+#~ "Apply **CR** gate with parameters on "
+#~ "the circuit. See "
+#~ ":py:meth:`tensorcircuit.gates.cr_gate`."
#~ msgstr ""
#~ msgid ""
-#~ "Tensor product with another operator. "
-#~ "Given two operators `A` and `B`, "
-#~ "produces a new operator `AB` "
-#~ "representing `A` ⊗ `B`. The `out_edges`"
-#~ " (`in_edges`) of `AB` is simply the"
-#~ " concatenation of the `out_edges` "
-#~ "(`in_edges`) of `A.copy()` with that of"
-#~ " `B.copy()`: `new_out_edges = [*out_edges_A_copy,"
-#~ " *out_edges_B_copy]` `new_in_edges = "
-#~ "[*in_edges_A_copy, *in_edges_B_copy]`"
+#~ "Apply **CRX** gate with parameters on"
+#~ " the circuit. See "
+#~ ":py:meth:`tensorcircuit.gates.crx_gate`."
#~ msgstr ""
-#~ msgid "The other operator (`B`)."
+#~ msgid ""
+#~ "Apply **CRY** gate with parameters on"
+#~ " the circuit. See "
+#~ ":py:meth:`tensorcircuit.gates.cry_gate`."
#~ msgstr ""
-#~ msgid "The result (`AB`)."
+#~ msgid ""
+#~ "Apply **CRZ** gate with parameters on"
+#~ " the circuit. See "
+#~ ":py:meth:`tensorcircuit.gates.crz_gate`."
#~ msgstr ""
-#~ msgid "The trace of the operator."
+#~ msgid ""
+#~ "Apply **CY** gate on the circuit. "
+#~ "See :py:meth:`tensorcircuit.gates.cy_gate`."
#~ msgstr ""
-#~ msgid "Represents a scalar via a tensor network."
+#~ msgid ""
+#~ "Qubit number that the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 1.+0.j "
+#~ "& 0.+0.j & 0.+0.j & 0.+0.j\\\\ "
+#~ "0.+0.j & 1.+0.j & 0.+0.j & "
+#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 0.+0.j"
+#~ " & 0.-1.j\\\\ 0.+0.j & 0.+0.j &"
+#~ " 0.+1.j & 0.+0.j \\end{bmatrix}"
#~ msgstr ""
#~ msgid ""
-#~ "Constructs a new `QuScalar` from a "
-#~ "tensor network. This encapsulates an "
-#~ "existing tensor network, interpreting it "
-#~ "as a scalar."
+#~ "Apply **CZ** gate on the circuit. "
+#~ "See :py:meth:`tensorcircuit.gates.cz_gate`."
#~ msgstr ""
#~ msgid ""
-#~ "Nodes used to refer to the tensor"
-#~ " network (need not be exhaustive -"
-#~ " one node from each disconnected "
-#~ "subnetwork is sufficient)."
+#~ "Qubit number that the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 1.+0.j "
+#~ "& 0.+0.j & 0.+0.j & 0.+0.j\\\\ "
+#~ "0.+0.j & 1.+0.j & 0.+0.j & "
+#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 1.+0.j"
+#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
+#~ " 0.+0.j & -1.+0.j \\end{bmatrix}"
#~ msgstr ""
#~ msgid ""
-#~ "Construct a `QuScalar` directly from a"
-#~ " single tensor. This first wraps the"
-#~ " tensor in a `Node`, then constructs"
-#~ " the `QuScalar` from that `Node`."
+#~ "Apply **EXP** gate with parameters on"
+#~ " the circuit. See "
+#~ ":py:meth:`tensorcircuit.gates.exp_gate`."
#~ msgstr ""
-#~ msgid "The tensor for constructing a new QuScalar."
+#~ msgid ""
+#~ "Apply **EXP1** gate with parameters on"
+#~ " the circuit. See "
+#~ ":py:meth:`tensorcircuit.gates.exp1_gate`."
#~ msgstr ""
-#~ msgid "The new constructed QuScalar from the given tensor."
+#~ msgid ""
+#~ "Apply **FREDKIN** gate on the circuit."
+#~ " See :py:meth:`tensorcircuit.gates.fredkin_gate`."
#~ msgstr ""
-#~ msgid "Represents a (column) vector via a tensor network."
+#~ msgid ""
+#~ "Qubit number that the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 1.+0.j "
+#~ "& 0.+0.j & 0.+0.j & 0.+0.j & "
+#~ "0.+0.j & 0.+0.j & 0.+0.j & "
+#~ "0.+0.j\\\\ 0.+0.j & 1.+0.j & 0.+0.j"
+#~ " & 0.+0.j & 0.+0.j & 0.+0.j &"
+#~ " 0.+0.j & 0.+0.j\\\\ 0.+0.j & "
+#~ "0.+0.j & 1.+0.j & 0.+0.j & 0.+0.j"
+#~ " & 0.+0.j & 0.+0.j & 0.+0.j\\\\"
+#~ " 0.+0.j & 0.+0.j & 0.+0.j & "
+#~ "1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j"
+#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
+#~ " 0.+0.j & 0.+0.j & 1.+0.j & "
+#~ "0.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j"
+#~ " & 0.+0.j & 0.+0.j & 0.+0.j &"
+#~ " 0.+0.j & 0.+0.j & 1.+0.j & "
+#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 0.+0.j"
+#~ " & 0.+0.j & 0.+0.j & 1.+0.j &"
+#~ " 0.+0.j & 0.+0.j\\\\ 0.+0.j & "
+#~ "0.+0.j & 0.+0.j & 0.+0.j & 0.+0.j"
+#~ " & 0.+0.j & 0.+0.j & 1.+0.j "
+#~ "\\end{bmatrix}"
#~ msgstr ""
#~ msgid ""
-#~ "Constructs a new `QuVector` from a "
-#~ "tensor network. This encapsulates an "
-#~ "existing tensor network, interpreting it "
-#~ "as a (column) vector."
+#~ "Apply **H** gate on the circuit. "
+#~ "See :py:meth:`tensorcircuit.gates.h_gate`."
#~ msgstr ""
#~ msgid ""
-#~ "Construct a `QuVector` directly from a"
-#~ " single tensor. This first wraps the"
-#~ " tensor in a `Node`, then constructs"
-#~ " the `QuVector` from that `Node`."
+#~ "Qubit number that the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} "
+#~ "0.70710677+0.j & 0.70710677+0.j\\\\ "
+#~ "0.70710677+0.j & -0.70710677+0.j \\end{bmatrix}"
#~ msgstr ""
-#~ msgid "The tensor for constructing a \"QuVector\"."
+#~ msgid ""
+#~ "Apply **I** gate on the circuit. "
+#~ "See :py:meth:`tensorcircuit.gates.i_gate`."
#~ msgstr ""
#~ msgid ""
-#~ "Sequence of integer indices specifying "
-#~ "the order in which to interpret "
-#~ "the axes as subsystems (output edges)."
-#~ " If not specified, the axes are "
-#~ "taken in ascending order."
+#~ "Qubit number that the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 1.+0.j "
+#~ "& 0.+0.j\\\\ 0.+0.j & 1.+0.j "
+#~ "\\end{bmatrix}"
#~ msgstr ""
-#~ msgid "The new constructed QuVector from the given tensor."
+#~ msgid ""
+#~ "Apply **ISWAP** gate on the circuit. "
+#~ "See :py:meth:`tensorcircuit.gates.iswap_gate`."
#~ msgstr ""
#~ msgid ""
-#~ "Check the vector spaces represented by"
-#~ " two lists of edges are compatible."
-#~ " The number of edges must be "
-#~ "the same and the dimensions of "
-#~ "each pair of edges must match. "
-#~ "Otherwise, an exception is raised. "
-#~ ":param edges_1: List of edges "
-#~ "representing a many-body Hilbert space."
-#~ " :type edges_1: Sequence[Edge] :param "
-#~ "edges_2: List of edges representing a"
-#~ " many-body Hilbert space. :type "
-#~ "edges_2: Sequence[Edge]"
+#~ "Qubit number that the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 1.+0.j "
+#~ "& 0.+0.j & 0.+0.j & 0.+0.j\\\\ "
+#~ "0.+0.j & 0.+0.j & 0.+1.j & "
+#~ "0.+0.j\\\\ 0.+0.j & 0.+1.j & 0.+0.j"
+#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
+#~ " 0.+0.j & 1.+0.j \\end{bmatrix}"
#~ msgstr ""
#~ msgid ""
-#~ "Hilbert-space mismatch: \"Cannot connect "
-#~ "{} subsystems with {} subsystems\", or"
-#~ " \"Input dimension {} != output "
-#~ "dimension {}.\""
+#~ "Apply **ORX** gate with parameters on"
+#~ " the circuit. See "
+#~ ":py:meth:`tensorcircuit.gates.orx_gate`."
#~ msgstr ""
#~ msgid ""
-#~ "Eliminates any connected CopyNodes that "
-#~ "are identity matrices. This will modify"
-#~ " the network represented by `nodes`. "
-#~ "Only identities that are connected to"
-#~ " other nodes are eliminated."
+#~ "Apply **ORY** gate with parameters on"
+#~ " the circuit. See "
+#~ ":py:meth:`tensorcircuit.gates.ory_gate`."
#~ msgstr ""
-#~ msgid "Collection of nodes to search."
+#~ msgid ""
+#~ "Apply **ORZ** gate with parameters on"
+#~ " the circuit. See "
+#~ ":py:meth:`tensorcircuit.gates.orz_gate`."
#~ msgstr ""
#~ msgid ""
-#~ "The Dictionary mapping remaining Nodes "
-#~ "to any replacements, Dictionary specifying "
-#~ "all dangling-edge replacements."
+#~ "Apply **OX** gate on the circuit. "
+#~ "See :py:meth:`tensorcircuit.gates.ox_gate`."
#~ msgstr ""
-#~ msgid "Compute the entropy from the given density matrix ``rho``."
+#~ msgid ""
+#~ "Qubit number that the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 0.+0.j "
+#~ "& 1.+0.j & 0.+0.j & 0.+0.j\\\\ "
+#~ "1.+0.j & 0.+0.j & 0.+0.j & "
+#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 1.+0.j"
+#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
+#~ " 0.+0.j & 1.+0.j \\end{bmatrix}"
#~ msgstr ""
-#~ msgid "[description], defaults to 1e-12"
+#~ msgid ""
+#~ "Apply **OY** gate on the circuit. "
+#~ "See :py:meth:`tensorcircuit.gates.oy_gate`."
#~ msgstr ""
#~ msgid ""
-#~ "Note: further jit is recommended. For"
-#~ " large Hilbert space, sparse Hamiltonian"
-#~ " is recommended"
+#~ "Qubit number that the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 0.+0.j "
+#~ "& 0.-1.j & 0.+0.j & 0.+0.j\\\\ "
+#~ "0.+1.j & 0.+0.j & 0.+0.j & "
+#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 1.+0.j"
+#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
+#~ " 0.+0.j & 1.+0.j \\end{bmatrix}"
#~ msgstr ""
#~ msgid ""
-#~ "Construct a 'QuOperator' representing the "
-#~ "identity on a given space. Internally,"
-#~ " this is done by constructing "
-#~ "'CopyNode's for each edge, with "
-#~ "dimension according to 'space'."
+#~ "Apply **OZ** gate on the circuit. "
+#~ "See :py:meth:`tensorcircuit.gates.oz_gate`."
#~ msgstr ""
#~ msgid ""
-#~ "A sequence of integers for the "
-#~ "dimensions of the tensor product factors"
-#~ " of the space (the edges in the"
-#~ " tensor network)."
+#~ "Qubit number that the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 1.+0.j "
+#~ "& 0.+0.j & 0.+0.j & 0.+0.j\\\\ "
+#~ "0.+0.j & -1.+0.j & 0.+0.j & "
+#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 1.+0.j"
+#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
+#~ " 0.+0.j & 1.+0.j \\end{bmatrix}"
#~ msgstr ""
-#~ msgid "The data type (for conversion to dense)."
+#~ msgid ""
+#~ "Apply **R** gate with parameters on "
+#~ "the circuit. See "
+#~ ":py:meth:`tensorcircuit.gates.r_gate`."
#~ msgstr ""
-#~ msgid "The desired identity operator."
+#~ msgid ""
+#~ "Apply **RX** gate with parameters on "
+#~ "the circuit. See "
+#~ ":py:meth:`tensorcircuit.gates.rx_gate`."
#~ msgstr ""
#~ msgid ""
-#~ "Simulate the measuring of each qubit "
-#~ "of ``p`` in the computational basis, "
-#~ "thus producing output like that of "
-#~ "``qiskit``."
+#~ "Apply **RY** gate with parameters on "
+#~ "the circuit. See "
+#~ ":py:meth:`tensorcircuit.gates.ry_gate`."
#~ msgstr ""
#~ msgid ""
-#~ "The quantum state, assumed to be "
-#~ "normalized, as either a ket or "
-#~ "density operator."
+#~ "Apply **RZ** gate with parameters on "
+#~ "the circuit. See "
+#~ ":py:meth:`tensorcircuit.gates.rz_gate`."
#~ msgstr ""
-#~ msgid "The number of counts to perform."
+#~ msgid ""
+#~ "Apply **S** gate on the circuit. "
+#~ "See :py:meth:`tensorcircuit.gates.s_gate`."
#~ msgstr ""
#~ msgid ""
-#~ "Defaults True. The bool indicating "
-#~ "whether the return form is in the"
-#~ " form of two array or one of"
-#~ " the same length as the ``state`` "
-#~ "(if ``sparse=False``)."
+#~ "Qubit number that the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 1.+0.j "
+#~ "& 0.+0.j\\\\ 0.+0.j & 0.+1.j "
+#~ "\\end{bmatrix}"
#~ msgstr ""
-#~ msgid "The counts for each bit string measured."
+#~ msgid ""
+#~ "Apply **SD** gate on the circuit. "
+#~ "See :py:meth:`tensorcircuit.gates.sd_gate`."
#~ msgstr ""
#~ msgid ""
-#~ "Constructs an appropriately specialized "
-#~ "QuOperator. If there are no edges, "
-#~ "creates a QuScalar. If the are "
-#~ "only output (input) edges, creates a "
-#~ "QuVector (QuAdjointVector). Otherwise creates "
-#~ "a QuOperator."
+#~ "Qubit number that the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 1.+0.j "
+#~ "& 0.+0.j\\\\ 0.+0.j & 0.-1.j "
+#~ "\\end{bmatrix}"
#~ msgstr ""
#~ msgid ""
-#~ "op = qu.quantum_constructor([], [psi_node[0], "
-#~ "psi_node[1]]) >>> show_attributes(op) op.is_scalar()"
-#~ " -> False op.is_vector() -> "
-#~ "False op.is_adjoint_vector() -> True "
-#~ "len(op.out_edges) -> 0 len(op.in_edges)"
-#~ " -> 2 >>> # psi_node[0] -> "
-#~ "op.in_edges[0] >>> # psi_node[1] -> "
-#~ "op.in_edges[1]"
+#~ "Apply **SWAP** gate on the circuit. "
+#~ "See :py:meth:`tensorcircuit.gates.swap_gate`."
#~ msgstr ""
-#~ msgid "output edges."
+#~ msgid ""
+#~ "Qubit number that the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 1.+0.j "
+#~ "& 0.+0.j & 0.+0.j & 0.+0.j\\\\ "
+#~ "0.+0.j & 0.+0.j & 1.+0.j & "
+#~ "0.+0.j\\\\ 0.+0.j & 1.+0.j & 0.+0.j"
+#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
+#~ " 0.+0.j & 1.+0.j \\end{bmatrix}"
#~ msgstr ""
-#~ msgid "in edges."
+#~ msgid ""
+#~ "Apply **T** gate on the circuit. "
+#~ "See :py:meth:`tensorcircuit.gates.t_gate`."
#~ msgstr ""
#~ msgid ""
-#~ "reference nodes for the tensor network"
-#~ " (needed if there is a scalar "
-#~ "component)."
+#~ "Qubit number that the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 1. &"
+#~ " +0.j & 0. & +0.j\\\\ 0. &"
+#~ " +0.j & 0.70710677+0.70710677j \\end{bmatrix}"
#~ msgstr ""
-#~ msgid "edges to ignore when checking the dimensionality of the tensor network."
+#~ msgid ""
+#~ "Apply **TD** gate on the circuit. "
+#~ "See :py:meth:`tensorcircuit.gates.td_gate`."
#~ msgstr ""
-#~ msgid "The new created QuOperator object."
+#~ msgid ""
+#~ "Qubit number that the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 1. &"
+#~ " +0.j & 0. & +0.j\\\\ 0. &"
+#~ " +0.j & 0.70710677-0.70710677j \\end{bmatrix}"
#~ msgstr ""
-#~ msgid "Compute the reduced density matrix from the quantum state ``state``."
+#~ msgid ""
+#~ "Apply **TOFFOLI** gate on the circuit."
+#~ " See :py:meth:`tensorcircuit.gates.toffoli_gate`."
#~ msgstr ""
-#~ msgid "Compute the trace of several inputs ``o`` as tensor or ``QuOperator``."
+#~ msgid ""
+#~ "Qubit number that the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 1.+0.j "
+#~ "& 0.+0.j & 0.+0.j & 0.+0.j & "
+#~ "0.+0.j & 0.+0.j & 0.+0.j & "
+#~ "0.+0.j\\\\ 0.+0.j & 1.+0.j & 0.+0.j"
+#~ " & 0.+0.j & 0.+0.j & 0.+0.j &"
+#~ " 0.+0.j & 0.+0.j\\\\ 0.+0.j & "
+#~ "0.+0.j & 1.+0.j & 0.+0.j & 0.+0.j"
+#~ " & 0.+0.j & 0.+0.j & 0.+0.j\\\\"
+#~ " 0.+0.j & 0.+0.j & 0.+0.j & "
+#~ "1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j"
+#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
+#~ " 0.+0.j & 0.+0.j & 1.+0.j & "
+#~ "0.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j"
+#~ " & 0.+0.j & 0.+0.j & 0.+0.j &"
+#~ " 0.+0.j & 1.+0.j & 0.+0.j & "
+#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 0.+0.j"
+#~ " & 0.+0.j & 0.+0.j & 0.+0.j &"
+#~ " 0.+0.j & 1.+0.j\\\\ 0.+0.j & "
+#~ "0.+0.j & 0.+0.j & 0.+0.j & 0.+0.j"
+#~ " & 0.+0.j & 1.+0.j & 0.+0.j "
+#~ "\\end{bmatrix}"
#~ msgstr ""
-#~ msgid "\\mathrm{Tr}(\\prod_i O_i)"
+#~ msgid ""
+#~ "Apply **WROOT** gate on the circuit. "
+#~ "See :py:meth:`tensorcircuit.gates.wroot_gate`."
#~ msgstr ""
-#~ msgid "the trace of several inputs"
+#~ msgid ""
+#~ "Qubit number that the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} "
+#~ "0.70710677+0.j & -0.5 & -0.5j\\\\ 0.5"
+#~ " & -0.5j & 0.70710677+0.j \\end{bmatrix}"
#~ msgstr ""
-#~ msgid "Tensornetwork Simplification"
+#~ msgid ""
+#~ "Apply **X** gate on the circuit. "
+#~ "See :py:meth:`tensorcircuit.gates.x_gate`."
#~ msgstr ""
#~ msgid ""
-#~ "Get the new shape of two nodes,"
-#~ " also supporting to return original "
-#~ "shapes of two nodes."
+#~ "Qubit number that the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 0.+0.j "
+#~ "& 1.+0.j\\\\ 1.+0.j & 0.+0.j "
+#~ "\\end{bmatrix}"
#~ msgstr ""
-#~ msgid "node one"
+#~ msgid ""
+#~ "Apply **Y** gate on the circuit. "
+#~ "See :py:meth:`tensorcircuit.gates.y_gate`."
#~ msgstr ""
-#~ msgid "node two"
+#~ msgid ""
+#~ "Qubit number that the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 0.+0.j "
+#~ "& 0.-1.j\\\\ 0.+1.j & 0.+0.j "
+#~ "\\end{bmatrix}"
#~ msgstr ""
-#~ msgid "Whether to include original shape of two nodes, default is True."
+#~ msgid ""
+#~ "Apply **Z** gate on the circuit. "
+#~ "See :py:meth:`tensorcircuit.gates.z_gate`."
#~ msgstr ""
-#~ msgid "The new shape of the two nodes."
+#~ msgid ""
+#~ "Qubit number that the gate applies "
+#~ "on. The matrix for the gate is"
+#~ " .. math:: \\begin{bmatrix} 1.+0.j "
+#~ "& 0.+0.j\\\\ 0.+0.j & -1.+0.j "
+#~ "\\end{bmatrix}"
#~ msgstr ""
#~ msgid ""
-#~ "Contract between Node ``a`` and ``b``,"
-#~ " with correct shape only and no "
-#~ "calculation"
+#~ "Apply unitary gates in ``kraus`` "
+#~ "randomly based on corresponding ``prob``. "
+#~ "If ``prob`` is ``None``, this is "
+#~ "reduced to kraus channel language."
#~ msgstr ""
-#~ msgid "Shortcuts for measurement patterns on circuit"
+#~ msgid "The density matrix simulator based on tensornetwork engine."
#~ msgstr ""
-#~ msgid "Some common graphs and lattices"
+#~ msgid "Number of qubits"
#~ msgstr ""
-#~ msgid "1D chain with ``n`` sites"
+#~ msgid "if True, nothing initialized, only for internal use, defaults to False"
#~ msgstr ""
-#~ msgid ""
-#~ "This measurements pattern is specifically "
-#~ "suitable for vmap. Parameterize the "
-#~ "Pauli string to be measured."
+#~ msgid "the state input for the circuit, defaults to None"
#~ msgstr ""
-#~ msgid ""
-#~ "parameter tensors determines what Pauli "
-#~ "string to be measured, shape is "
-#~ "[nwires, 4] if onehot is False."
+#~ msgid "the density matrix input for the circuit, defaults to None"
#~ msgstr ""
#~ msgid ""
-#~ "[description], defaults to False. If set"
-#~ " to be True, structures will first"
-#~ " go through onehot procedure."
-#~ msgstr ""
-
-#~ msgid "COO_sparse_matrix"
+#~ "Apply amplitudedamping quantum channel on "
+#~ "the circuit. See "
+#~ ":py:meth:`tensorcircuit.channels.amplitudedampingchannel`"
#~ msgstr ""
-#~ msgid "a real and scalar tensor of shape []"
+#~ msgid "Parameters for the channel."
#~ msgstr ""
-#~ msgid "Helper functions"
+#~ msgid "Return the output density matrix of the circuit."
#~ msgstr ""
#~ msgid ""
-#~ "Return a callable function for output"
-#~ " ith parts of the original output "
-#~ "along the first axis. Original output"
-#~ " supports List and Tensor."
-#~ msgstr ""
-
-#~ msgid "The function to be applied this method"
+#~ "check whether the final return is "
+#~ "a legal density matrix, defaults to "
+#~ "False"
#~ msgstr ""
-#~ msgid "The ith parts of original output along the first axis (axis=0 or dim=0)"
+#~ msgid "whether to reuse previous results, defaults to True"
#~ msgstr ""
-#~ msgid "The modified callable function"
+#~ msgid "The output densitymatrix in 2D shape tensor form"
#~ msgstr ""
-#~ msgid "Visualization on circuits"
+#~ msgid ""
+#~ "Apply depolarizing quantum channel on "
+#~ "the circuit. See "
+#~ ":py:meth:`tensorcircuit.channels.depolarizingchannel`"
#~ msgstr ""
-#~ msgid "# TODO(@YHPeter): add examples"
+#~ msgid ""
+#~ "Apply phasedamping quantum channel on "
+#~ "the circuit. See "
+#~ ":py:meth:`tensorcircuit.channels.phasedampingchannel`"
#~ msgstr ""
#~ msgid ""
-#~ "Generate the PDF file with given "
-#~ "latex string and filename. Latex command"
-#~ " and file path can be specified. "
-#~ "When notebook is True, convert the "
-#~ "output PDF file to image and "
-#~ "return a Image object."
+#~ "Apply reset quantum channel on the "
+#~ "circuit. See "
+#~ ":py:meth:`tensorcircuit.channels.resetchannel`"
#~ msgstr ""
-#~ msgid "String of latex content"
+#~ msgid "Generate tensorflow sparse matrix from Pauli string sum"
#~ msgstr ""
-#~ msgid "File name, defaults to random UUID `str(uuid4())`"
+#~ msgid ""
+#~ "1D Tensor representing for a Pauli "
+#~ "string, e.g. [1, 0, 0, 3, 2] "
+#~ "is for :math:`X_0Z_3Y_4`"
#~ msgstr ""
-#~ msgid "Executable Latex command, defaults to `pdflatex`"
+#~ msgid ""
+#~ "the weight for the Pauli string "
+#~ "defaults to None (all Pauli strings "
+#~ "weight 1.0)"
#~ msgstr ""
-#~ msgid "File path, defaults to current working place `os.getcwd()`"
+#~ msgid "the tensorflow sparse matrix"
#~ msgstr ""
-#~ msgid "if notebook is True, return `Image` object; otherwise return `None`"
+#~ msgid ""
+#~ "2D Tensor, each row is for a "
+#~ "Pauli string, e.g. [1, 0, 0, 3,"
+#~ " 2] is for :math:`X_0Z_3Y_4`"
#~ msgstr ""
-#~ msgid "_summary_"
+#~ msgid ""
+#~ "1D Tensor, each element corresponds the"
+#~ " weight for each Pauli string "
+#~ "defaults to None (all Pauli strings "
+#~ "weight 1.0)"
#~ msgstr ""
-#~ msgid "_description_"
+#~ msgid "the tensorflow coo sparse matrix"
#~ msgstr ""
-#~ msgid "[description], default is None."
+#~ msgid "Generate scipy sparse matrix from Pauli string sum"
#~ msgstr ""
-#~ msgid "_description_, default is (1, -1)."
+#~ msgid "the scipy coo sparse matrix"
#~ msgstr ""
-#~ msgid "Apply fredkin gate on the circuit."
+#~ msgid "Generate tensorflow dense matrix from Pauli string sum"
#~ msgstr ""
-#~ msgid "Apply orx gate with parameters on the circuit."
+#~ msgid "the tensorflow dense matrix"
#~ msgstr ""
-#~ msgid "Apply ory gate with parameters on the circuit."
+#~ msgid ""
+#~ "The projector of the operator. The "
+#~ "operator, as a linear operator, on "
+#~ "the adjoint of the operator."
#~ msgstr ""
-#~ msgid "Apply orz gate with parameters on the circuit."
+#~ msgid ""
+#~ "Set :math:`A` is the operator in "
+#~ "matrix form, then the projector of "
+#~ "operator is defined as: :math:`A^\\dagger "
+#~ "A`"
#~ msgstr ""
-#~ msgid "Apply ox gate on the circuit."
+#~ msgid "The projector of the operator."
#~ msgstr ""
-#~ msgid "Apply oy gate on the circuit."
+#~ msgid "The reduced density of the operator."
#~ msgstr ""
-#~ msgid "Apply oz gate on the circuit."
+#~ msgid ""
+#~ "Set :math:`A` is the matrix of the"
+#~ " operator, then the reduced density "
+#~ "is defined as:"
#~ msgstr ""
-#~ msgid ""
-#~ "Random tensor between 0 or 1, "
-#~ "defaults to be None, the random "
-#~ "number will be generated automatically"
+#~ msgid "\\mathrm{Tr}_{subsystems}(A^\\dagger A)"
#~ msgstr ""
-#~ msgid "The str indicating the form of the output wavefunction."
+#~ msgid ""
+#~ "Firstly, take the projector of the "
+#~ "operator, then trace out the subsystems"
+#~ " to trace out are supplied as "
+#~ "indices, so that dangling edges are "
+#~ "connected to each other as: "
+#~ "`out_edges[i] ^ in_edges[i] for i in "
+#~ "subsystems_to_trace_out` This does not modify"
+#~ " the original network. The original "
+#~ "ordering of the remaining subsystems is"
+#~ " maintained."
#~ msgstr ""
#~ msgid ""
-#~ "A collection of useful function snippets"
-#~ " that irrelevant with the main "
-#~ "modules or await for further refactor"
+#~ "The QuOperator of the reduced density"
+#~ " of the operator with given "
+#~ "subsystems."
#~ msgstr ""
-#~ msgid "VQNHE application"
+#~ msgid ""
+#~ "Contracts the tensor network in place"
+#~ " and returns the final tensor in "
+#~ "two dimentional matrix. The default "
+#~ "ordering for the axes of the final"
+#~ " tensor is: (:math:`\\prod` dimension of"
+#~ " out_edges, :math:`\\prod` dimension of "
+#~ "in_edges)"
#~ msgstr ""
-#~ msgid "Apply **ANY** gate with parameters on the circuit."
+#~ msgid "The two-dimentional tensor representing the operator."
#~ msgstr ""
-#~ msgid "Apply **CR** gate with parameters on the circuit."
+#~ msgid ""
+#~ "Returns a bool indicating if QuOperator"
+#~ " is an adjoint vector. Examples can"
+#~ " be found in the `QuOperator.from_tensor`."
#~ msgstr ""
-#~ msgid "Apply **CRX** gate with parameters on the circuit."
+#~ msgid ""
+#~ "Returns a bool indicating if QuOperator"
+#~ " is a scalar. Examples can be "
+#~ "found in the `QuOperator.from_tensor`."
#~ msgstr ""
-#~ msgid "Apply **CRY** gate with parameters on the circuit."
+#~ msgid ""
+#~ "Returns a bool indicating if QuOperator"
+#~ " is a vector. Examples can be "
+#~ "found in the `QuOperator.from_tensor`."
#~ msgstr ""
-#~ msgid "Apply **CRZ** gate with parameters on the circuit."
+#~ msgid ""
+#~ "Tensor product with another operator. "
+#~ "Given two operators `A` and `B`, "
+#~ "produces a new operator `AB` "
+#~ "representing :math:`A ⊗ B`. The "
+#~ "`out_edges` (`in_edges`) of `AB` is "
+#~ "simply the concatenation of the "
+#~ "`out_edges` (`in_edges`) of `A.copy()` with"
+#~ " that of `B.copy()`: `new_out_edges = "
+#~ "[*out_edges_A_copy, *out_edges_B_copy]` `new_in_edges "
+#~ "= [*in_edges_A_copy, *in_edges_B_copy]`"
#~ msgstr ""
-#~ msgid "Apply **EXP** gate with parameters on the circuit."
+#~ msgid ""
+#~ "Set :math:`A` is the operator in "
+#~ "matrix form, then the projector of "
+#~ "operator is defined as: :math:`A "
+#~ "A^\\dagger`"
#~ msgstr ""
-#~ msgid "Apply **EXP1** gate with parameters on the circuit."
+#~ msgid "\\mathrm{Tr}_{subsystems}(A A^\\dagger)"
#~ msgstr ""
-#~ msgid "Apply **FREDKIN** gate on the circuit."
+#~ msgid "Compute the double state of the given Hamiltonian operator ``h``."
#~ msgstr ""
-#~ msgid ""
-#~ "Qubit number than the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 1.+0.j "
-#~ "& 0.+0.j & 0.+0.j & 0.+0.j & "
-#~ "0.+0.j & 0.+0.j & 0.+0.j & "
-#~ "0.+0.j\\\\ 0.+0.j & 1.+0.j & 0.+0.j"
-#~ " & 0.+0.j & 0.+0.j & 0.+0.j &"
-#~ " 0.+0.j & 0.+0.j\\\\ 0.+0.j & "
-#~ "0.+0.j & 1.+0.j & 0.+0.j & 0.+0.j"
-#~ " & 0.+0.j & 0.+0.j & 0.+0.j\\\\"
-#~ " 0.+0.j & 0.+0.j & 0.+0.j & "
-#~ "1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j"
-#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
-#~ " 0.+0.j & 0.+0.j & 1.+0.j & "
-#~ "0.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j"
-#~ " & 0.+0.j & 0.+0.j & 0.+0.j &"
-#~ " 0.+0.j & 0.+0.j & 1.+0.j & "
-#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 0.+0.j"
-#~ " & 0.+0.j & 0.+0.j & 1.+0.j &"
-#~ " 0.+0.j & 0.+0.j\\\\ 0.+0.j & "
-#~ "0.+0.j & 0.+0.j & 0.+0.j & 0.+0.j"
-#~ " & 0.+0.j & 0.+0.j & 1.+0.j "
-#~ "\\end{bmatrix}"
+#~ msgid "Hamiltonian operator in form of Tensor."
#~ msgstr ""
-#~ msgid "Apply **ISWAP** gate on the circuit."
+#~ msgid "The double state of ``h`` with the given ``beta``."
#~ msgstr ""
-#~ msgid "Apply **ORX** gate with parameters on the circuit."
+#~ msgid "Return fidelity scalar between two states rho and rho0."
#~ msgstr ""
-#~ msgid "Apply **ORY** gate with parameters on the circuit."
+#~ msgid "\\operatorname{Tr}(\\sqrt{\\sqrt{rho} rho_0 \\sqrt{rho}})"
#~ msgstr ""
-#~ msgid "Apply **ORZ** gate with parameters on the circuit."
+#~ msgid "The density matrix in form of Tensor."
#~ msgstr ""
-#~ msgid "Apply **OX** gate on the circuit."
+#~ msgid "The sqrtm of a Hermitian matrix ``a``."
#~ msgstr ""
-#~ msgid ""
-#~ "Qubit number than the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 0.+0.j "
-#~ "& 1.+0.j & 0.+0.j & 0.+0.j\\\\ "
-#~ "1.+0.j & 0.+0.j & 0.+0.j & "
-#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 1.+0.j"
-#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
-#~ " 0.+0.j & 1.+0.j \\end{bmatrix}"
+#~ msgid "Compute the Gibbs state of the given Hamiltonian operator ``h``."
#~ msgstr ""
-#~ msgid "Apply **OY** gate on the circuit."
+#~ msgid "The Gibbs state of ``h`` with the given ``beta``."
#~ msgstr ""
-#~ msgid ""
-#~ "Qubit number than the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 0.+0.j "
-#~ "& 0.-1.j & 0.+0.j & 0.+0.j\\\\ "
-#~ "0.+1.j & 0.+0.j & 0.+0.j & "
-#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 1.+0.j"
-#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
-#~ " 0.+0.j & 1.+0.j \\end{bmatrix}"
+#~ msgid "Mutual information between AB subsystem described by ``cut``."
#~ msgstr ""
-#~ msgid "Apply **OZ** gate on the circuit."
+#~ msgid "The AB subsystem."
#~ msgstr ""
-#~ msgid ""
-#~ "Qubit number than the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 1.+0.j "
-#~ "& 0.+0.j & 0.+0.j & 0.+0.j\\\\ "
-#~ "0.+0.j & -1.+0.j & 0.+0.j & "
-#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 1.+0.j"
-#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
-#~ " 0.+0.j & 1.+0.j \\end{bmatrix}"
+#~ msgid "The mutual information between AB subsystem described by ``cut``."
#~ msgstr ""
-#~ msgid "Apply **R** gate with parameters on the circuit."
+#~ msgid "Taylor expansion of :math:`ln(x+1)`."
#~ msgstr ""
-#~ msgid "Apply **RX** gate with parameters on the circuit."
+#~ msgid "The :math:`k` th order, default is 2."
#~ msgstr ""
-#~ msgid "Apply **RY** gate with parameters on the circuit."
+#~ msgid "The :math:`k` th order of Taylor expansion of :math:`ln(x+1)`."
#~ msgstr ""
-#~ msgid "Apply **RZ** gate with parameters on the circuit."
+#~ msgid ""
+#~ "Compute the trace distance between two"
+#~ " density matrix ``rho`` and ``rho2``."
#~ msgstr ""
-#~ msgid "Apply **SD** gate on the circuit."
+#~ msgid "Epsilon, defaults to 1e-12"
#~ msgstr ""
-#~ msgid "Apply **TD** gate on the circuit."
+#~ msgid "The trace distance between two density matrix ``rho`` and ``rho2``."
#~ msgstr ""
-#~ msgid "Apply **TOFFOLI** gate on the circuit."
+#~ msgid "\\operatorname{Tr}(\\prod_i O_i)"
#~ msgstr ""
#~ msgid ""
-#~ "Apply unitary gates in ``kraus`` "
-#~ "randomly based on corresponding ``prob``."
+#~ "Compute the truncated free energy from"
+#~ " the given density matrix ``rho``."
#~ msgstr ""
-#~ msgid "Get the eigenvalues of matrix ``a``."
+#~ msgid "The :math:`k` th order, defaults to 2"
#~ msgstr ""
-#~ msgid "eigenvalues of ``a``"
+#~ msgid "The :math:`k` th order of the truncated free energy."
#~ msgstr ""
#~ msgid ""
-#~ "Apply **ANY** gate with parameters on"
-#~ " the circuit. See "
-#~ ":py:meth:`tensorcircuit.gates.any_gate`."
+#~ "The circuit ansatz is firstly one "
+#~ "layer of Hadamard gates and then "
+#~ "we have ``nlayers`` blocks of "
+#~ ":math:`e^{i\\theta Z_iZ_{i+1}}` two-qubit gate"
+#~ " in ladder layout, following rx gate."
#~ msgstr ""
-#~ msgid "Qubit number that the gate applies on."
+#~ msgid "The circuit"
#~ msgstr ""
-#~ msgid ""
-#~ "Apply **CNOT** gate on the circuit. "
-#~ "See :py:meth:`tensorcircuit.gates.cnot_gate`."
+#~ msgid "paramter tensor with 2*nlayer*n elements"
#~ msgstr ""
-#~ msgid ""
-#~ "Qubit number that the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 1.+0.j "
-#~ "& 0.+0.j & 0.+0.j & 0.+0.j\\\\ "
-#~ "0.+0.j & 1.+0.j & 0.+0.j & "
-#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 0.+0.j"
-#~ " & 1.+0.j\\\\ 0.+0.j & 0.+0.j &"
-#~ " 1.+0.j & 0.+0.j \\end{bmatrix}"
+#~ msgid "number of ZZ+RX blocks, defaults to 2"
#~ msgstr ""
-#~ msgid "Qubit number that the gate applies on. The matrix for the gate is"
+#~ msgid ""
+#~ "whether use SVD split to reduce ZZ"
+#~ " gate bond dimension, defaults to "
+#~ "False"
#~ msgstr ""
-#~ msgid ""
-#~ "Apply **CR** gate with parameters on "
-#~ "the circuit. See "
-#~ ":py:meth:`tensorcircuit.gates.cr_gate`."
+#~ msgid "The circuit with example ansatz attached"
#~ msgstr ""
#~ msgid ""
-#~ "Apply **CRX** gate with parameters on"
-#~ " the circuit. See "
-#~ ":py:meth:`tensorcircuit.gates.crx_gate`."
+#~ "Function decorator wraps the function "
+#~ "with the first input and output in"
+#~ " the format of circuit, the wrapped"
+#~ " function has the first input and "
+#~ "the output as the state tensor."
#~ msgstr ""
-#~ msgid ""
-#~ "Apply **CRY** gate with parameters on"
-#~ " the circuit. See "
-#~ ":py:meth:`tensorcircuit.gates.cry_gate`."
+#~ msgid "Function with the fist input and the output as ``Circuit`` object."
#~ msgstr ""
#~ msgid ""
-#~ "Apply **CRZ** gate with parameters on"
-#~ " the circuit. See "
-#~ ":py:meth:`tensorcircuit.gates.crz_gate`."
+#~ "Wrapped function with the first input"
+#~ " and the output as the state "
+#~ "tensor correspondingly."
#~ msgstr ""
-#~ msgid ""
-#~ "Apply **CY** gate on the circuit. "
-#~ "See :py:meth:`tensorcircuit.gates.cy_gate`."
+#~ msgid "Two-dimensional grid lattice"
#~ msgstr ""
-#~ msgid ""
-#~ "Qubit number that the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 1.+0.j "
-#~ "& 0.+0.j & 0.+0.j & 0.+0.j\\\\ "
-#~ "0.+0.j & 1.+0.j & 0.+0.j & "
-#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 0.+0.j"
-#~ " & 0.-1.j\\\\ 0.+0.j & 0.+0.j &"
-#~ " 0.+1.j & 0.+0.j \\end{bmatrix}"
+#~ msgid "number of rows"
#~ msgstr ""
-#~ msgid ""
-#~ "Apply **CZ** gate on the circuit. "
-#~ "See :py:meth:`tensorcircuit.gates.cz_gate`."
+#~ msgid "number of cols"
#~ msgstr ""
-#~ msgid ""
-#~ "Qubit number that the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 1.+0.j "
-#~ "& 0.+0.j & 0.+0.j & 0.+0.j\\\\ "
-#~ "0.+0.j & 1.+0.j & 0.+0.j & "
-#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 1.+0.j"
-#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
-#~ " 0.+0.j & -1.+0.j \\end{bmatrix}"
+#~ msgid "return all col edge with 1d index encoding"
#~ msgstr ""
#~ msgid ""
-#~ "Apply **EXP** gate with parameters on"
-#~ " the circuit. See "
-#~ ":py:meth:`tensorcircuit.gates.exp_gate`."
+#~ "whether to include pbc edges (periodic"
+#~ " boundary condition), defaults to False"
#~ msgstr ""
-#~ msgid ""
-#~ "Apply **EXP1** gate with parameters on"
-#~ " the circuit. See "
-#~ ":py:meth:`tensorcircuit.gates.exp1_gate`."
+#~ msgid "list of col edge"
#~ msgstr ""
-#~ msgid ""
-#~ "Apply **FREDKIN** gate on the circuit."
-#~ " See :py:meth:`tensorcircuit.gates.fredkin_gate`."
+#~ msgid "return all row edge with 1d index encoding"
#~ msgstr ""
-#~ msgid ""
-#~ "Qubit number that the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 1.+0.j "
-#~ "& 0.+0.j & 0.+0.j & 0.+0.j & "
-#~ "0.+0.j & 0.+0.j & 0.+0.j & "
-#~ "0.+0.j\\\\ 0.+0.j & 1.+0.j & 0.+0.j"
-#~ " & 0.+0.j & 0.+0.j & 0.+0.j &"
-#~ " 0.+0.j & 0.+0.j\\\\ 0.+0.j & "
-#~ "0.+0.j & 1.+0.j & 0.+0.j & 0.+0.j"
-#~ " & 0.+0.j & 0.+0.j & 0.+0.j\\\\"
-#~ " 0.+0.j & 0.+0.j & 0.+0.j & "
-#~ "1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j"
-#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
-#~ " 0.+0.j & 0.+0.j & 1.+0.j & "
-#~ "0.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j"
-#~ " & 0.+0.j & 0.+0.j & 0.+0.j &"
-#~ " 0.+0.j & 0.+0.j & 1.+0.j & "
-#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 0.+0.j"
-#~ " & 0.+0.j & 0.+0.j & 1.+0.j &"
-#~ " 0.+0.j & 0.+0.j\\\\ 0.+0.j & "
-#~ "0.+0.j & 0.+0.j & 0.+0.j & 0.+0.j"
-#~ " & 0.+0.j & 0.+0.j & 1.+0.j "
-#~ "\\end{bmatrix}"
+#~ msgid "list of row edge"
#~ msgstr ""
-#~ msgid ""
-#~ "Apply **H** gate on the circuit. "
-#~ "See :py:meth:`tensorcircuit.gates.h_gate`."
+#~ msgid "Get the 2D grid lattice in ``nx.Graph`` format"
#~ msgstr ""
#~ msgid ""
-#~ "Qubit number that the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} "
-#~ "0.70710677+0.j & 0.70710677+0.j\\\\ "
-#~ "0.70710677+0.j & -0.70710677+0.j \\end{bmatrix}"
+#~ "whether to include pbc edges (periodic"
+#~ " boundary condition), defaults to True"
#~ msgstr ""
#~ msgid ""
-#~ "Apply **I** gate on the circuit. "
-#~ "See :py:meth:`tensorcircuit.gates.i_gate`."
+#~ "Generate a permutation matrix P. Due "
+#~ "to the different convention or qubits'"
+#~ " order in qiskit and tensorcircuit, "
+#~ "the unitary represented by the same "
+#~ "circuit is different. They are related"
+#~ " by this permutation matrix P: P "
+#~ "@ U_qiskit @ P = U_tc"
#~ msgstr ""
-#~ msgid ""
-#~ "Qubit number that the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 1.+0.j "
-#~ "& 0.+0.j\\\\ 0.+0.j & 1.+0.j "
-#~ "\\end{bmatrix}"
+#~ msgid "# of qubits"
#~ msgstr ""
-#~ msgid ""
-#~ "Apply **ISWAP** gate on the circuit. "
-#~ "See :py:meth:`tensorcircuit.gates.iswap_gate`."
+#~ msgid "The permutation matrix P"
#~ msgstr ""
#~ msgid ""
-#~ "Qubit number that the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 1.+0.j "
-#~ "& 0.+0.j & 0.+0.j & 0.+0.j\\\\ "
-#~ "0.+0.j & 0.+0.j & 0.+1.j & "
-#~ "0.+0.j\\\\ 0.+0.j & 0.+1.j & 0.+0.j"
-#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
-#~ " 0.+0.j & 1.+0.j \\end{bmatrix}"
+#~ "Generate a qiskit quantum circuit using"
+#~ " the quantum intermediate representation "
+#~ "(qir) in tensorcircuit."
#~ msgstr ""
-#~ msgid ""
-#~ "Apply **ORX** gate with parameters on"
-#~ " the circuit. See "
-#~ ":py:meth:`tensorcircuit.gates.orx_gate`."
+#~ msgid "qiskit QuantumCircuit object"
#~ msgstr ""
#~ msgid ""
-#~ "Apply **ORY** gate with parameters on"
-#~ " the circuit. See "
-#~ ":py:meth:`tensorcircuit.gates.ory_gate`."
+#~ "Generate a tensorcircuit circuit using "
+#~ "the quantum circuit data in qiskit."
#~ msgstr ""
-#~ msgid ""
-#~ "Apply **ORZ** gate with parameters on"
-#~ " the circuit. See "
-#~ ":py:meth:`tensorcircuit.gates.orz_gate`."
+#~ msgid "Quantum circuit data from qiskit."
#~ msgstr ""
-#~ msgid ""
-#~ "Apply **OX** gate on the circuit. "
-#~ "See :py:meth:`tensorcircuit.gates.ox_gate`."
+#~ msgid "Input state of the circuit. Default is None."
#~ msgstr ""
-#~ msgid ""
-#~ "Qubit number that the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 0.+0.j "
-#~ "& 1.+0.j & 0.+0.j & 0.+0.j\\\\ "
-#~ "1.+0.j & 0.+0.j & 0.+0.j & "
-#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 1.+0.j"
-#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
-#~ " 0.+0.j & 1.+0.j \\end{bmatrix}"
+#~ msgid "A quantum circuit in tensorcircuit"
#~ msgstr ""
#~ msgid ""
-#~ "Apply **OY** gate on the circuit. "
-#~ "See :py:meth:`tensorcircuit.gates.oy_gate`."
+#~ "Translating from the gate name to "
+#~ "gate information including the number of"
+#~ " control qubits and the reduced gate"
+#~ " name."
#~ msgstr ""
-#~ msgid ""
-#~ "Qubit number that the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 0.+0.j "
-#~ "& 0.-1.j & 0.+0.j & 0.+0.j\\\\ "
-#~ "0.+1.j & 0.+0.j & 0.+0.j & "
-#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 1.+0.j"
-#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
-#~ " 0.+0.j & 1.+0.j \\end{bmatrix}"
+#~ msgid "String of gate name"
#~ msgstr ""
-#~ msgid ""
-#~ "Apply **OZ** gate on the circuit. "
-#~ "See :py:meth:`tensorcircuit.gates.oz_gate`."
+#~ msgid "# of control qubits, reduced gate name"
#~ msgstr ""
#~ msgid ""
-#~ "Qubit number that the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 1.+0.j "
-#~ "& 0.+0.j & 0.+0.j & 0.+0.j\\\\ "
-#~ "0.+0.j & -1.+0.j & 0.+0.j & "
-#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 1.+0.j"
-#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
-#~ " 0.+0.j & 1.+0.j \\end{bmatrix}"
+#~ "Generate Tex code from 'qir' string "
+#~ "to illustrate the circuit structure. "
+#~ "This visualization is based on quantikz"
+#~ " package."
#~ msgstr ""
-#~ msgid ""
-#~ "Apply **R** gate with parameters on "
-#~ "the circuit. See "
-#~ ":py:meth:`tensorcircuit.gates.r_gate`."
+#~ msgid "The quantum intermediate representation of a circuit in tensorcircuit."
#~ msgstr ""
-#~ msgid ""
-#~ "Apply **RX** gate with parameters on "
-#~ "the circuit. See "
-#~ ":py:meth:`tensorcircuit.gates.rx_gate`."
+#~ msgid "Initial state, default is an all zero state '000...000'."
#~ msgstr ""
-#~ msgid ""
-#~ "Apply **RY** gate with parameters on "
-#~ "the circuit. See "
-#~ ":py:meth:`tensorcircuit.gates.ry_gate`."
+#~ msgid "Measurement Basis, default is None which means no"
#~ msgstr ""
#~ msgid ""
-#~ "Apply **RZ** gate with parameters on "
-#~ "the circuit. See "
-#~ ":py:meth:`tensorcircuit.gates.rz_gate`."
+#~ "measurement in the end of the "
+#~ "circuit. :type measure: Optional[List[str]] "
+#~ ":param rcompress: If true, a right "
+#~ "compression of the circuit will be "
+#~ "conducted. A right compression means we"
+#~ " will try to shift gates from "
+#~ "right to left if possible. Default "
+#~ "is false. :type rcompress: bool :param"
+#~ " lcompress: If true, a left "
+#~ "compression of the circuit will be "
+#~ "conducted. A left compression means we"
+#~ " will try to shift gates from "
+#~ "left to right if possible. Default "
+#~ "is false. :type lcompress: bool :param"
+#~ " standalone: If true, the tex code"
+#~ " will be designed to generate a "
+#~ "standalone document. Default is false "
+#~ "which means the generated tex code "
+#~ "is just a quantikz code block. "
+#~ ":type standalone: bool :param "
+#~ "return_string_table: If true, a string "
+#~ "table of tex code will also be "
+#~ "returned. Default is false. :type "
+#~ "return_string_table: bool :return: Tex code"
+#~ " of circuit visualization based on "
+#~ "quantikz package. If return_string_table is"
+#~ " true, a string table of tex "
+#~ "code will also be returned. :rtype: "
+#~ "Union[str, Tuple[str, List[List[str]]]]"
#~ msgstr ""
-#~ msgid ""
-#~ "Apply **S** gate on the circuit. "
-#~ "See :py:meth:`tensorcircuit.gates.s_gate`."
+#~ msgid ":math:`ket`."
#~ msgstr ""
#~ msgid ""
-#~ "Qubit number that the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 1.+0.j "
-#~ "& 0.+0.j\\\\ 0.+0.j & 0.+1.j "
-#~ "\\end{bmatrix}"
+#~ "Get Pauli string array and weights "
+#~ "array for a qubit Hamiltonian as a"
+#~ " sum of Pauli strings defined in "
+#~ "openfermion QubitOperator."
#~ msgstr ""
-#~ msgid ""
-#~ "Apply **SD** gate on the circuit. "
-#~ "See :py:meth:`tensorcircuit.gates.sd_gate`."
+#~ msgid "Apply mpo gate in MPO format on the circuit."
#~ msgstr ""
-#~ msgid ""
-#~ "Qubit number that the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 1.+0.j "
-#~ "& 0.+0.j\\\\ 0.+0.j & 0.-1.j "
-#~ "\\end{bmatrix}"
+#~ msgid "Apply multicontrol gate in MPO format on the circuit."
#~ msgstr ""
-#~ msgid ""
-#~ "Apply **SWAP** gate on the circuit. "
-#~ "See :py:meth:`tensorcircuit.gates.swap_gate`."
+#~ msgid "Returns the amplitude of the circuit given the bitstring l."
#~ msgstr ""
-#~ msgid ""
-#~ "Qubit number that the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 1.+0.j "
-#~ "& 0.+0.j & 0.+0.j & 0.+0.j\\\\ "
-#~ "0.+0.j & 0.+0.j & 1.+0.j & "
-#~ "0.+0.j\\\\ 0.+0.j & 1.+0.j & 0.+0.j"
-#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
-#~ " 0.+0.j & 1.+0.j \\end{bmatrix}"
+#~ msgid "Apply the gate to two bits with given indexes."
#~ msgstr ""
-#~ msgid ""
-#~ "Apply **T** gate on the circuit. "
-#~ "See :py:meth:`tensorcircuit.gates.t_gate`."
+#~ msgid "The Gate applied on bits."
#~ msgstr ""
-#~ msgid ""
-#~ "Qubit number that the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 1. &"
-#~ " +0.j & 0. & +0.j\\\\ 0. &"
-#~ " +0.j & 0.70710677+0.70710677j \\end{bmatrix}"
+#~ msgid "The index of the bit to apply the Gate."
#~ msgstr ""
-#~ msgid ""
-#~ "Apply **TD** gate on the circuit. "
-#~ "See :py:meth:`tensorcircuit.gates.td_gate`."
+#~ msgid "Apply the gate to the bit with the given index."
#~ msgstr ""
-#~ msgid ""
-#~ "Qubit number that the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 1. &"
-#~ " +0.j & 0. & +0.j\\\\ 0. &"
-#~ " +0.j & 0.70710677-0.70710677j \\end{bmatrix}"
+#~ msgid "The Gate applied on the bit."
#~ msgstr ""
#~ msgid ""
-#~ "Apply **TOFFOLI** gate on the circuit."
-#~ " See :py:meth:`tensorcircuit.gates.toffoli_gate`."
+#~ "Return the list of nodes that "
+#~ "consititues the expectation value just "
+#~ "before the contraction."
#~ msgstr ""
-#~ msgid ""
-#~ "Qubit number that the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 1.+0.j "
-#~ "& 0.+0.j & 0.+0.j & 0.+0.j & "
-#~ "0.+0.j & 0.+0.j & 0.+0.j & "
-#~ "0.+0.j\\\\ 0.+0.j & 1.+0.j & 0.+0.j"
-#~ " & 0.+0.j & 0.+0.j & 0.+0.j &"
-#~ " 0.+0.j & 0.+0.j\\\\ 0.+0.j & "
-#~ "0.+0.j & 1.+0.j & 0.+0.j & 0.+0.j"
-#~ " & 0.+0.j & 0.+0.j & 0.+0.j\\\\"
-#~ " 0.+0.j & 0.+0.j & 0.+0.j & "
-#~ "1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j"
-#~ " & 0.+0.j\\\\ 0.+0.j & 0.+0.j &"
-#~ " 0.+0.j & 0.+0.j & 1.+0.j & "
-#~ "0.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j"
-#~ " & 0.+0.j & 0.+0.j & 0.+0.j &"
-#~ " 0.+0.j & 1.+0.j & 0.+0.j & "
-#~ "0.+0.j\\\\ 0.+0.j & 0.+0.j & 0.+0.j"
-#~ " & 0.+0.j & 0.+0.j & 0.+0.j &"
-#~ " 0.+0.j & 1.+0.j\\\\ 0.+0.j & "
-#~ "0.+0.j & 0.+0.j & 0.+0.j & 0.+0.j"
-#~ " & 0.+0.j & 1.+0.j & 0.+0.j "
-#~ "\\end{bmatrix}"
+#~ msgid "whether contract the output state firstly, defaults to True"
#~ msgstr ""
-#~ msgid ""
-#~ "Apply **WROOT** gate on the circuit. "
-#~ "See :py:meth:`tensorcircuit.gates.wroot_gate`."
+#~ msgid "The tensor network for the expectation"
#~ msgstr ""
#~ msgid ""
-#~ "Qubit number that the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} "
-#~ "0.70710677+0.j & -0.5 & -0.5j\\\\ 0.5"
-#~ " & -0.5j & 0.70710677+0.j \\end{bmatrix}"
+#~ "if true, we sample from the final"
+#~ " state if memory allsows, True is "
+#~ "prefered, defaults to False"
#~ msgstr ""
#~ msgid ""
-#~ "Apply **X** gate on the circuit. "
-#~ "See :py:meth:`tensorcircuit.gates.x_gate`."
+#~ "List (if batch) of tuple (binary "
+#~ "configuration tensor and correponding "
+#~ "probability)"
#~ msgstr ""
-#~ msgid ""
-#~ "Qubit number that the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 0.+0.j "
-#~ "& 1.+0.j\\\\ 1.+0.j & 0.+0.j "
-#~ "\\end{bmatrix}"
+#~ msgid "Sampling bistrings from the circuit output based on quantum amplitudes."
#~ msgstr ""
-#~ msgid ""
-#~ "Apply **Y** gate on the circuit. "
-#~ "See :py:meth:`tensorcircuit.gates.y_gate`."
+#~ msgid "tensorcircuit.densitymatrix2"
#~ msgstr ""
-#~ msgid ""
-#~ "Qubit number that the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 0.+0.j "
-#~ "& 0.-1.j\\\\ 0.+1.j & 0.+0.j "
-#~ "\\end{bmatrix}"
+#~ msgid "Apply **CNOT** gate on the circuit."
#~ msgstr ""
-#~ msgid ""
-#~ "Apply **Z** gate on the circuit. "
-#~ "See :py:meth:`tensorcircuit.gates.z_gate`."
+#~ msgid "Apply **CY** gate on the circuit."
#~ msgstr ""
-#~ msgid ""
-#~ "Qubit number that the gate applies "
-#~ "on. The matrix for the gate is"
-#~ " .. math:: \\begin{bmatrix} 1.+0.j "
-#~ "& 0.+0.j\\\\ 0.+0.j & -1.+0.j "
-#~ "\\end{bmatrix}"
+#~ msgid "Apply **CZ** gate on the circuit."
#~ msgstr ""
-#~ msgid ""
-#~ "Apply unitary gates in ``kraus`` "
-#~ "randomly based on corresponding ``prob``. "
-#~ "If ``prob`` is ``None``, this is "
-#~ "reduced to kraus channel language."
+#~ msgid "Apply **H** gate on the circuit."
#~ msgstr ""
-#~ msgid "The density matrix simulator based on tensornetwork engine."
+#~ msgid "Apply **I** gate on the circuit."
#~ msgstr ""
-#~ msgid "Number of qubits"
+#~ msgid "Apply **S** gate on the circuit."
#~ msgstr ""
-#~ msgid "if True, nothing initialized, only for internal use, defaults to False"
+#~ msgid "Apply **SWAP** gate on the circuit."
#~ msgstr ""
-#~ msgid "the state input for the circuit, defaults to None"
+#~ msgid "Apply **T** gate on the circuit."
#~ msgstr ""
-#~ msgid "the density matrix input for the circuit, defaults to None"
+#~ msgid "Apply **WROOT** gate on the circuit."
#~ msgstr ""
-#~ msgid ""
-#~ "Apply amplitudedamping quantum channel on "
-#~ "the circuit. See "
-#~ ":py:meth:`tensorcircuit.channels.amplitudedampingchannel`"
+#~ msgid "Apply **X** gate on the circuit."
#~ msgstr ""
-#~ msgid "Parameters for the channel."
+#~ msgid "Apply **Y** gate on the circuit."
#~ msgstr ""
-#~ msgid "Return the output density matrix of the circuit."
+#~ msgid "Apply **Z** gate on the circuit."
#~ msgstr ""
#~ msgid ""
-#~ "check whether the final return is "
-#~ "a legal density matrix, defaults to "
-#~ "False"
+#~ "Compute :math:`\\prod_{i\\in \\text{index}} s_i`,"
+#~ " where the probability for each "
+#~ "bitstring is given as a vector "
+#~ "``results``."
#~ msgstr ""
-#~ msgid "whether to reuse previous results, defaults to True"
+#~ msgid "Generate sparse tensor from Pauli string sum"
#~ msgstr ""
-#~ msgid "The output densitymatrix in 2D shape tensor form"
+#~ msgid "Generate dense matrix from Pauli string sum"
#~ msgstr ""
-#~ msgid ""
-#~ "Apply depolarizing quantum channel on "
-#~ "the circuit. See "
-#~ ":py:meth:`tensorcircuit.channels.depolarizingchannel`"
+#~ msgid "Generate Heisenberg Hamiltonian with possible external fields."
#~ msgstr ""
-#~ msgid ""
-#~ "Apply phasedamping quantum channel on "
-#~ "the circuit. See "
-#~ ":py:meth:`tensorcircuit.channels.phasedampingchannel`"
+#~ msgid "calibration qubit list"
#~ msgstr ""
-#~ msgid ""
-#~ "Apply reset quantum channel on the "
-#~ "circuit. See "
-#~ ":py:meth:`tensorcircuit.channels.resetchannel`"
+#~ msgid "tensorcircuit.cloud"
#~ msgstr ""
-#~ msgid "Generate tensorflow sparse matrix from Pauli string sum"
+#~ msgid "tensorcircuit.cloud.config"
#~ msgstr ""
-#~ msgid ""
-#~ "1D Tensor representing for a Pauli "
-#~ "string, e.g. [1, 0, 0, 3, 2] "
-#~ "is for :math:`X_0Z_3Y_4`"
+#~ msgid "gate name list to be counted, defaults to None (counting all gates)"
#~ msgstr ""
#~ msgid ""
-#~ "the weight for the Pauli string "
-#~ "defaults to None (all Pauli strings "
-#~ "weight 1.0)"
+#~ "Bases: "
+#~ ":py:class:`tensornetwork.backends.jax.jax_backend.JaxBackend`, "
+#~ ":py:class:`tensorcircuit.backends.abstract_backend.ExtendedBackend`"
#~ msgstr ""
-#~ msgid "the tensorflow sparse matrix"
+#~ msgid ""
+#~ "Bases: "
+#~ ":py:class:`tensornetwork.backends.numpy.numpy_backend.NumPyBackend`,"
+#~ " "
+#~ ":py:class:`tensorcircuit.backends.abstract_backend.ExtendedBackend`"
#~ msgstr ""
#~ msgid ""
-#~ "2D Tensor, each row is for a "
-#~ "Pauli string, e.g. [1, 0, 0, 3,"
-#~ " 2] is for :math:`X_0Z_3Y_4`"
+#~ "Bases: "
+#~ ":py:class:`tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend`,"
+#~ " "
+#~ ":py:class:`tensorcircuit.backends.abstract_backend.ExtendedBackend`"
#~ msgstr ""
#~ msgid ""
-#~ "1D Tensor, each element corresponds the"
-#~ " weight for each Pauli string "
-#~ "defaults to None (all Pauli strings "
-#~ "weight 1.0)"
+#~ "Bases: "
+#~ ":py:class:`tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend`,"
+#~ " "
+#~ ":py:class:`tensorcircuit.backends.abstract_backend.ExtendedBackend`"
#~ msgstr ""
-#~ msgid "the tensorflow coo sparse matrix"
+#~ msgid "Bases: :py:class:`tensorcircuit.abstractcircuit.AbstractCircuit`"
#~ msgstr ""
-#~ msgid "Generate scipy sparse matrix from Pauli string sum"
+#~ msgid "Return a Depolarizing Channel for 1 qubit or 2 qubits"
#~ msgstr ""
-#~ msgid "the scipy coo sparse matrix"
+#~ msgid "number of qubits, 1 and 2 are avaliable, defaults 1"
#~ msgstr ""
-#~ msgid "Generate tensorflow dense matrix from Pauli string sum"
+#~ msgid "Bases: :py:class:`tensorcircuit.basecircuit.BaseCircuit`"
#~ msgstr ""
-#~ msgid "the tensorflow dense matrix"
+#~ msgid "Bases: :py:class:`torch.nn.modules.module.Module`"
#~ msgstr ""
#~ msgid ""
-#~ "The projector of the operator. The "
-#~ "operator, as a linear operator, on "
-#~ "the adjoint of the operator."
+#~ "This allows better BC support for "
+#~ ":meth:`load_state_dict`. In :meth:`state_dict`, the"
+#~ " version number will be saved as "
+#~ "in the attribute `_metadata` of the "
+#~ "returned state dict, and thus pickled."
+#~ " `_metadata` is a dictionary with "
+#~ "keys that follow the naming convention"
+#~ " of state dict. See "
+#~ "``_load_from_state_dict`` on how to use "
+#~ "this information in loading."
#~ msgstr ""
#~ msgid ""
-#~ "Set :math:`A` is the operator in "
-#~ "matrix form, then the projector of "
-#~ "operator is defined as: :math:`A^\\dagger "
-#~ "A`"
+#~ "If new parameters/buffers are added/removed"
+#~ " from a module, this number shall "
+#~ "be bumped, and the module's "
+#~ "`_load_from_state_dict` method can compare the"
+#~ " version number and do appropriate "
+#~ "changes if the state dict is from"
+#~ " before the change."
#~ msgstr ""
-#~ msgid "The projector of the operator."
+#~ msgid ""
+#~ "Note that extra state should be "
+#~ "pickleable to ensure working serialization "
+#~ "of the state_dict. We only provide "
+#~ "provide backwards compatibility guarantees for"
+#~ " serializing Tensors; other objects may "
+#~ "break backwards compatibility if their "
+#~ "serialized pickled form changes."
#~ msgstr ""
-#~ msgid "The reduced density of the operator."
+#~ msgid "*(string, torch.Tensor)* -- Tuple containing the name and buffer"
#~ msgstr ""
-#~ msgid ""
-#~ "Set :math:`A` is the matrix of the"
-#~ " operator, then the reduced density "
-#~ "is defined as:"
+#~ msgid "*(string, Module)* -- Tuple containing a name and child module"
#~ msgstr ""
-#~ msgid "\\mathrm{Tr}_{subsystems}(A^\\dagger A)"
+#~ msgid "*(string, Module)* -- Tuple of name and module"
#~ msgstr ""
-
-#~ msgid ""
-#~ "Firstly, take the projector of the "
-#~ "operator, then trace out the subsystems"
-#~ " to trace out are supplied as "
-#~ "indices, so that dangling edges are "
-#~ "connected to each other as: "
-#~ "`out_edges[i] ^ in_edges[i] for i in "
-#~ "subsystems_to_trace_out` This does not modify"
-#~ " the original network. The original "
-#~ "ordering of the remaining subsystems is"
-#~ " maintained."
+
+#~ msgid "*(string, Parameter)* -- Tuple containing the name and parameter"
#~ msgstr ""
#~ msgid ""
-#~ "The QuOperator of the reduced density"
-#~ " of the operator with given "
-#~ "subsystems."
+#~ "The hook will be called every time"
+#~ " after :func:`forward` has computed an "
+#~ "output. It should have the following "
+#~ "signature::"
#~ msgstr ""
#~ msgid ""
-#~ "Contracts the tensor network in place"
-#~ " and returns the final tensor in "
-#~ "two dimentional matrix. The default "
-#~ "ordering for the axes of the final"
-#~ " tensor is: (:math:`\\prod` dimension of"
-#~ " out_edges, :math:`\\prod` dimension of "
-#~ "in_edges)"
+#~ "The input contains only the positional"
+#~ " arguments given to the module. "
+#~ "Keyword arguments won't be passed to "
+#~ "the hooks and only to the "
+#~ "``forward``. The hook can modify the "
+#~ "output. It can modify the input "
+#~ "inplace but it will not have "
+#~ "effect on forward since this is "
+#~ "called after :func:`forward` is called."
#~ msgstr ""
-#~ msgid "The two-dimentional tensor representing the operator."
+#~ msgid ""
+#~ "The hook will be called every time"
+#~ " before :func:`forward` is invoked. It "
+#~ "should have the following signature::"
#~ msgstr ""
#~ msgid ""
-#~ "Returns a bool indicating if QuOperator"
-#~ " is an adjoint vector. Examples can"
-#~ " be found in the `QuOperator.from_tensor`."
+#~ "The input contains only the positional"
+#~ " arguments given to the module. "
+#~ "Keyword arguments won't be passed to "
+#~ "the hooks and only to the "
+#~ "``forward``. The hook can modify the "
+#~ "input. User can either return a "
+#~ "tuple or a single modified value "
+#~ "in the hook. We will wrap the "
+#~ "value into a tuple if a single "
+#~ "value is returned(unless that value is"
+#~ " already a tuple)."
#~ msgstr ""
#~ msgid ""
-#~ "Returns a bool indicating if QuOperator"
-#~ " is a scalar. Examples can be "
-#~ "found in the `QuOperator.from_tensor`."
+#~ "The hook will be called every time"
+#~ " the gradients with respect to module"
+#~ " inputs are computed. The hook should"
+#~ " have the following signature::"
#~ msgstr ""
-#~ msgid ""
-#~ "Returns a bool indicating if QuOperator"
-#~ " is a vector. Examples can be "
-#~ "found in the `QuOperator.from_tensor`."
+#~ msgid "Returns a dictionary containing a whole state of the module."
#~ msgstr ""
#~ msgid ""
-#~ "Tensor product with another operator. "
-#~ "Given two operators `A` and `B`, "
-#~ "produces a new operator `AB` "
-#~ "representing :math:`A ⊗ B`. The "
-#~ "`out_edges` (`in_edges`) of `AB` is "
-#~ "simply the concatenation of the "
-#~ "`out_edges` (`in_edges`) of `A.copy()` with"
-#~ " that of `B.copy()`: `new_out_edges = "
-#~ "[*out_edges_A_copy, *out_edges_B_copy]` `new_in_edges "
-#~ "= [*in_edges_A_copy, *in_edges_B_copy]`"
+#~ "Visualise the circuit. This method "
+#~ "recevies the keywords as same as "
+#~ "qiskit.circuit.QuantumCircuit.draw. More details can"
+#~ " be found here: "
+#~ "https://qiskit.org/documentation/stubs/qiskit.circuit.QuantumCircuit.draw.html."
+#~ msgstr ""
+
+#~ msgid "the corresponding qubit"
+#~ msgstr ""
+
+#~ msgid "Bases: :py:class:`~keras.engine.training.Model`"
#~ msgstr ""
#~ msgid ""
-#~ "Set :math:`A` is the operator in "
-#~ "matrix form, then the projector of "
-#~ "operator is defined as: :math:`A "
-#~ "A^\\dagger`"
+#~ "This method can also be called "
+#~ "directly on a Functional Model during"
+#~ " construction. In this case, any loss"
+#~ " Tensors passed to this Model must"
+#~ " be symbolic and be able to be"
+#~ " traced back to the model's `Input`s."
+#~ " These losses become part of the "
+#~ "model's topology and are tracked in "
+#~ "`get_config`."
#~ msgstr ""
-#~ msgid "\\mathrm{Tr}_{subsystems}(A A^\\dagger)"
+#~ msgid ""
+#~ "Additional keyword arguments for backward "
+#~ "compatibility. Accepted values: inputs - "
+#~ "Deprecated, will be automatically inferred."
#~ msgstr ""
-#~ msgid "Compute the double state of the given Hamiltonian operator ``h``."
+#~ msgid ""
+#~ "Additional keyword arguments for backward "
+#~ "compatibility. Accepted values:"
#~ msgstr ""
-#~ msgid "Hamiltonian operator in form of Tensor."
+#~ msgid "inputs - Deprecated, will be automatically inferred."
#~ msgstr ""
-#~ msgid "The double state of ``h`` with the given ``beta``."
+#~ msgid "Deprecated, will be automatically inferred."
#~ msgstr ""
-#~ msgid "Return fidelity scalar between two states rho and rho0."
+#~ msgid "Whether to use `ResourceVariable`."
#~ msgstr ""
-#~ msgid "\\operatorname{Tr}(\\sqrt{\\sqrt{rho} rho_0 \\sqrt{rho}})"
+#~ msgid ""
+#~ "When giving unsupported dtype and no "
+#~ "initializer or when trainable has "
+#~ "been set to True with synchronization"
+#~ " set as `ON_READ`."
#~ msgstr ""
-#~ msgid "The density matrix in form of Tensor."
+#~ msgid "This is an alias of `self.__call__`."
#~ msgstr ""
-#~ msgid "The sqrtm of a Hermitian matrix ``a``."
+#~ msgid "Input tensor(s)."
#~ msgstr ""
-#~ msgid "Compute the Gibbs state of the given Hamiltonian operator ``h``."
+#~ msgid "additional positional arguments to be passed to `self.call`."
#~ msgstr ""
-#~ msgid "The Gibbs state of ``h`` with the given ``beta``."
+#~ msgid "additional keyword arguments to be passed to `self.call`."
#~ msgstr ""
-#~ msgid "Mutual information between AB subsystem described by ``cut``."
+#~ msgid "Output tensor(s)."
#~ msgstr ""
-#~ msgid "The AB subsystem."
+#~ msgid ""
+#~ "1. In case of invalid user-"
+#~ "provided data (not of type tuple,"
+#~ " list, `TensorShape`, or dict). "
+#~ "2. If the model requires call "
+#~ "arguments that are agnostic to "
+#~ "the input shapes (positional or keyword"
+#~ " arg in call signature). 3. If"
+#~ " not all layers were properly built."
+#~ " 4. If float type inputs are "
+#~ "not supported within the layers."
#~ msgstr ""
-#~ msgid "The mutual information between AB subsystem described by ``cut``."
+#~ msgid ""
+#~ "In case of invalid user-provided "
+#~ "data (not of type tuple, list,"
+#~ " `TensorShape`, or dict). 2. If "
+#~ "the model requires call arguments that"
+#~ " are agnostic to the input "
+#~ "shapes (positional or keyword arg in "
+#~ "call signature). 3. If not all "
+#~ "layers were properly built. 4. If"
+#~ " float type inputs are not supported"
+#~ " within the layers."
#~ msgstr ""
-#~ msgid "Taylor expansion of :math:`ln(x+1)`."
+#~ msgid ""
+#~ "A mask or list of masks. A "
+#~ "mask can be either a boolean "
+#~ "tensor or None (no mask). For more"
+#~ " details, check the guide "
+#~ "[here](https://www.tensorflow.org/guide/keras/masking_and_padding)."
#~ msgstr ""
-#~ msgid "The :math:`k` th order, default is 2."
+#~ msgid ""
+#~ "A mask or list of masks. A "
+#~ "mask can be either a boolean "
+#~ "tensor or None (no mask). For more"
+#~ " details, check the guide"
#~ msgstr ""
-#~ msgid "The :math:`k` th order of Taylor expansion of :math:`ln(x+1)`."
+#~ msgid ""
+#~ "Loss function. Maybe be a string "
+#~ "(name of loss function), or a "
+#~ "`tf.keras.losses.Loss` instance. See "
+#~ "`tf.keras.losses`. A loss function is "
+#~ "any callable with the signature `loss"
+#~ " = fn(y_true, y_pred)`, where `y_true` "
+#~ "are the ground truth values, and "
+#~ "`y_pred` are the model's predictions. "
+#~ "`y_true` should have shape `(batch_size, "
+#~ "d0, .. dN)` (except in the case"
+#~ " of sparse loss functions such as "
+#~ "sparse categorical crossentropy which expects"
+#~ " integer arrays of shape `(batch_size, "
+#~ "d0, .. dN-1)`). `y_pred` should have "
+#~ "shape `(batch_size, d0, .. dN)`. The "
+#~ "loss function should return a float "
+#~ "tensor. If a custom `Loss` instance "
+#~ "is used and reduction is set to"
+#~ " `None`, return value has shape "
+#~ "`(batch_size, d0, .. dN-1)` i.e. per-"
+#~ "sample or per-timestep loss values; "
+#~ "otherwise, it is a scalar. If the"
+#~ " model has multiple outputs, you can"
+#~ " use a different loss on each "
+#~ "output by passing a dictionary or "
+#~ "a list of losses. The loss value"
+#~ " that will be minimized by the "
+#~ "model will then be the sum of "
+#~ "all individual losses, unless `loss_weights`"
+#~ " is specified."
#~ msgstr ""
#~ msgid ""
-#~ "Compute the trace distance between two"
-#~ " density matrix ``rho`` and ``rho2``."
+#~ "List of metrics to be evaluated by"
+#~ " the model during training and "
+#~ "testing. Each of this can be a "
+#~ "string (name of a built-in "
+#~ "function), function or a "
+#~ "`tf.keras.metrics.Metric` instance. See "
+#~ "`tf.keras.metrics`. Typically you will use "
+#~ "`metrics=['accuracy']`. A function is any "
+#~ "callable with the signature `result ="
+#~ " fn(y_true, y_pred)`. To specify different"
+#~ " metrics for different outputs of a"
+#~ " multi-output model, you could also"
+#~ " pass a dictionary, such as "
+#~ "`metrics={'output_a': 'accuracy', 'output_b': "
+#~ "['accuracy', 'mse']}`. You can also pass"
+#~ " a list to specify a metric or"
+#~ " a list of metrics for each "
+#~ "output, such as `metrics=[['accuracy'], "
+#~ "['accuracy', 'mse']]` or `metrics=['accuracy', "
+#~ "['accuracy', 'mse']]`. When you pass the"
+#~ " strings 'accuracy' or 'acc', we "
+#~ "convert this to one of "
+#~ "`tf.keras.metrics.BinaryAccuracy`, "
+#~ "`tf.keras.metrics.CategoricalAccuracy`, "
+#~ "`tf.keras.metrics.SparseCategoricalAccuracy` based on "
+#~ "the loss function used and the "
+#~ "model output shape. We do a "
+#~ "similar conversion for the strings "
+#~ "'crossentropy' and 'ce' as well."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Optional list or dictionary specifying "
+#~ "scalar coefficients (Python floats) to "
+#~ "weight the loss contributions of "
+#~ "different model outputs. The loss value"
+#~ " that will be minimized by the "
+#~ "model will then be the *weighted "
+#~ "sum* of all individual losses, weighted"
+#~ " by the `loss_weights` coefficients. If"
+#~ " a list, it is expected to have"
+#~ " a 1:1 mapping to the model's "
+#~ "outputs. If a dict, it is expected"
+#~ " to map output names (strings) to"
+#~ " scalar coefficients."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Optional list or dictionary specifying "
+#~ "scalar coefficients (Python floats) to "
+#~ "weight the loss contributions of "
+#~ "different model outputs. The loss value"
+#~ " that will be minimized by the "
+#~ "model will then be the *weighted "
+#~ "sum* of all individual losses, weighted"
+#~ " by the `loss_weights` coefficients."
+#~ msgstr ""
+
+#~ msgid "If a list, it is expected to have a 1:1 mapping to the model's"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "outputs. If a dict, it is expected"
+#~ " to map output names (strings) to "
+#~ "scalar coefficients."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Bool. Defaults to `False`. If `True`,"
+#~ " this `Model`'s logic will not be "
+#~ "wrapped in a `tf.function`. Recommended "
+#~ "to leave this as `None` unless "
+#~ "your `Model` cannot be run inside "
+#~ "a `tf.function`. `run_eagerly=True` is not "
+#~ "supported when using "
+#~ "`tf.distribute.experimental.ParameterServerStrategy`."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Int. Defaults to 1. The number of"
+#~ " batches to run during each "
+#~ "`tf.function` call. Running multiple batches"
+#~ " inside a single `tf.function` call "
+#~ "can greatly improve performance on TPUs"
+#~ " or small models with a large "
+#~ "Python overhead. At most, one full "
+#~ "epoch will be run each execution. "
+#~ "If a number larger than the size"
+#~ " of the epoch is passed, the "
+#~ "execution will be truncated to the "
+#~ "size of the epoch. Note that if"
+#~ " `steps_per_execution` is set to `N`, "
+#~ "`Callback.on_batch_begin` and `Callback.on_batch_end` "
+#~ "methods will only be called every "
+#~ "`N` batches (i.e. before/after each "
+#~ "`tf.function` execution)."
#~ msgstr ""
-#~ msgid "Epsilon, defaults to 1e-12"
+#~ msgid ""
+#~ "If the layer has not been built,"
+#~ " this method will call `build` on "
+#~ "the layer. This assumes that the "
+#~ "layer will later be used with "
+#~ "inputs that match the input shape "
+#~ "provided here."
#~ msgstr ""
-#~ msgid "The trace distance between two density matrix ``rho`` and ``rho2``."
+#~ msgid ""
+#~ "Shape tuple (tuple of integers) or "
+#~ "list of shape tuples (one per "
+#~ "output tensor of the layer). Shape "
+#~ "tuples can include None for free "
+#~ "dimensions, instead of an integer."
#~ msgstr ""
-#~ msgid "\\operatorname{Tr}(\\prod_i O_i)"
+#~ msgid "An input shape tuple."
#~ msgstr ""
#~ msgid ""
-#~ "Compute the truncated free energy from"
-#~ " the given density matrix ``rho``."
+#~ "Single TensorSpec or nested structure of"
+#~ " TensorSpec objects, describing how the"
+#~ " layer would transform the provided "
+#~ "input."
#~ msgstr ""
-#~ msgid "The :math:`k` th order, defaults to 2"
+#~ msgid "Single TensorSpec or nested structure of TensorSpec objects, describing"
#~ msgstr ""
-#~ msgid "The :math:`k` th order of the truncated free energy."
+#~ msgid "how the layer would transform the provided input."
#~ msgstr ""
#~ msgid ""
-#~ "The circuit ansatz is firstly one "
-#~ "layer of Hadamard gates and then "
-#~ "we have ``nlayers`` blocks of "
-#~ ":math:`e^{i\\theta Z_iZ_{i+1}}` two-qubit gate"
-#~ " in ladder layout, following rx gate."
+#~ "Input data. It could be: - A "
+#~ "Numpy array (or array-like), or a"
+#~ " list of arrays (in case the "
+#~ "model has multiple inputs). - A "
+#~ "TensorFlow tensor, or a list of "
+#~ "tensors (in case the model has "
+#~ "multiple inputs). - A dict mapping "
+#~ "input names to the corresponding "
+#~ "array/tensors, if the model has named"
+#~ " inputs. - A `tf.data` dataset. "
+#~ "Should return a tuple of either "
+#~ "`(inputs, targets)` or `(inputs, targets,"
+#~ " sample_weights)`. - A generator or "
+#~ "`keras.utils.Sequence` returning `(inputs, targets)`"
+#~ " or `(inputs, targets, sample_weights)`. "
+#~ "A more detailed description of unpacking"
+#~ " behavior for iterator types (Dataset, "
+#~ "generator, Sequence) is given in the "
+#~ "`Unpacking behavior for iterator-like "
+#~ "inputs` section of `Model.fit`."
#~ msgstr ""
-#~ msgid "The circuit"
+#~ msgid "0 or 1. Verbosity mode. 0 = silent, 1 = progress bar."
#~ msgstr ""
-#~ msgid "paramter tensor with 2*nlayer*n elements"
+#~ msgid ""
+#~ "Optional Numpy array of weights for "
+#~ "the test samples, used for weighting "
+#~ "the loss function. You can either "
+#~ "pass a flat (1D) Numpy array with"
+#~ " the same length as the input "
+#~ "samples (1:1 mapping between weights "
+#~ "and samples), or in the case of"
+#~ " temporal data, you can pass a"
+#~ " 2D array with shape `(samples, "
+#~ "sequence_length)`, to apply a different "
+#~ "weight to every timestep of every"
+#~ " sample. This argument is not "
+#~ "supported when `x` is a dataset, "
+#~ "instead pass sample weights as the "
+#~ "third element of `x`."
#~ msgstr ""
-#~ msgid "number of ZZ+RX blocks, defaults to 2"
-#~ msgstr ""
+#~ msgid ""
+#~ "List of `keras.callbacks.Callback` instances. "
+#~ "List of callbacks to apply during "
+#~ "evaluation. See "
+#~ "[callbacks](/api_docs/python/tf/keras/callbacks)."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "`Model.evaluate` is not yet supported "
+#~ "with `tf.distribute.experimental.ParameterServerStrategy`."
+#~ msgstr ""
#~ msgid ""
-#~ "whether use SVD split to reduce ZZ"
-#~ " gate bond dimension, defaults to "
-#~ "False"
+#~ "Trains the model for a fixed "
+#~ "number of epochs (iterations on a "
+#~ "dataset)."
#~ msgstr ""
-#~ msgid "The circuit with example ansatz attached"
+#~ msgid ""
+#~ "Input data. It could be: - A "
+#~ "Numpy array (or array-like), or a"
+#~ " list of arrays (in case the "
+#~ "model has multiple inputs). - A "
+#~ "TensorFlow tensor, or a list of "
+#~ "tensors (in case the model has "
+#~ "multiple inputs). - A dict mapping "
+#~ "input names to the corresponding "
+#~ "array/tensors, if the model has named"
+#~ " inputs. - A `tf.data` dataset. "
+#~ "Should return a tuple of either "
+#~ "`(inputs, targets)` or `(inputs, targets,"
+#~ " sample_weights)`. - A generator or "
+#~ "`keras.utils.Sequence` returning `(inputs, targets)`"
+#~ " or `(inputs, targets, sample_weights)`. "
+#~ "- A `tf.keras.utils.experimental.DatasetCreator`, "
+#~ "which wraps a callable that takes "
+#~ "a single argument of type "
+#~ "`tf.distribute.InputContext`, and returns a "
+#~ "`tf.data.Dataset`. `DatasetCreator` should be "
+#~ "used when users prefer to specify "
+#~ "the per-replica batching and sharding"
+#~ " logic for the `Dataset`. See "
+#~ "`tf.keras.utils.experimental.DatasetCreator` doc for "
+#~ "more information. A more detailed "
+#~ "description of unpacking behavior for "
+#~ "iterator types (Dataset, generator, Sequence)"
+#~ " is given below. If using "
+#~ "`tf.distribute.experimental.ParameterServerStrategy`, only "
+#~ "`DatasetCreator` type is supported for "
+#~ "`x`."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "A more detailed description of unpacking"
+#~ " behavior for iterator types (Dataset, "
+#~ "generator, Sequence) is given below. If"
+#~ " using `tf.distribute.experimental.ParameterServerStrategy`,"
+#~ " only `DatasetCreator` type is supported"
+#~ " for `x`."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "'auto', 0, 1, or 2. Verbosity "
+#~ "mode. 0 = silent, 1 = progress "
+#~ "bar, 2 = one line per epoch. "
+#~ "'auto' defaults to 1 for most "
+#~ "cases, but 2 when used with "
+#~ "`ParameterServerStrategy`. Note that the "
+#~ "progress bar is not particularly useful"
+#~ " when logged to a file, so "
+#~ "verbose=2 is recommended when not "
+#~ "running interactively (eg, in a "
+#~ "production environment)."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Float between 0 and 1. Fraction "
+#~ "of the training data to be used"
+#~ " as validation data. The model will"
+#~ " set apart this fraction of the "
+#~ "training data, will not train on "
+#~ "it, and will evaluate the loss "
+#~ "and any model metrics on this "
+#~ "data at the end of each epoch."
+#~ " The validation data is selected "
+#~ "from the last samples in the `x`"
+#~ " and `y` data provided, before "
+#~ "shuffling. This argument is not "
+#~ "supported when `x` is a dataset, "
+#~ "generator or `keras.utils.Sequence` instance. "
+#~ "`validation_split` is not yet supported "
+#~ "with `tf.distribute.experimental.ParameterServerStrategy`."
+#~ msgstr ""
+
+#~ msgid "Float between 0 and 1."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Fraction of the training data to "
+#~ "be used as validation data. The "
+#~ "model will set apart this fraction "
+#~ "of the training data, will not "
+#~ "train on it, and will evaluate the"
+#~ " loss and any model metrics on "
+#~ "this data at the end of each "
+#~ "epoch. The validation data is selected"
+#~ " from the last samples in the "
+#~ "`x` and `y` data provided, before "
+#~ "shuffling. This argument is not "
+#~ "supported when `x` is a dataset, "
+#~ "generator or"
+#~ msgstr ""
+
+#~ msgid "`keras.utils.Sequence` instance."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "`validation_split` is not yet supported "
+#~ "with `tf.distribute.experimental.ParameterServerStrategy`."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Data on which to evaluate the loss"
+#~ " and any model metrics at the "
+#~ "end of each epoch. The model will"
+#~ " not be trained on this data. "
+#~ "Thus, note the fact that the "
+#~ "validation loss of data provided using"
+#~ " `validation_split` or `validation_data` is "
+#~ "not affected by regularization layers "
+#~ "like noise and dropout. `validation_data` "
+#~ "will override `validation_split`. `validation_data`"
+#~ " could be: - A tuple `(x_val, "
+#~ "y_val)` of Numpy arrays or tensors."
+#~ " - A tuple `(x_val, y_val, "
+#~ "val_sample_weights)` of NumPy arrays. - "
+#~ "A `tf.data.Dataset`. - A Python "
+#~ "generator or `keras.utils.Sequence` returning "
+#~ "`(inputs, targets)` or `(inputs, targets, "
+#~ "sample_weights)`. `validation_data` is not yet"
+#~ " supported with "
+#~ "`tf.distribute.experimental.ParameterServerStrategy`."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Optional dictionary mapping class indices "
+#~ "(integers) to a weight (float) value,"
+#~ " used for weighting the loss function"
+#~ " (during training only). This can be"
+#~ " useful to tell the model to "
+#~ "\"pay more attention\" to samples from"
+#~ " an under-represented class."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Optional Numpy array of weights for "
+#~ "the training samples, used for weighting"
+#~ " the loss function (during training "
+#~ "only). You can either pass a flat"
+#~ " (1D) Numpy array with the same "
+#~ "length as the input samples (1:1 "
+#~ "mapping between weights and samples), "
+#~ "or in the case of temporal data,"
+#~ " you can pass a 2D array with"
+#~ " shape `(samples, sequence_length)`, to "
+#~ "apply a different weight to every "
+#~ "timestep of every sample. This argument"
+#~ " is not supported when `x` is a"
+#~ " dataset, generator, or `keras.utils.Sequence`"
+#~ " instance, instead provide the "
+#~ "sample_weights as the third element of"
+#~ " `x`."
+#~ msgstr ""
+
+#~ msgid "Optional Numpy array of weights for"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "the training samples, used for weighting"
+#~ " the loss function (during training "
+#~ "only). You can either pass a flat"
+#~ " (1D) Numpy array with the same "
+#~ "length as the input samples (1:1 "
+#~ "mapping between weights and samples), or"
+#~ " in the case of temporal data, "
+#~ "you can pass a 2D array with "
+#~ "shape `(samples, sequence_length)`, to apply"
+#~ " a different weight to every timestep"
+#~ " of every sample. This argument is"
+#~ " not supported when `x` is a "
+#~ "dataset, generator, or"
+#~ msgstr ""
+
+#~ msgid "`keras.utils.Sequence` instance, instead provide the sample_weights"
+#~ msgstr ""
+
+#~ msgid "as the third element of `x`."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Integer or `None`. Total number of "
+#~ "steps (batches of samples) before "
+#~ "declaring one epoch finished and "
+#~ "starting the next epoch. When training"
+#~ " with input tensors such as "
+#~ "TensorFlow data tensors, the default "
+#~ "`None` is equal to the number of"
+#~ " samples in your dataset divided by"
+#~ " the batch size, or 1 if that"
+#~ " cannot be determined. If x is "
+#~ "a `tf.data` dataset, and 'steps_per_epoch' "
+#~ "is None, the epoch will run until"
+#~ " the input dataset is exhausted. When"
+#~ " passing an infinitely repeating dataset,"
+#~ " you must specify the `steps_per_epoch` "
+#~ "argument. If `steps_per_epoch=-1` the training"
+#~ " will run indefinitely with an "
+#~ "infinitely repeating dataset. This argument"
+#~ " is not supported with array inputs."
+#~ " When using "
+#~ "`tf.distribute.experimental.ParameterServerStrategy`: * "
+#~ "`steps_per_epoch=None` is not supported."
#~ msgstr ""
#~ msgid ""
-#~ "Function decorator wraps the function "
-#~ "with the first input and output in"
-#~ " the format of circuit, the wrapped"
-#~ " function has the first input and "
-#~ "the output as the state tensor."
+#~ "Integer or `None`. Total number of "
+#~ "steps (batches of samples) before "
+#~ "declaring one epoch finished and "
+#~ "starting the next epoch. When training"
+#~ " with input tensors such as "
+#~ "TensorFlow data tensors, the default "
+#~ "`None` is equal to the number of"
+#~ " samples in your dataset divided by"
+#~ " the batch size, or 1 if that"
+#~ " cannot be determined. If x is "
+#~ "a `tf.data` dataset, and 'steps_per_epoch' "
+#~ "is None, the epoch will run until"
+#~ " the input dataset is exhausted. When"
+#~ " passing an infinitely repeating dataset,"
+#~ " you must specify the `steps_per_epoch` "
+#~ "argument. If `steps_per_epoch=-1` the training"
+#~ " will run indefinitely with an "
+#~ "infinitely repeating dataset. This argument"
+#~ " is not supported with array inputs."
+#~ " When using "
+#~ "`tf.distribute.experimental.ParameterServerStrategy`:"
#~ msgstr ""
-#~ msgid "Function with the fist input and the output as ``Circuit`` object."
+#~ msgid ""
+#~ "Only relevant if validation data is "
+#~ "provided. Integer or `collections.abc.Container` "
+#~ "instance (e.g. list, tuple, etc.). If"
+#~ " an integer, specifies how many "
+#~ "training epochs to run before a "
+#~ "new validation run is performed, e.g."
+#~ " `validation_freq=2` runs validation every "
+#~ "2 epochs. If a Container, specifies "
+#~ "the epochs on which to run "
+#~ "validation, e.g. `validation_freq=[1, 2, 10]`"
+#~ " runs validation at the end of "
+#~ "the 1st, 2nd, and 10th epochs."
#~ msgstr ""
#~ msgid ""
-#~ "Wrapped function with the first input"
-#~ " and the output as the state "
-#~ "tensor correspondingly."
+#~ "tf.keras.utils.Sequence to the `x` argument"
+#~ " of fit, which will in fact "
+#~ "yield not only features (x) but "
+#~ "optionally targets (y) and sample "
+#~ "weights. Keras requires that the output"
+#~ " of such iterator-likes be "
+#~ "unambiguous. The iterator should return "
+#~ "a tuple of length 1, 2, or "
+#~ "3, where the optional second and "
+#~ "third elements will be used for y"
+#~ " and sample_weight respectively. Any other"
+#~ " type provided will be wrapped in "
+#~ "a length one tuple, effectively treating"
+#~ " everything as 'x'. When yielding "
+#~ "dicts, they should still adhere to "
+#~ "the top-level tuple structure. e.g. "
+#~ "`({\"x0\": x0, \"x1\": x1}, y)`. Keras"
+#~ " will not attempt to separate "
+#~ "features, targets, and weights from the"
+#~ " keys of a single dict."
#~ msgstr ""
-#~ msgid "Two-dimensional grid lattice"
+#~ msgid "A notable unsupported data type is the namedtuple. The reason is that"
#~ msgstr ""
-#~ msgid "number of rows"
+#~ msgid ""
+#~ "it behaves like both an ordered "
+#~ "datatype (tuple) and a mapping datatype"
+#~ " (dict). So given a namedtuple of "
+#~ "the form:"
#~ msgstr ""
-#~ msgid "number of cols"
+#~ msgid "Retrieves losses relevant to a specific set of inputs."
#~ msgstr ""
-#~ msgid "return all col edge with 1d index encoding"
+#~ msgid "Input tensor or list/tuple of input tensors."
#~ msgstr ""
-#~ msgid ""
-#~ "whether to include pbc edges (periodic"
-#~ " boundary condition), defaults to False"
+#~ msgid "List of loss tensors of the layer that depend on `inputs`."
#~ msgstr ""
-#~ msgid "list of col edge"
+#~ msgid "Retrieves updates relevant to a specific set of inputs."
#~ msgstr ""
-#~ msgid "return all row edge with 1d index encoding"
+#~ msgid "List of update ops of the layer that depend on `inputs`."
#~ msgstr ""
-#~ msgid "list of row edge"
+#~ msgid "Deprecated, do NOT use! Only for compatibility with external Keras."
#~ msgstr ""
-#~ msgid "Get the 2D grid lattice in ``nx.Graph`` format"
+#~ msgid ""
+#~ "Loads all layer weights, either from "
+#~ "a TensorFlow or an HDF5 weight "
+#~ "file."
#~ msgstr ""
#~ msgid ""
-#~ "whether to include pbc edges (periodic"
-#~ " boundary condition), defaults to True"
+#~ "If `by_name` is False weights are "
+#~ "loaded based on the network's topology."
+#~ " This means the architecture should "
+#~ "be the same as when the weights"
+#~ " were saved. Note that layers that"
+#~ " don't have weights are not taken "
+#~ "into account in the topological "
+#~ "ordering, so adding or removing layers"
+#~ " is fine as long as they don't"
+#~ " have weights."
#~ msgstr ""
#~ msgid ""
-#~ "Generate a permutation matrix P. Due "
-#~ "to the different convention or qubits'"
-#~ " order in qiskit and tensorcircuit, "
-#~ "the unitary represented by the same "
-#~ "circuit is different. They are related"
-#~ " by this permutation matrix P: P "
-#~ "@ U_qiskit @ P = U_tc"
+#~ "If `by_name` is True, weights are "
+#~ "loaded into layers only if they "
+#~ "share the same name. This is "
+#~ "useful for fine-tuning or transfer-"
+#~ "learning models where some of the "
+#~ "layers have changed."
#~ msgstr ""
-#~ msgid "# of qubits"
+#~ msgid ""
+#~ "Only topological loading (`by_name=False`) is"
+#~ " supported when loading weights from "
+#~ "the TensorFlow format. Note that "
+#~ "topological loading differs slightly between"
+#~ " TensorFlow and HDF5 formats for "
+#~ "user-defined classes inheriting from "
+#~ "`tf.keras.Model`: HDF5 loads based on a"
+#~ " flattened list of weights, while the"
+#~ " TensorFlow format loads based on the"
+#~ " object-local names of attributes to"
+#~ " which layers are assigned in the "
+#~ "`Model`'s constructor."
#~ msgstr ""
-#~ msgid "The permutation matrix P"
+#~ msgid ""
+#~ "String, path to the weights file "
+#~ "to load. For weight files in "
+#~ "TensorFlow format, this is the file "
+#~ "prefix (the same as was passed to"
+#~ " `save_weights`). This can also be a"
+#~ " path to a SavedModel saved from "
+#~ "`model.save`."
#~ msgstr ""
#~ msgid ""
-#~ "Generate a qiskit quantum circuit using"
-#~ " the quantum intermediate representation "
-#~ "(qir) in tensorcircuit."
+#~ "Boolean, whether to load weights by "
+#~ "name or by topological order. Only "
+#~ "topological loading is supported for "
+#~ "weight files in TensorFlow format."
#~ msgstr ""
-#~ msgid "qiskit QuantumCircuit object"
+#~ msgid ""
+#~ "Boolean, whether to skip loading of "
+#~ "layers where there is a mismatch "
+#~ "in the number of weights, or a "
+#~ "mismatch in the shape of the "
+#~ "weight (only valid when `by_name=True`)."
#~ msgstr ""
#~ msgid ""
-#~ "Generate a tensorcircuit circuit using "
-#~ "the quantum circuit data in qiskit."
+#~ "Optional `tf.train.CheckpointOptions` object that"
+#~ " specifies options for loading weights."
#~ msgstr ""
-#~ msgid "Quantum circuit data from qiskit."
+#~ msgid ""
+#~ "When loading a weight file in "
+#~ "TensorFlow format, returns the same "
+#~ "status object as `tf.train.Checkpoint.restore`. "
+#~ "When graph building, restore ops are "
+#~ "run automatically as soon as the "
+#~ "network is built (on first call "
+#~ "for user-defined classes inheriting from"
+#~ " `Model`, immediately if it is "
+#~ "already built). When loading weights in"
+#~ " HDF5 format, returns `None`."
#~ msgstr ""
-#~ msgid "Input state of the circuit. Default is None."
+#~ msgid ""
+#~ "When loading a weight file in "
+#~ "TensorFlow format, returns the same "
+#~ "status object as `tf.train.Checkpoint.restore`. "
+#~ "When graph building, restore ops are "
+#~ "run automatically as soon as the "
+#~ "network is built (on first call "
+#~ "for user-defined classes inheriting from"
+#~ " `Model`, immediately if it is "
+#~ "already built)."
#~ msgstr ""
-#~ msgid "A quantum circuit in tensorcircuit"
+#~ msgid "When loading weights in HDF5 format, returns `None`."
#~ msgstr ""
-#~ msgid ""
-#~ "Translating from the gate name to "
-#~ "gate information including the number of"
-#~ " control qubits and the reduced gate"
-#~ " name."
+#~ msgid "If `h5py` is not available and the weight file is in HDF5 format."
#~ msgstr ""
-#~ msgid "String of gate name"
+#~ msgid "If `skip_mismatch` is set to `True` when `by_name` is `False`."
#~ msgstr ""
-#~ msgid "# of control qubits, reduced gate name"
+#~ msgid ""
+#~ "Returns the model's metrics added using"
+#~ " `compile()`, `add_metric()` APIs."
#~ msgstr ""
#~ msgid ""
-#~ "Generate Tex code from 'qir' string "
-#~ "to illustrate the circuit structure. "
-#~ "This visualization is based on quantikz"
-#~ " package."
+#~ "Computation is done in batches. This "
+#~ "method is designed for performance in"
+#~ " large scale inputs. For small amount"
+#~ " of inputs that fit in one "
+#~ "batch, directly using `__call__()` is "
+#~ "recommended for faster execution, e.g., "
+#~ "`model(x)`, or `model(x, training=False)` if"
+#~ " you have layers such as "
+#~ "`tf.keras.layers.BatchNormalization` that behaves "
+#~ "differently during inference. Also, note "
+#~ "the fact that test loss is not "
+#~ "affected by regularization layers like "
+#~ "noise and dropout."
#~ msgstr ""
-#~ msgid "The quantum intermediate representation of a circuit in tensorcircuit."
+#~ msgid "Verbosity mode, 0 or 1."
#~ msgstr ""
-#~ msgid "Initial state, default is an all zero state '000...000'."
+#~ msgid ""
+#~ "List of `keras.callbacks.Callback` instances. "
+#~ "List of callbacks to apply during "
+#~ "prediction. See "
+#~ "[callbacks](/api_docs/python/tf/keras/callbacks)."
#~ msgstr ""
-#~ msgid "Measurement Basis, default is None which means no"
+#~ msgid "If `model.predict_on_batch` is wrapped in a `tf.function`."
#~ msgstr ""
#~ msgid ""
-#~ "measurement in the end of the "
-#~ "circuit. :type measure: Optional[List[str]] "
-#~ ":param rcompress: If true, a right "
-#~ "compression of the circuit will be "
-#~ "conducted. A right compression means we"
-#~ " will try to shift gates from "
-#~ "right to left if possible. Default "
-#~ "is false. :type rcompress: bool :param"
-#~ " lcompress: If true, a left "
-#~ "compression of the circuit will be "
-#~ "conducted. A left compression means we"
-#~ " will try to shift gates from "
-#~ "left to right if possible. Default "
-#~ "is false. :type lcompress: bool :param"
-#~ " standalone: If true, the tex code"
-#~ " will be designed to generate a "
-#~ "standalone document. Default is false "
-#~ "which means the generated tex code "
-#~ "is just a quantikz code block. "
-#~ ":type standalone: bool :param "
-#~ "return_string_table: If true, a string "
-#~ "table of tex code will also be "
-#~ "returned. Default is false. :type "
-#~ "return_string_table: bool :return: Tex code"
-#~ " of circuit visualization based on "
-#~ "quantikz package. If return_string_table is"
-#~ " true, a string table of tex "
-#~ "code will also be returned. :rtype: "
-#~ "Union[str, Tuple[str, List[List[str]]]]"
+#~ "This method should contain the "
+#~ "mathematical logic for one step of "
+#~ "inference. This typically includes the "
+#~ "forward pass."
#~ msgstr ""
-#~ msgid ":math:`ket`."
+#~ msgid "Saves the model to Tensorflow SavedModel or a single HDF5 file."
#~ msgstr ""
#~ msgid ""
-#~ "Get Pauli string array and weights "
-#~ "array for a qubit Hamiltonian as a"
-#~ " sum of Pauli strings defined in "
-#~ "openfermion QubitOperator."
+#~ "Please see `tf.keras.models.save_model` or the"
+#~ " [Serialization and Saving "
+#~ "guide](https://keras.io/guides/serialization_and_saving/) for"
+#~ " details."
#~ msgstr ""
-#~ msgid "Apply mpo gate in MPO format on the circuit."
+#~ msgid "String, PathLike, path to SavedModel or H5 file to save the model."
#~ msgstr ""
-#~ msgid "Apply multicontrol gate in MPO format on the circuit."
+#~ msgid "If True, save optimizer's state together."
#~ msgstr ""
-#~ msgid "Returns the amplitude of the circuit given the bitstring l."
+#~ msgid ""
+#~ "Either `'tf'` or `'h5'`, indicating "
+#~ "whether to save the model to "
+#~ "Tensorflow SavedModel or HDF5. Defaults "
+#~ "to 'tf' in TF 2.X, and 'h5' "
+#~ "in TF 1.X."
#~ msgstr ""
-#~ msgid "Apply the gate to two bits with given indexes."
+#~ msgid ""
+#~ "Signatures to save with the SavedModel."
+#~ " Applicable to the 'tf' format only."
+#~ " Please see the `signatures` argument "
+#~ "in `tf.saved_model.save` for details."
#~ msgstr ""
-#~ msgid "The Gate applied on bits."
+#~ msgid ""
+#~ "(only applies to SavedModel format) "
+#~ "`tf.saved_model.SaveOptions` object that specifies"
+#~ " options for saving to SavedModel."
#~ msgstr ""
-#~ msgid "The index of the bit to apply the Gate."
+#~ msgid ""
+#~ "(only applies to SavedModel format) When"
+#~ " enabled, the SavedModel will store "
+#~ "the function traces for each layer. "
+#~ "This can be disabled, so that only"
+#~ " the configs of each layer are "
+#~ "stored. Defaults to `True`. Disabling "
+#~ "this will decrease serialization time "
+#~ "and reduce file size, but it "
+#~ "requires that all custom layers/models "
+#~ "implement a `get_config()` method."
#~ msgstr ""
-#~ msgid "Apply the gate to the bit with the given index."
+#~ msgid "```python from keras.models import load_model"
#~ msgstr ""
-#~ msgid "The Gate applied on the bit."
+#~ msgid ""
+#~ "model.save('my_model.h5') # creates a HDF5"
+#~ " file 'my_model.h5' del model # "
+#~ "deletes the existing model"
#~ msgstr ""
#~ msgid ""
-#~ "Return the list of nodes that "
-#~ "consititues the expectation value just "
-#~ "before the contraction."
+#~ "# returns a compiled model # "
+#~ "identical to the previous one model "
+#~ "= load_model('my_model.h5') ```"
#~ msgstr ""
-#~ msgid "whether contract the output state firstly, defaults to True"
+#~ msgid "Returns the `tf.TensorSpec` of call inputs as a tuple `(args, kwargs)`."
#~ msgstr ""
-#~ msgid "The tensor network for the expectation"
+#~ msgid ""
+#~ "# arg_specs is `[tf.TensorSpec(...), ...]`."
+#~ " kwarg_specs, in this example, is #"
+#~ " an empty dict since functional "
+#~ "models do not use keyword arguments. "
+#~ "arg_specs, kwarg_specs = model.save_spec()"
#~ msgstr ""
#~ msgid ""
-#~ "if true, we sample from the final"
-#~ " state if memory allsows, True is "
-#~ "prefered, defaults to False"
+#~ "'serving_default': serve.get_concrete_function(*arg_specs, "
+#~ "**kwarg_specs)"
#~ msgstr ""
#~ msgid ""
-#~ "List (if batch) of tuple (binary "
-#~ "configuration tensor and correponding "
-#~ "probability)"
+#~ "The TensorFlow format matches objects "
+#~ "and variables by starting at a "
+#~ "root object, `self` for `save_weights`, "
+#~ "and greedily matching attribute names. "
+#~ "For `Model.save` this is the `Model`,"
+#~ " and for `Checkpoint.save` this is "
+#~ "the `Checkpoint` even if the "
+#~ "`Checkpoint` has a model attached. This"
+#~ " means saving a `tf.keras.Model` using "
+#~ "`save_weights` and loading into a "
+#~ "`tf.train.Checkpoint` with a `Model` attached"
+#~ " (or vice versa) will not match "
+#~ "the `Model`'s variables. See the [guide"
+#~ " to training "
+#~ "checkpoints](https://www.tensorflow.org/guide/checkpoint) for"
+#~ " details on the TensorFlow format."
#~ msgstr ""
-#~ msgid "Sampling bistrings from the circuit output based on quantum amplitudes."
+#~ msgid ""
+#~ "Either 'tf' or 'h5'. A `filepath` "
+#~ "ending in '.h5' or '.keras' will "
+#~ "default to HDF5 if `save_format` is "
+#~ "`None`. Otherwise `None` defaults to "
+#~ "'tf'."
#~ msgstr ""
-#~ msgid "tensorcircuit.densitymatrix2"
+#~ msgid "If `h5py` is not available when attempting to save in HDF5 format."
#~ msgstr ""
-#~ msgid "Apply **CNOT** gate on the circuit."
+#~ msgid ""
+#~ "Relative or absolute positions of log"
+#~ " elements in each line. If not "
+#~ "provided, defaults to `[.33, .55, .67,"
+#~ " 1.]`."
#~ msgstr ""
-#~ msgid "Apply **CY** gate on the circuit."
+#~ msgid ""
+#~ "Print function to use. Defaults to "
+#~ "`print`. It will be called on each"
+#~ " line of the summary. You can "
+#~ "set it to a custom function in "
+#~ "order to capture the string summary."
#~ msgstr ""
-#~ msgid "Apply **CZ** gate on the circuit."
+#~ msgid ""
+#~ "Whether to expand the nested models. "
+#~ "If not provided, defaults to `False`."
#~ msgstr ""
-#~ msgid "Apply **H** gate on the circuit."
+#~ msgid ""
+#~ "Input data. It could be: - A "
+#~ "Numpy array (or array-like), or a"
+#~ " list of arrays (in case the "
+#~ "model has multiple inputs). - A "
+#~ "TensorFlow tensor, or a list of "
+#~ "tensors (in case the model has "
+#~ "multiple inputs). - A dict mapping "
+#~ "input names to the corresponding "
+#~ "array/tensors, if the model has "
+#~ "named inputs."
#~ msgstr ""
-#~ msgid "Apply **I** gate on the circuit."
+#~ msgid "A dict mapping input names to the corresponding array/tensors, if"
#~ msgstr ""
-#~ msgid "Apply **S** gate on the circuit."
+#~ msgid "the model has named inputs."
#~ msgstr ""
-#~ msgid "Apply **SWAP** gate on the circuit."
+#~ msgid "If `model.test_on_batch` is wrapped in a `tf.function`."
#~ msgstr ""
-#~ msgid "Apply **T** gate on the circuit."
+#~ msgid "Additional keyword arguments to be passed to `json.dumps()`."
#~ msgstr ""
-#~ msgid "Apply **WROOT** gate on the circuit."
+#~ msgid ""
+#~ "Optional dictionary mapping class indices "
+#~ "(integers) to a weight (float) to "
+#~ "apply to the model's loss for the"
+#~ " samples from this class during "
+#~ "training. This can be useful to "
+#~ "tell the model to \"pay more "
+#~ "attention\" to samples from an under-"
+#~ "represented class."
#~ msgstr ""
-#~ msgid "Apply **X** gate on the circuit."
+#~ msgid ""
+#~ "This method can be overridden to "
+#~ "support custom training logic. For "
+#~ "concrete examples of how to override "
+#~ "this method see [Customizing what "
+#~ "happends in "
+#~ "fit](https://www.tensorflow.org/guide/keras/customizing_what_happens_in_fit)."
+#~ " This method is called by "
+#~ "`Model.make_train_function`."
#~ msgstr ""
-#~ msgid "Apply **Y** gate on the circuit."
+#~ msgid ""
+#~ "This method should contain the "
+#~ "mathematical logic for one step of "
+#~ "training. This typically includes the "
+#~ "forward pass, loss calculation, "
+#~ "backpropagation, and metric updates."
#~ msgstr ""
-#~ msgid "Apply **Z** gate on the circuit."
+#~ msgid "Bases: :py:class:`~keras.engine.base_layer.Layer`"
#~ msgstr ""
#~ msgid ""
-#~ "Compute :math:`\\prod_{i\\in \\text{index}} s_i`,"
-#~ " where the probability for each "
-#~ "bitstring is given as a vector "
-#~ "``results``."
+#~ "Input tensor, or dict/list/tuple of "
+#~ "input tensors. The first positional "
+#~ "`inputs` argument is subject to special"
+#~ " rules: - `inputs` must be explicitly"
+#~ " passed. A layer cannot have zero"
+#~ " arguments, and `inputs` cannot be "
+#~ "provided via the default value of "
+#~ "a keyword argument. - NumPy array "
+#~ "or Python scalar values in `inputs` "
+#~ "get cast as tensors. - Keras mask"
+#~ " metadata is only collected from "
+#~ "`inputs`. - Layers are built "
+#~ "(`build(input_shape)` method) using shape "
+#~ "info from `inputs` only. - `input_spec`"
+#~ " compatibility is only checked against "
+#~ "`inputs`. - Mixed precision input "
+#~ "casting is only applied to `inputs`."
+#~ " If a layer has tensor arguments"
+#~ " in `*args` or `**kwargs`, their "
+#~ "casting behavior in mixed precision "
+#~ "should be handled manually. - The "
+#~ "SavedModel input specification is generated"
+#~ " using `inputs` only. - Integration "
+#~ "with various ecosystem packages like "
+#~ "TFMOT, TFLite, TF.js, etc is only "
+#~ "supported for `inputs` and not for "
+#~ "tensors in positional and keyword "
+#~ "arguments."
#~ msgstr ""
-#~ msgid "Generate sparse tensor from Pauli string sum"
+#~ msgid ""
+#~ "Additional keyword arguments. May contain "
+#~ "tensors, although this is not "
+#~ "recommended, for the reasons above. The"
+#~ " following optional keyword arguments are"
+#~ " reserved: - `training`: Boolean scalar "
+#~ "tensor of Python boolean indicating "
+#~ "whether the `call` is meant for "
+#~ "training or inference. - `mask`: Boolean"
+#~ " input mask. If the layer's `call()`"
+#~ " method takes a `mask` argument, "
+#~ "its default value will be set to"
+#~ " the mask generated for `inputs` by"
+#~ " the previous layer (if `input` did"
+#~ " come from a layer that generated"
+#~ " a corresponding mask, i.e. if it "
+#~ "came from a Keras layer with "
+#~ "masking support)."
#~ msgstr ""
-#~ msgid "Generate dense matrix from Pauli string sum"
+#~ msgid ""
+#~ "Bases: "
+#~ ":py:class:`~keras.optimizer_v2.learning_rate_schedule.LearningRateSchedule`"
#~ msgstr ""
-#~ msgid "Generate Heisenberg Hamiltonian with possible external fields."
+#~ msgid ""
+#~ "Get Pauli string array and weights "
+#~ "array for a qubit Hamiltonian as a"
+#~ " sum of Pauli strings defined in "
+#~ "openfermion ``QubitOperator``."
#~ msgstr ""
-#~ msgid "calibration qubit list"
+#~ msgid "``openfermion.ops.operators.qubit_operator.QubitOperator``"
#~ msgstr ""
-#~ msgid "tensorcircuit.cloud"
+#~ msgid "The number of qubits"
#~ msgstr ""
-#~ msgid "tensorcircuit.cloud.config"
+#~ msgid "Pauli String array and weights array"
#~ msgstr ""
diff --git a/docs/source/locale/zh/LC_MESSAGES/contribs.po b/docs/source/locale/zh/LC_MESSAGES/contribs.po
index cb11a0fd..004ca724 100644
--- a/docs/source/locale/zh/LC_MESSAGES/contribs.po
+++ b/docs/source/locale/zh/LC_MESSAGES/contribs.po
@@ -9,142 +9,222 @@ msgid ""
msgstr ""
"Project-Id-Version: tensorcircuit \n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2023-02-02 14:19+0800\n"
+"POT-Creation-Date: 2023-07-14 15:43+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME \n"
"Language-Team: LANGUAGE \n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.9.1\n"
+"Generated-By: Babel 2.12.1\n"
+#: ../../source/contribs/development_Mac.md:1
#: ../../source/contribs/development_MacARM.md:1
+#: ../../source/contribs/development_MacM2.md:1
msgid "Tensorcircuit Installation Guide on MacOS"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:3
-msgid "Contributed by Mark (Zixuan) Song"
+#: ../../source/contribs/development_Mac.md:3
+msgid "Contributed by [_Mark (Zixuan) Song_](https://marksong.tech)"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:5
+#: ../../source/contribs/development_Mac.md:5
+msgid ""
+"Apple has updated Tensorflow (for MacOS) so that installation on M-series"
+" (until M2) and Intel-series Mac can follow the exact same procedure."
+msgstr ""
+
+#: ../../source/contribs/development_Mac.md:7
+#: ../../source/contribs/development_MacARM.md:8
+#: ../../source/contribs/development_MacM2.md:10
msgid "Starting From Scratch"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:7
-msgid "For completely new macos or macos without xcode and brew"
+#: ../../source/contribs/development_Mac.md:9
+msgid "For completely new Macos or Macos without Xcode and Homebrew installed."
msgstr ""
-#: ../../source/contribs/development_MacARM.md:9
+#: ../../source/contribs/development_Mac.md:11
+#: ../../source/contribs/development_MacARM.md:12
+#: ../../source/contribs/development_MacM2.md:12
msgid "Install Xcode Command Line Tools"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:11
+#: ../../source/contribs/development_Mac.md:13
+#: ../../source/contribs/development_MacARM.md:14
+#: ../../source/contribs/development_MacM2.md:14
msgid "Need graphical access to the machine."
msgstr ""
-#: ../../source/contribs/development_MacARM.md:13
+#: ../../source/contribs/development_Mac.md:15
+#: ../../source/contribs/development_MacARM.md:16
+#: ../../source/contribs/development_MacM2.md:16
msgid "Run `xcode-select --install` to install if on optimal internet."
msgstr ""
-#: ../../source/contribs/development_MacARM.md:15
+#: ../../source/contribs/development_Mac.md:17
msgid ""
-"Or Download from [Apple](https://developer.apple.com/download/more/) "
-"Command Line Tools installation image then install if internet connection"
-" is weak."
+"Or Download it from [Apple](https://developer.apple.com/download/more/) "
+"Command Line Tools installation image then install it if the internet "
+"connection is weak."
msgstr ""
-#: ../../source/contribs/development_MacARM.md:17
+#: ../../source/contribs/development_Mac.md:19
+#: ../../source/contribs/development_MacARM.md:20
+#: ../../source/contribs/development_MacM2.md:20
msgid "Install Miniconda"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:19
+#: ../../source/contribs/development_Mac.md:21
msgid ""
-"Due to the limitation of MacOS and packages, the lastest version of "
-"python does not always function as desired, thus miniconda installation "
-"is advised to solve the issues."
+"Due to the limitation of MacOS and packages, the latest version of Python"
+" does not always function as desired, thus miniconda installation is "
+"advised to solve the issues."
msgstr ""
-#: ../../source/contribs/development_MacARM.md:28
-msgid "Install TC Prerequisites"
-msgstr ""
-
-#: ../../source/contribs/development_MacARM.md:34
+#: ../../source/contribs/development_Mac.md:30
+#: ../../source/contribs/development_MacARM.md:37
msgid "Install TC Backends"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:36
-msgid "There are four backends to choose from, Numpy, Tensorflow, Jax, Torch."
+#: ../../source/contribs/development_Mac.md:32
+msgid "There are four backends to choose from, Numpy, Tensorflow, Jax, and Torch."
msgstr ""
-#: ../../source/contribs/development_MacARM.md:38
+#: ../../source/contribs/development_Mac.md:34
+#: ../../source/contribs/development_MacARM.md:41
msgid "Install Jax, Pytorch, Qiskit, Cirq (Optional)"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:44
+#: ../../source/contribs/development_Mac.md:40
+#: ../../source/contribs/development_MacARM.md:47
msgid "Install Tensorflow (Optional)"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:46
-msgid "Install Tensorflow (Recommended Approach)"
+#: ../../source/contribs/development_Mac.md:42
+msgid "Installation"
+msgstr ""
+
+#: ../../source/contribs/development_Mac.md:44
+msgid "For Tensorflow version 2.13 or later:"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:48
+#: ../../source/contribs/development_Mac.md:50
+msgid "For Tensorflow version 2.12 or earlier:"
+msgstr ""
+
+#: ../../source/contribs/development_Mac.md:56
+#: ../../source/contribs/development_MacARM.md:57
+#: ../../source/contribs/development_MacARM.md:89
+msgid "Verify Tensorflow Installation"
+msgstr ""
+
+#: ../../source/contribs/development_Mac.md:74
+#: ../../source/contribs/development_MacARM.md:107
+msgid "Install Tensorcircuit"
+msgstr ""
+
+#: ../../source/contribs/development_Mac.md:80
msgid ""
-"❗️ Tensorflow with MacOS optimization would not function correctly in "
-"version 2.11.0 and before. Do not use this version of tensorflow if you "
-"intented to train any machine learning model."
+"Until July 2023, this has been tested on Intel Macs running Ventura, M1 "
+"Macs running Ventura, M2 Macs running Ventura, and M2 Macs running Sonoma"
+" beta."
+msgstr ""
+
+#: ../../source/contribs/development_MacARM.md:3
+msgid "Contributed by Mark (Zixuan) Song"
+msgstr ""
+
+#: ../../source/contribs/development_MacARM.md:5
+#: ../../source/contribs/development_MacM2.md:5
+msgid ""
+".. warning:: This page is deprecated. Please visit `the update "
+"tutorial `_ for the latest information."
+msgstr ""
+
+#: ../../source/contribs/development_MacARM.md:10
+msgid "For completely new macos or macos without xcode and brew"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:50
+#: ../../source/contribs/development_MacARM.md:18
+#: ../../source/contribs/development_MacM2.md:18
msgid ""
-"FYI: Error can occur when machine learning training or gpu related code "
-"is involved."
+"Or Download from [Apple](https://developer.apple.com/download/more/) "
+"Command Line Tools installation image then install if internet connection"
+" is weak."
msgstr ""
-#: ../../source/contribs/development_MacARM.md:52
+#: ../../source/contribs/development_MacARM.md:22
msgid ""
-"⚠️ Tensorflow without macos optimization does not support Metal API and "
-"utilizing GPU (both intel chips and M-series chips) until at least "
-"tensorflow 2.11. Tensorflow-macos would fail when running "
-"`tc.backend.to_dense()`"
+"Due to the limitation of MacOS and packages, the lastest version of "
+"python does not always function as desired, thus miniconda installation "
+"is advised to solve the issues."
msgstr ""
-#: ../../source/contribs/development_MacARM.md:60
-msgid "Verify Tensorflow Installation"
+#: ../../source/contribs/development_MacARM.md:31
+msgid "Install TC Prerequisites"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:78
-msgid "Install Tensorcircuit"
+#: ../../source/contribs/development_MacARM.md:39
+msgid "There are four backends to choose from, Numpy, Tensorflow, Jax, Torch."
+msgstr ""
+
+#: ../../source/contribs/development_MacARM.md:49
+msgid "Install Tensorflow without MacOS optimization"
+msgstr ""
+
+#: ../../source/contribs/development_MacARM.md:75
+msgid "Install Tensorflow with MacOS optimization (Recommended)"
+msgstr ""
+
+#: ../../source/contribs/development_MacARM.md:77
+msgid "For tensorflow version 2.13 or later:"
+msgstr ""
+
+#: ../../source/contribs/development_MacARM.md:83
+msgid "For tensorflow version 2.12 or earlier:"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:84
-msgid "Testing Platform (Tested Feb 2023)"
+#: ../../source/contribs/development_MacARM.md:113
+msgid "Testing Platform (Tested Jun 2023)"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:86
+#: ../../source/contribs/development_MacARM.md:115
msgid "Platform 1:"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:87
+#: ../../source/contribs/development_MacARM.md:116
msgid "MacOS Ventura 13.1 (Build version 22C65)"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:88
+#: ../../source/contribs/development_MacARM.md:117
msgid "M1 Ultra"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:89
+#: ../../source/contribs/development_MacARM.md:118
msgid "Platform 2:"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:90
+#: ../../source/contribs/development_MacARM.md:119
msgid "MacOS Ventura 13.2 (Build version 22D49)"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:91
+#: ../../source/contribs/development_MacARM.md:120
msgid "M1 Ultra (Virtual)"
msgstr ""
+#: ../../source/contribs/development_MacARM.md:121
+msgid "Platform 4:"
+msgstr ""
+
+#: ../../source/contribs/development_MacARM.md:122
+msgid "MacOS Sonoma 14.0 Beta 2 (Build version 23A5276g)"
+msgstr ""
+
+#: ../../source/contribs/development_MacARM.md:123
+msgid "M2 Max"
+msgstr ""
+
#: ../../source/contribs/development_MacM1.rst:2
msgid "Run TensorCircuit on TensorlowBackend with Apple M1"
msgstr ""
@@ -156,7 +236,7 @@ msgstr ""
#: ../../source/contribs/development_MacM1.rst:7
msgid ""
"This page is deprecated. Please visit `the update tutorial "
-"`_ for latest information."
+"`_ for the latest information."
msgstr ""
#: ../../source/contribs/development_MacM1.rst:11
@@ -256,6 +336,146 @@ msgstr ""
msgid "Then unpackage it, and cd into the folder with \"setup.py\". Conducting"
msgstr ""
+#: ../../source/contribs/development_MacM2.md:3
+msgid "Contributed by [Hong-Ye Hu](https://github.com/hongyehu)"
+msgstr ""
+
+#: ../../source/contribs/development_MacM2.md:8
+msgid ""
+"The key issue addressed in this document is **how to install both "
+"TensorFlow and Jax on a M2 chip MacOS without conflict**."
+msgstr ""
+
+#: ../../source/contribs/development_MacM2.md:22
+msgid ""
+"Due to the limitation of MacOS and packages, the lastest version of "
+"python does not always function as desired, thus miniconda installation "
+"is advised to solve the issues. And use anaconda virtual environment is "
+"always a good habit."
+msgstr ""
+
+#: ../../source/contribs/development_MacM2.md:30
+msgid "Install Packages"
+msgstr ""
+
+#: ../../source/contribs/development_MacM2.md:31
+msgid ""
+"First, create a virtual environment, and make sure the python version is "
+"3.8.5 by"
+msgstr ""
+
+#: ../../source/contribs/development_MacM2.md:36
+msgid ""
+"Then, install the TensorFlow from `.whl` file (file can be downloaded "
+"from this "
+"[URL](https://drive.google.com/drive/folders/1oSipZLnoeQB0Awz8U68KYeCPsULy_dQ7))."
+" This will install TensorFlow version 2.4.1"
+msgstr ""
+
+#: ../../source/contribs/development_MacM2.md:40
+msgid "Next, one need to install **Jax** and **Optax** by"
+msgstr ""
+
+#: ../../source/contribs/development_MacM2.md:45
+msgid ""
+"Now, hopefully, you should be able to use both Jax and TensorFlow in this"
+" environment. But sometimes, it may give you an error \"ERROR: package "
+"Chardet not found.\". If that is the case, you can install it by `conda "
+"install chardet`. Lastly, install tensorcircuit"
+msgstr ""
+
+#: ../../source/contribs/development_MacM2.md:51
+msgid ""
+"This is the solution that seems to work for M2-chip MacOS. Please let me "
+"know if there is a better solution!"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:1
+msgid "MacOS Tensorcircuit 安装教程"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:3
+msgid "[_Mark (Zixuan) Song_](https://marksong.tech) 撰写"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:5
+msgid "由于苹果更新了Tensorflow,因此M系列(直到M2)和英特尔系列Mac上的安装可以遵循完全相同的过程。"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:7
+msgid "从头开始"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:9
+msgid "对于全新的Macos或未安装Xcode和Homebrew的Macos。"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:11
+msgid "安装Xcode命令行工具"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:13
+msgid "需要对机器的图形访问。"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:15
+msgid "如果网络良好,请运行`xcode-select --install`进行安装。"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:17
+msgid "或者,如果网络连接较弱,请从[苹果](https://developer.apple.com/download/more/)下载命令行工具安装映像,然后进行安装。"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:19
+msgid "安装Miniconda"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:21
+msgid "由于MacOS和软件包的限制,因此建议安装miniconda以解决问题。"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:30
+msgid "安装TC后端"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:32
+msgid "有四个后端可供选择,Numpy,Tensorflow,Jax和Torch。"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:34
+msgid "安装Jax,Pytorch,Qiskit,Cirq(可选)"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:40
+msgid "安装Tensorflow(可选)"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:42
+msgid "安装步骤"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:44
+msgid "Tensorflow版本2.13或之后:"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:50
+msgid "Tensorflow版本2.12或之前:"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:56
+msgid "验证Tensorflow安装"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:74
+msgid "安装Tensorcircuit"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:80
+msgid ""
+"直到2023年7月,这已在运行Ventura的英特尔i9 Mac、运行Ventura的M1 Mac、运行Ventura的M2 "
+"Mac、运行Sonoma测试版的M2 Mac上进行了测试。"
+msgstr ""
+
#: ../../source/contribs/development_windows.rst:2
msgid "Run TensorCircuit on Windows Machine with Docker"
msgstr ""
@@ -666,3 +886,39 @@ msgstr ""
#~ msgid "Testing Platform"
#~ msgstr ""
+#~ msgid "Install Tensorflow (Recommended Approach)"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "❗️ Tensorflow with MacOS optimization "
+#~ "would not function correctly in version"
+#~ " 2.11.0 and before. Do not use "
+#~ "this version of tensorflow if you "
+#~ "intented to train any machine learning"
+#~ " model."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "FYI: Error can occur when machine "
+#~ "learning training or gpu related code"
+#~ " is involved."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "⚠️ Tensorflow without macos optimization "
+#~ "does not support Metal API and "
+#~ "utilizing GPU (both intel chips and "
+#~ "M-series chips) until at least "
+#~ "tensorflow 2.11. Tensorflow-macos would "
+#~ "fail when running `tc.backend.to_dense()`"
+#~ msgstr ""
+
+#~ msgid "Testing Platform (Tested Feb 2023)"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "This page is deprecated. Please visit"
+#~ " `the update tutorial `_"
+#~ " for latest information."
+#~ msgstr ""
+
diff --git a/docs/source/locale/zh/LC_MESSAGES/index.po b/docs/source/locale/zh/LC_MESSAGES/index.po
index 1ee54686..20ed436d 100644
--- a/docs/source/locale/zh/LC_MESSAGES/index.po
+++ b/docs/source/locale/zh/LC_MESSAGES/index.po
@@ -6,165 +6,354 @@
#
msgid ""
msgstr ""
-"Project-Id-Version: tensorcircuit\n"
+"Project-Id-Version: tensorcircuit\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2023-02-02 13:41+0800\n"
-"PO-Revision-Date: 2022-04-16 22:37+0800\n"
+"POT-Creation-Date: 2023-05-28 14:36+0800\n"
+"PO-Revision-Date: 2023-05-28 14:39+0800\n"
"Last-Translator: Xinghan Yang\n"
-"Language: cn\n"
"Language-Team: \n"
+"Language: cn\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.9.1\n"
+"X-Generator: Poedit 3.2.2\n"
#: ../../source/index.rst:2
-msgid "Guide to TensorCircuit"
-msgstr "TensorCircuit 指南"
+msgid "TensorCircuit Documentation"
+msgstr "参考文档"
+
+#: ../../source/index.rst:8
+msgid "**Welcome and congratulations! You have found TensorCircuit.** 👏"
+msgstr "**祝贺你发现了 TensorCircuit!** 👏"
+
+#: ../../source/index.rst:11
+msgid "Introduction"
+msgstr "介绍"
-#: ../../source/index.rst:7
+#: ../../source/index.rst:13
msgid ""
-"TensorCircuit is an open source quantum circuit and algorithm simulation "
-"framework."
-msgstr "TensorCircuit 是开源的量子线路和量子算法模拟软件框架。"
+"TensorCircuit is an open-source high-performance quantum computing software "
+"framework in Python."
+msgstr "TensorCircuit 是基于 Python 的开源高性能量子计算软件框架。"
-#: ../../source/index.rst:9
-msgid "It is built for human beings. 👽"
+#: ../../source/index.rst:15
+msgid "It is built for humans. 👽"
msgstr "适合人类。👽"
-#: ../../source/index.rst:11
+#: ../../source/index.rst:17
msgid "It is designed for speed, flexibility and elegance. 🚀"
msgstr "速度,灵活,优雅。🚀"
-#: ../../source/index.rst:13
+#: ../../source/index.rst:19
msgid "It is empowered by advanced tensor network simulator engine. 🔋"
msgstr "先进张量网络引擎赋能。🔋"
-#: ../../source/index.rst:15
+#: ../../source/index.rst:21
+msgid ""
+"It is ready for quantum hardware access with CPU/GPU/QPU (local/cloud) hybrid "
+"solutions. 🖥"
+msgstr "量子硬件支持,优雅 CPU/GPU/QPU 混合部署方案。 🖥"
+
+#: ../../source/index.rst:23
msgid ""
"It is implemented with industry-standard machine learning frameworks: "
"TensorFlow, JAX, and PyTorch. 🤖"
msgstr "业界标准机器学习框架 TensorFlow,JAX,PyTorch 实现。🤖"
-#: ../../source/index.rst:17
+#: ../../source/index.rst:25
msgid ""
"It is compatible with machine learning engineering paradigms: automatic "
-"differentiation, just-in-time compilation, vectorized parallelism and GPU"
-" acceleration. 🛠"
+"differentiation, just-in-time compilation, vectorized parallelism and GPU "
+"acceleration. 🛠"
msgstr "与机器学习工程实践兼容:自动微分,即时编译,向量并行化和 GPU 加速。🛠"
-#: ../../source/index.rst:20
-msgid "Links"
-msgstr "重要链接"
+#: ../../source/index.rst:27
+msgid ""
+"With the help of TensorCircuit, now get ready to efficiently and elegantly "
+"solve interesting and challenging quantum computing problems: from academic "
+"research prototype to industry application deployment."
+msgstr ""
+"有了 TensorCircuit,你现在可以高效优雅地解决量子计算中的各种问题:从学术研究的原"
+"型开发到工业应用的部署。"
+
+#: ../../source/index.rst:33
+msgid "Relevant Links"
+msgstr "相关链接"
+
+#: ../../source/index.rst:35
+msgid ""
+"TensorCircuit is created and maintained by `Shi-Xin Zhang `_ and this version is released by `Tencent Quantum Lab `_."
+msgstr ""
+"TensorCircuit 由 `Shi-Xin Zhang `_ 创建和维"
+"护;此版本由 `腾讯量子实验室 `_ 发布。"
+
+#: ../../source/index.rst:37
+msgid ""
+"The current core authors of TensorCircuit are `Shi-Xin Zhang `_ and `Yu-Qin Chen `_. We also "
+"thank `contributions `_ from the lab and the open source community."
+msgstr ""
+"TensorCircuit 当前主要作者为 `Shi-Xin Zhang `_,`Yu-Qin Chen `_。同时感谢来自实验室和开源社"
+"区的 `贡献 `_。"
-#: ../../source/index.rst:22
+#: ../../source/index.rst:40
msgid ""
-"TensorCircuit is created and maintained by `Shi-Xin Zhang "
-"`_ and this version of the software is"
-" released by `Tencent Quantum Lab `_. The "
-"current core authors of TensorCircuit are `Shi-Xin Zhang "
-"`_ and `Yu-Qin Chen "
-"`_. We also thank `contributions "
-"`_ from the lab and the open source"
-" community."
-msgstr ""
-
-#: ../../source/index.rst:26
-msgid "Source code: https://github.com/tencent-quantum-lab/tensorcircuit"
-msgstr "源代码: https://github.com/tencent-quantum-lab/tensorcircuit"
-
-#: ../../source/index.rst:28
-#, fuzzy
-msgid "Software Whitepaper: https://arxiv.org/abs/2205.10091"
-msgstr "白皮书文章: https://arxiv.org/abs/2205.10091"
-
-#: ../../source/index.rst:30
-msgid "Documentation: https://tensorcircuit.readthedocs.io"
-msgstr "文档: https://tensorcircuit.readthedocs.io"
-
-#: ../../source/index.rst:32
-msgid "Issue Tracker: https://github.com/tencent-quantum-lab/tensorcircuit/issues"
-msgstr "问题跟踪: https://github.com/tencent-quantum-lab/tensorcircuit/issues"
-
-#: ../../source/index.rst:34
-msgid "Forum: https://github.com/tencent-quantum-lab/tensorcircuit/discussions"
-msgstr "论坛社区: https://github.com/tencent-quantum-lab/tensorcircuit/discussions"
-
-#: ../../source/index.rst:36
-msgid "PyPI page: https://pypi.org/project/tensorcircuit"
-msgstr "PyPI 页面: https://pypi.org/project/tensorcircuit"
-
-#: ../../source/index.rst:38
+"If you have any further questions or collaboration ideas, please use the issue "
+"tracker or forum below, or send email to shixinzhang#tencent.com."
+msgstr ""
+"如果关于 TensorCircuit 有任何问题咨询或合作意向,请在 issue 或 discussion 提问,"
+"或发送邮件到 shixinzhang#tencent.com。"
+
+#: ../../source/index.rst:45
+msgid "Source code"
+msgstr "源代码"
+
+#: ../../source/index.rst:49
+msgid "GitHub"
+msgstr ""
+
+#: ../../source/index.rst:52
+msgid "Documentation"
+msgstr "参考文档"
+
+#: ../../source/index.rst:56
+msgid "Readthedocs"
+msgstr ""
+
+#: ../../source/index.rst:59
+msgid "Whitepaper"
+msgstr "白皮书"
+
+#: ../../source/index.rst:63
+msgid "*Quantum* journal"
+msgstr "Quantum 期刊"
+
+#: ../../source/index.rst:66
+msgid "Issue Tracker"
+msgstr "问题跟踪"
+
+#: ../../source/index.rst:70
+msgid "GitHub Issues"
+msgstr ""
+
+#: ../../source/index.rst:73
+msgid "Forum"
+msgstr "论坛"
+
+#: ../../source/index.rst:77
+msgid "GitHub Discussions"
+msgstr ""
+
+#: ../../source/index.rst:80
+msgid "PyPI"
+msgstr ""
+
+#: ../../source/index.rst:84
+msgid "``pip install``"
+msgstr ""
+
+#: ../../source/index.rst:87
+msgid "DockerHub"
+msgstr ""
+
+#: ../../source/index.rst:91
+msgid "``docker pull``"
+msgstr ""
+
+#: ../../source/index.rst:94
+msgid "Application"
+msgstr "应用"
+
+#: ../../source/index.rst:98
+msgid "Research using TC"
+msgstr "研究项目"
+
+#: ../../source/index.rst:101
+msgid "Cloud"
+msgstr "量子云"
+
+#: ../../source/index.rst:104
+msgid "Tencent Quantum Cloud"
+msgstr "腾讯量子云平台"
+
+#: ../../source/index.rst:131
+msgid "Unified Quantum Programming"
+msgstr "统一量子编程"
+
+#: ../../source/index.rst:133
msgid ""
-"DockerHub page: "
-"https://hub.docker.com/repository/docker/tensorcircuit/tensorcircuit"
+"TensorCircuit is unifying infrastructures and interfaces for quantum computing."
+msgstr "TensorCircuit 尝试统一量子计算的基础设施和编程界面。"
+
+#: ../../source/index.rst:140
+msgid "Unified Backends"
+msgstr "统一后端"
+
+#: ../../source/index.rst:144
+msgid "Jax/TensorFlow/PyTorch/Numpy/Cupy"
+msgstr ""
+
+#: ../../source/index.rst:146
+msgid "Unified Devices"
+msgstr "统一设备"
+
+#: ../../source/index.rst:150
+msgid "CPU/GPU/TPU"
msgstr ""
-"DockerHub 页面: "
-"https://hub.docker.com/repository/docker/tensorcircuit/tensorcircuit"
-#: ../../source/index.rst:43
+#: ../../source/index.rst:152
+msgid "Unified Providers"
+msgstr "统一平台"
+
+#: ../../source/index.rst:156
+msgid "QPUs from different vendors"
+msgstr "不同供应商的 QPU"
+
+#: ../../source/index.rst:158
+msgid "Unified Resources"
+msgstr "统一资源"
+
+#: ../../source/index.rst:162
+msgid "local/cloud/HPC"
+msgstr "本地/云/集群"
+
+#: ../../source/index.rst:170
+msgid "Unified Interfaces"
+msgstr "统一接口"
+
+#: ../../source/index.rst:174
+msgid "numerical sim/hardware exp"
+msgstr "数值模拟/硬件实验"
+
+#: ../../source/index.rst:176
+msgid "Unified Engines"
+msgstr "统一引擎"
+
+#: ../../source/index.rst:180
+msgid "ideal/noisy/approximate simulation"
+msgstr "理想/含噪/近似模拟"
+
+#: ../../source/index.rst:182
+msgid "Unified Representations"
+msgstr "统一表示"
+
+#: ../../source/index.rst:186
+msgid "from/to_IR/qiskit/openqasm/json"
+msgstr ""
+
+#: ../../source/index.rst:188
+msgid "Unified Pipelines"
+msgstr "统一流程"
+
+#: ../../source/index.rst:192
+msgid "stateless functional programming/stateful ML models"
+msgstr "函数式编程/面向对象模型"
+
+#: ../../source/index.rst:198
msgid "Reference Documentation"
msgstr "参考文档"
-#: ../../source/index.rst:45
+#: ../../source/index.rst:200
msgid ""
-"The following documentation sections briefly introduce TensorCircuit to "
-"the users and developpers."
+"The following documentation sections briefly introduce TensorCircuit to the "
+"users and developpers."
msgstr "以下文档向用户和开发者简要介绍了 TensorCircuit 软件。"
-#: ../../source/index.rst:58
+#: ../../source/index.rst:213
msgid "Tutorials"
msgstr "教程"
-#: ../../source/index.rst:60
+#: ../../source/index.rst:215
msgid ""
-"The following documentation sections include integrated examples in the "
-"form of Jupyter Notebook."
-msgstr "以下 Jupyter Notebook 格式的文档包括了一系列使用 TensorCircuit 的集成案例。"
+"The following documentation sections include integrated examples in the form of "
+"Jupyter Notebook."
+msgstr ""
+"以下 Jupyter Notebook 格式的文档包括了一系列使用 TensorCircuit 的集成案例。"
-#: ../../source/index.rst:74
+#: ../../source/index.rst:229
msgid "API References"
msgstr "API 参考"
-#: ../../source/index.rst:83
+#: ../../source/index.rst:238
msgid "Indices and Tables"
msgstr "索引和表格"
-#: ../../source/index.rst:85
+#: ../../source/index.rst:240
msgid ":ref:`genindex`"
msgstr ":ref:`genindex`"
-#: ../../source/index.rst:86
+#: ../../source/index.rst:241
msgid ":ref:`modindex`"
msgstr ":ref:`modindex`"
-#: ../../source/index.rst:87
+#: ../../source/index.rst:242
msgid ":ref:`search`"
msgstr ":ref:`search`"
#~ msgid ""
-#~ "Binder online: https://mybinder.org/v2/gh/refraction-"
-#~ "ray/tc-env/master?urlpath=git-pull?repo=https://github.com"
-#~ "/tencent-quantum-"
-#~ "lab/tensorcircuit&urlpath=lab/tree/tensorcircuit/&branch=master"
+#~ "Binder online: https://mybinder.org/v2/gh/refraction-ray/tc-env/master?"
+#~ "urlpath=git-pull?repo=https://github.com/tencent-quantum-lab/"
+#~ "tensorcircuit&urlpath=lab/tree/tensorcircuit/&branch=master"
#~ msgstr ""
-#~ "在线 Binder Jupyter: https://mybinder.org/v2/gh"
-#~ "/refraction-ray/tc-env/master?urlpath=git-"
-#~ "pull?repo=https://github.com/tencent-quantum-"
-#~ "lab/tensorcircuit&urlpath=lab/tree/tensorcircuit/&branch=master"
+#~ "在线 Binder Jupyter: https://mybinder.org/v2/gh/refraction-ray/tc-env/master?"
+#~ "urlpath=git-pull?repo=https://github.com/tencent-quantum-lab/"
+#~ "tensorcircuit&urlpath=lab/tree/tensorcircuit/&branch=master"
+
+#~ msgid "Software Whitepaper: https://arxiv.org/abs/2205.10091"
+#~ msgstr "白皮书文章: https://arxiv.org/abs/2205.10091"
+
+#~ msgid "Guide to TensorCircuit"
+#~ msgstr "TensorCircuit 指南"
+
+#~ msgid "Links"
+#~ msgstr "重要链接"
+
+#~ msgid "Source code: https://github.com/tencent-quantum-lab/tensorcircuit"
+#~ msgstr "源代码: https://github.com/tencent-quantum-lab/tensorcircuit"
+
+#~ msgid "Documentation: https://tensorcircuit.readthedocs.io"
+#~ msgstr "文档: https://tensorcircuit.readthedocs.io"
#~ msgid ""
-#~ "This project is released by `Tencent "
-#~ "Quantum Lab `_ and "
-#~ "is created and maintained by `Shi-"
-#~ "Xin Zhang `_ "
-#~ "The current core authors are `Shi-"
-#~ "Xin Zhang `_ "
-#~ "and `Yu-Qin Chen "
-#~ "`_. We also thank "
-#~ "`contributions `_ from the "
-#~ "lab and the open source community."
+#~ "Software Whitepaper (published in Quantum): https://quantum-journal.org/"
+#~ "papers/q-2023-02-02-912/"
#~ msgstr ""
+#~ "软件白皮书 (发表于 Quantum): https://quantum-journal.org/papers/"
+#~ "q-2023-02-02-912/"
+
+#~ msgid ""
+#~ "Issue Tracker: https://github.com/tencent-quantum-lab/tensorcircuit/issues"
+#~ msgstr "问题跟踪: https://github.com/tencent-quantum-lab/tensorcircuit/issues"
+
+#~ msgid "Forum: https://github.com/tencent-quantum-lab/tensorcircuit/discussions"
+#~ msgstr ""
+#~ "论坛社区: https://github.com/tencent-quantum-lab/tensorcircuit/discussions"
+
+#~ msgid "PyPI page: https://pypi.org/project/tensorcircuit"
+#~ msgstr "PyPI 页面: https://pypi.org/project/tensorcircuit"
+
+#~ msgid ""
+#~ "DockerHub page: https://hub.docker.com/repository/docker/tensorcircuit/"
+#~ "tensorcircuit"
+#~ msgstr ""
+#~ "DockerHub 页面: https://hub.docker.com/repository/docker/tensorcircuit/"
+#~ "tensorcircuit"
+
+#~ msgid ""
+#~ "Research and projects based on TensorCircuit: https://github.com/tencent-"
+#~ "quantum-lab/tensorcircuit#research-and-applications"
+#~ msgstr ""
+#~ "基于 TensorCircuit 的研究和项目: https://github.com/tencent-quantum-lab/"
+#~ "tensorcircuit#research-and-applications"
+
+#~ msgid "Tencent Quantum Cloud Service: https://quantum.tencent.com/cloud/"
+#~ msgstr "腾讯量子云服务: https://quantum.tencent.com/cloud/"
+#~ msgid "Research based on TC"
+#~ msgstr "基于 TC 的研究项目"
diff --git a/docs/source/locale/zh/LC_MESSAGES/infras.po b/docs/source/locale/zh/LC_MESSAGES/infras.po
index 6a2077b5..b285ed77 100644
--- a/docs/source/locale/zh/LC_MESSAGES/infras.po
+++ b/docs/source/locale/zh/LC_MESSAGES/infras.po
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: tensorcircuit\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2023-01-13 11:04+0800\n"
+"POT-Creation-Date: 2023-05-27 18:52+0800\n"
"PO-Revision-Date: 2022-04-18 20:44+0800\n"
"Last-Translator: Xinghan Yang\n"
"Language: cn\n"
@@ -120,10 +120,11 @@ msgid "**ML Interfaces Related Modules:**"
msgstr "**机器学习接口相关模块:**"
#: ../../source/infras.rst:35
+#, fuzzy
msgid ""
":py:mod:`tensorcircuit.interfaces`: Provide interfaces when quantum "
"simulation backend is different from neural libraries. Currently include "
-"PyTorch and scipy optimizer interfaces."
+"PyTorch, TensorFlow, NumPy and SciPy optimizer interfaces."
msgstr ""
":py:mod:`tensorcircuit.interfaces`: 当量子模拟后端与神经库不同时提供接口。 目前包括 PyTorch 和 "
"scipy 优化器接口。"
@@ -216,30 +217,46 @@ msgstr ""
#: ../../source/infras.rst:65
msgid ""
":py:mod:`tensorcircuit.results`: Provide tools to process count dict and "
-"to apply error mitigation"
+"to apply error mitigation."
msgstr ""
#: ../../source/infras.rst:67
+msgid "**Cloud quantum hardware access module:**"
+msgstr ""
+
+#: ../../source/infras.rst:69
+msgid ""
+":py:mod:`tensorcircuit.cloud`: Provide quantum cloud SDK that can access "
+"and program the real quantum hardware."
+msgstr ""
+
+#: ../../source/infras.rst:71
+msgid ""
+":py:mod:`tensorcircuit.compiler`: Provide compiler chains to compile and "
+"transform quantum circuits."
+msgstr ""
+
+#: ../../source/infras.rst:73
msgid "**Shortcuts and Templates for Circuit Manipulation:**"
msgstr "**电路操作的快捷方式和模板:**"
-#: ../../source/infras.rst:69
+#: ../../source/infras.rst:75
msgid ""
":py:mod:`tensorcircuit.templates`: provide handy shortcuts functions for "
"expectation or circuit building patterns."
msgstr ":py:mod:`tensorcircuit.templates`: 为期望或电路构建模式提供方便的快捷函数。"
-#: ../../source/infras.rst:71
+#: ../../source/infras.rst:77
msgid "**Applications:**"
msgstr "**应用:**"
-#: ../../source/infras.rst:73
+#: ../../source/infras.rst:79
msgid ""
":py:mod:`tensorcircuit.applications`: most code here is not maintained "
"and deprecated, use at your own risk."
msgstr ":py:mod:`tensorcircuit.applications`: 这里的大多数代码都没有维护并且被弃用了,使用风险自负。"
-#: ../../source/infras.rst:77
+#: ../../source/infras.rst:83
msgid ""
"Recommend reading order -- only read the part of code you care about for "
"your purpose. If you want to get an overview of the codebase, please read"
@@ -248,11 +265,11 @@ msgstr ""
"推荐阅读顺序——只阅读你关心的部分代码。如果您想了解代码库的概述,之后可以阅读 ``tc.circuit`` 后面的 ``tc.cons`` 和 "
"``tc.gates``。"
-#: ../../source/infras.rst:82
+#: ../../source/infras.rst:88
msgid "Relation between TensorCircuit and TensorNetwork"
msgstr "TensorCircuit 和 TensorNetwork 之间的关系"
-#: ../../source/infras.rst:84
+#: ../../source/infras.rst:90
msgid ""
"TensorCircuit has a strong connection with the `TensorNetwork package "
"`_ released by Google. Since the"
@@ -266,7 +283,7 @@ msgstr ""
"包的文档和教程很差,大多数时候,我们需要深入研究 TensorNetwork 的代码库来弄清楚发生了什么。换句话说,要阅读 "
"TensorCircuit 代码库,可能需要经常参考 TensorNetwork 代码库。"
-#: ../../source/infras.rst:86
+#: ../../source/infras.rst:92
msgid ""
"Inside TensorCircuit, we heavily utilize TensorNetwork-related APIs from "
"the TensorNetwork package and highly customized several modules from "
@@ -275,7 +292,7 @@ msgstr ""
"在 TensorCircuit 内部,我们大量使用了 TensorNetwork 包中与 TensorNetwork 相关的 "
"API,并通过继承和重写从 TensorNetwork 中高度定制了几个模块:"
-#: ../../source/infras.rst:88
+#: ../../source/infras.rst:94
msgid ""
"We implement our own /backends from TensorNetwork's /backends by adding "
"much more APIs and fixing lots of bugs in TensorNetwork's implementations"
@@ -285,7 +302,7 @@ msgstr ""
"我们从 TensorNetwork 的后端实现我们自己的后端,方法是添加更多 API,并通过猴子补丁修复 TensorNetwork "
"在某些后端的实现中的许多错误。(上游是不活跃的,反馈不够灵敏)"
-#: ../../source/infras.rst:90
+#: ../../source/infras.rst:96
msgid ""
"We borrow TensorNetwork's code in /quantum to our ``tc.quantum`` module, "
"since TensorNetwork has no ``__init__.py`` file to export these MPO and "
@@ -296,7 +313,7 @@ msgstr ""
"TensorNetwork 没有 ``__init__.py`` 文件来导出这些 MPO 和 MPS "
"相关对象。当然,从那时起,我们已经取得了实质性的代码改进。"
-#: ../../source/infras.rst:92
+#: ../../source/infras.rst:98
msgid ""
"We borrow the TensorNetwork's code in /matrixproductstates as "
"``tc.mps_base`` for bug fixing and jit/AD compatibility, so that we have "
@@ -305,15 +322,15 @@ msgstr ""
"我们借用 /matrixproductstates 中 TensorNetwork 的代码作为 ``tc.mps_base`` "
"用于错误修复和即时编译/自动微分兼容性,以便我们更好地支持基于 MPS 的量子电路模拟器。"
-#: ../../source/infras.rst:96
+#: ../../source/infras.rst:102
msgid "Relations of Circuit-like classes"
msgstr ""
-#: ../../source/infras.rst:108
+#: ../../source/infras.rst:114
msgid "QuOperator/QuVector and MPO/MPS"
msgstr "QuOperator/QuVector 和 MPO/MPS"
-#: ../../source/infras.rst:110
+#: ../../source/infras.rst:116
msgid ""
":py:class:`tensorcircuit.quantum.QuOperator`, "
":py:class:`tensorcircuit.quantum.QuVector` and "
@@ -327,19 +344,19 @@ msgstr ""
":py:class:`tensorcircuit.quantum.QuAdjointVector` 是从 TensorNetwork "
"包中采用的类。它们的行为类似于与其他成分交互时的矩阵/向量(列或行),而内部结构由张量网络维护以提高效率和紧凑性。"
-#: ../../source/infras.rst:113
+#: ../../source/infras.rst:119
msgid ""
"We use code examples and associated tensor diagrams to illustrate these "
"object abstractions."
msgstr "我们使用代码示例和相关的张量图来说明这些对象抽象。"
-#: ../../source/infras.rst:117
+#: ../../source/infras.rst:123
msgid ""
"``QuOperator`` can express MPOs and ``QuVector`` can express MPSs, but "
"they can express more than these fixed structured tensor networks."
msgstr "``QuOperator`` 可以表达 MPO,``QuVector`` 可以表达 MPS,但它们可以表达的不仅仅是这些固定的结构化张量网络。"
-#: ../../source/infras.rst:145
+#: ../../source/infras.rst:151
msgid ""
"Note how in this example, ``matrix`` is not a typical MPO but still can "
"be expressed as ``QuOperator``. Indeed, any tensor network with two sets "
@@ -351,7 +368,7 @@ msgstr ""
"``QuOperator``。事实上,任何具有两组相同维度的悬边的张量网络都可以被视为 ``QuOperator``。``QuVector`` "
"更加灵活,因为我们可以将所有悬空边视为向量维度。"
-#: ../../source/infras.rst:147
+#: ../../source/infras.rst:153
msgid ""
"Also, note how ``^`` is overloaded as ``tn.connect`` to connect edges "
"between different nodes in TensorNetwork. And indexing the node gives the"
@@ -360,7 +377,7 @@ msgstr ""
"还要注意 ``^`` 是如何被重载为 ``tn.connect`` 以连接 TensorNetwork "
"中不同节点之间的边。索引节点给出了节点的边,例如 ``n1[0]`` 意味着 ``节点 n1`` 的第一条边。"
-#: ../../source/infras.rst:149
+#: ../../source/infras.rst:155
msgid ""
"The convention to define the ``QuOperator`` is firstly giving "
"``out_edges`` (left index or row index of the matrix) and then giving "
@@ -370,7 +387,7 @@ msgstr ""
"定义 ``QuOperator`` 的惯例是首先给出 ``out_edges``(矩阵的左索引或行索引),然后给出 "
"``in_edges``(矩阵的右索引或列索引)。边列表包含来自 TensorNetwork 库的边对象。"
-#: ../../source/infras.rst:151
+#: ../../source/infras.rst:157
msgid ""
"Such QuOperator/QuVector abstraction support various calculations only "
"possible on matrix/vectors, such as matmul (``@``), adjoint "
@@ -386,6 +403,62 @@ msgstr ""
"(``*``)、张量乘积(``|``)和偏迹(``.partial_trace(subsystems_to_trace_out)``)。要提取这些对象的矩阵信息,我们可以使用"
" ``.eval()`` 或 ``.eval_matrix()``,前者保留了张量网络的形状信息,而后者给出了形状秩为2的矩阵表示。"
+#: ../../source/infras.rst:162
+msgid "Quantum Cloud SDK: Layerwise API design"
+msgstr ""
+
+#: ../../source/infras.rst:164
+msgid "From lower level to higher level, a view of API layers invoking QPU calls"
+msgstr ""
+
+#: ../../source/infras.rst:166
+msgid ""
+"Vendor specific implementation of functional API in, e.g., "
+":py:mod:`tensorcircuit.cloud.tencent`"
+msgstr ""
+
+#: ../../source/infras.rst:168
+msgid ""
+"Provider agnostic functional lower level API for task/device management "
+"in :py:mod:`tensorcircuit.cloud.apis`"
+msgstr ""
+
+#: ../../source/infras.rst:170
+msgid ""
+"Object oriented abstraction for Provider/Device/Task in "
+":py:mod:`tensorcircuit.cloud.abstraction`"
+msgstr ""
+
+#: ../../source/infras.rst:172
+msgid ""
+"Unified batch submission interface as standarized in "
+":py:meth:`tensorcircuit.cloud.wrapper.batch_submit_template`"
+msgstr ""
+
+#: ../../source/infras.rst:174
+msgid ""
+"Numerical and experimental unified all-in-one interface as "
+":py:meth:`tensorcircuit.cloud.wrapper.batch_expectation_ps`"
+msgstr ""
+
+#: ../../source/infras.rst:176
+msgid ""
+"Application level code with QPU calls built directly on "
+"``batch_expectation_ps`` or more fancy algorithms can be built on "
+"``batch_submit_func`` so that these algorithms can be reused as long as "
+"one function ``batch_submit_func`` is defined for a given vendor (cheaper"
+" than defining a new provider from lower level)."
+msgstr ""
+
+#: ../../source/infras.rst:181
+msgid ""
+"For compiler, error mitigation and results post-processing parts, they "
+"can be carefully designed to decouple with the QPU calls, so they are "
+"separately implemented in :py:mod:`tensorcircuit.compiler` and "
+":py:mod:`tensorcircuit.results`, and they can be independently useful "
+"even without tc's cloud access."
+msgstr ""
+
#~ msgid ""
#~ ":py:mod:`tensorcircuit.densitymatrix2`: Highly efficient"
#~ " implementation of "
@@ -397,3 +470,9 @@ msgstr ""
#~ ":py:obj:`tensorcircuit.densitymatrix2.DMCircuit2` "
#~ "类的高效实现,总是比参考的实现更适用。"
+#~ msgid ""
+#~ ":py:mod:`tensorcircuit.results`: Provide tools to"
+#~ " process count dict and to apply "
+#~ "error mitigation"
+#~ msgstr ""
+
diff --git a/docs/source/locale/zh/LC_MESSAGES/quickstart.po b/docs/source/locale/zh/LC_MESSAGES/quickstart.po
index fb97d394..b4275455 100644
--- a/docs/source/locale/zh/LC_MESSAGES/quickstart.po
+++ b/docs/source/locale/zh/LC_MESSAGES/quickstart.po
@@ -8,15 +8,15 @@ msgid ""
msgstr ""
"Project-Id-Version: tensorcircuit\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2023-02-02 14:19+0800\n"
-"PO-Revision-Date: 2022-04-11 08:23+0800\n"
+"POT-Creation-Date: 2023-07-14 15:43+0800\n"
+"PO-Revision-Date: 2023-05-07 11:01+0800\n"
"Last-Translator: Xinghan Yang\n"
"Language: cn\n"
"Language-Team: Xinghan Yang\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.9.1\n"
+"Generated-By: Babel 2.12.1\n"
#: ../../source/quickstart.rst:3
msgid "Quick Start"
@@ -24,15 +24,15 @@ msgstr "快速上手"
#: ../../source/quickstart.rst:6
msgid "Installation"
-msgstr ""
+msgstr "安装"
#: ../../source/quickstart.rst:8
-msgid "For x86 Linux or Mac,"
-msgstr ""
+msgid "For x86 Linux,"
+msgstr "对于 x86 Linux,"
#: ../../source/quickstart.rst:10
msgid "``pip install tensorcircuit``"
-msgstr ""
+msgstr "``pip install tensorcircuit``"
#: ../../source/quickstart.rst:12
msgid ""
@@ -71,19 +71,24 @@ msgstr ""
#: ../../source/quickstart.rst:28
msgid ""
-"For Windows, due to the lack of support for Jax, we recommend to use "
-"docker or WSL, please refer to `TC via windows docker "
-"`_ or `TC via WSL "
-"`_."
+"For more details on docker setup, please refer to `docker readme "
+"`_."
msgstr ""
#: ../../source/quickstart.rst:30
msgid ""
-"For Mac with M series chips (arm architecture), please refer to `TC on "
-"Mac M series `_."
+"For Windows, due to the lack of support for Jax, we recommend to use "
+"docker or WSL, please refer to `TC via windows docker "
+"`_ or `TC via WSL "
+"`_."
msgstr ""
#: ../../source/quickstart.rst:32
+msgid "For MacOS, please refer to `TC on Mac `_."
+msgstr "对于 MacOS,请参考 `在Mac上安装TC `_。"
+
+#: ../../source/quickstart.rst:34
msgid ""
"Overall, the installation of TensorCircuit is simple, since it is purely "
"in Python and hence very portable. As long as the users can take care of "
@@ -91,30 +96,36 @@ msgid ""
"TensorCircuit will work as expected."
msgstr ""
-#: ../../source/quickstart.rst:36
+#: ../../source/quickstart.rst:37
+msgid ""
+"To debug the installation issue or report bugs, please check the "
+"environment information by ``tc.about()``."
+msgstr ""
+
+#: ../../source/quickstart.rst:40
msgid ""
"We also provide a nightly build of tensorcircuit via PyPI which can be "
"accessed by ``pip uninstall tensorcircuit``, then ``pip install "
"tensorcircuit-nightly``"
msgstr ""
-#: ../../source/quickstart.rst:42
+#: ../../source/quickstart.rst:46
msgid "Circuit Object"
msgstr "电路对象"
-#: ../../source/quickstart.rst:44
+#: ../../source/quickstart.rst:48
msgid "The basic object for TensorCircuit is ``tc.Circuit``."
msgstr "TensorCircuit的基本对象是 ``tc.Circuit``。"
-#: ../../source/quickstart.rst:46
+#: ../../source/quickstart.rst:50
msgid "Initialize the circuit with the number of qubits ``c=tc.Circuit(n)``."
msgstr "用量子比特数(n) ``c=tc.Circuit(n)`` 来初始化电路。"
-#: ../../source/quickstart.rst:48
+#: ../../source/quickstart.rst:52
msgid "**Input States:**"
msgstr "**输入状态:**"
-#: ../../source/quickstart.rst:50
+#: ../../source/quickstart.rst:54
msgid ""
"The default input function for the circuit is :math:`\\vert 0^n "
"\\rangle`. One can change this to other wavefunctions by directly feeding"
@@ -123,17 +134,17 @@ msgstr ""
"电路的默认输入函数是 :math:`\\vert 0^n \\rangle` 。可以通过直接输入输入状态向量 w 将其更改为其他波函数: "
"``c=tc.Circuit(n, inputs=w)``。"
-#: ../../source/quickstart.rst:52
+#: ../../source/quickstart.rst:56
msgid ""
"One can also feed matrix product states as input states for the circuit, "
"but we leave MPS/MPO usage for future sections."
msgstr "也可以将矩阵乘积状态作为电路的输入状态,但我们将矩阵乘积状态/矩阵乘积算子的使用留待后续讲解。"
-#: ../../source/quickstart.rst:54
+#: ../../source/quickstart.rst:58
msgid "**Quantum Gates:**"
msgstr "**量子门:**"
-#: ../../source/quickstart.rst:56
+#: ../../source/quickstart.rst:60
msgid ""
"We can apply gates on circuit objects. For example, using ``c.H(1)`` or "
"``c.rx(2, theta=0.2)``, we can apply Hadamard gate on qubit 1 (0-based) "
@@ -142,15 +153,15 @@ msgstr ""
"我们可以将门应用于电路对象。 例如,使用 ``c.H(1)`` 或 ``c.rx(2, theta=0.2)``,我们可以将 Hadamard "
"门应用于量子比特1 (基于0)或将 Rx 门应用于量子比特2 :math:`e^{-i\\theta/2 X}`。"
-#: ../../source/quickstart.rst:58
+#: ../../source/quickstart.rst:62
msgid "The same rule also applies to multi-qubit gates, such as ``c.cnot(0, 1)``."
msgstr "同样的规则亦适用于多量子比特门,例如 ``c.cnot(0, 1)`` 。"
-#: ../../source/quickstart.rst:60
+#: ../../source/quickstart.rst:64
msgid "There are also highly customizable gates, two instances are:"
msgstr "这些量子门也是高度可定制的,下面是两个例子"
-#: ../../source/quickstart.rst:62
+#: ../../source/quickstart.rst:66
msgid ""
"``c.exp1(0, 1, unitary=m, theta=0.2)`` which is for the exponential gate "
":math:`e^{i\\theta m}` of any matrix m as long as :math:`m^2=1`."
@@ -158,33 +169,33 @@ msgstr ""
"``c.exp1(0, 1, unitary=m, theta=0.2)`` 用于任何矩阵 m 的指数门 :math:`e^{i\\theta "
"m}`,只要 m 满足 :math:`m^2=1`。"
-#: ../../source/quickstart.rst:64
+#: ../../source/quickstart.rst:68
msgid ""
"``c.any(0, 1, unitary=m)`` which is for applying the unitary gate m on "
"the circuit."
msgstr "``c.any(0, 1, unitary=m)`` 在电路上作用任意的幺正量子门。"
-#: ../../source/quickstart.rst:66
+#: ../../source/quickstart.rst:70
msgid "These two examples are flexible and support gates on any number of qubits."
msgstr "这两个例子很灵活,支持任意数量的量子比特上的门。"
-#: ../../source/quickstart.rst:68
+#: ../../source/quickstart.rst:72
msgid "**Measurements and Expectations:**"
msgstr "**测量与期望**"
-#: ../../source/quickstart.rst:70
+#: ../../source/quickstart.rst:74
msgid ""
"The most straightforward way to get the output from the circuit object is"
" by getting the output wavefunction in vector form as ``c.state()``."
msgstr "从电路对象中获取输出的最直接的方法是通过 ``c.state()`` 以向量形式获取输出波函数。"
-#: ../../source/quickstart.rst:72
+#: ../../source/quickstart.rst:76
msgid ""
"For bitstring sampling, we have ``c.perfect_sampling()`` which returns "
"the bitstring and the corresponding probability amplitude."
msgstr "对于位串采样,我们有 ``c.perfect_sampling()``,它返回位串和相应的概率幅度。"
-#: ../../source/quickstart.rst:74
+#: ../../source/quickstart.rst:78
msgid ""
"To measure part of the qubits, we can use ``c.measure(0, 1)``, if we want"
" to know the corresponding probability of the measurement output, try "
@@ -195,18 +206,18 @@ msgstr ""
"``c.measure(0, 1, with_prob=True)``。 测量 API 在默认情况下是不可即时编译的 "
",但我们也有一个可即时编译的版本,如 ``c.measure_jit(0, 1)``。"
-#: ../../source/quickstart.rst:76
+#: ../../source/quickstart.rst:80
msgid ""
"The measurement and sampling utilize advanced algorithms based on "
"tensornetwork and thus require no knowledge or space for the full "
"wavefunction."
msgstr "测量和采样使用了基于张量网络的高级算法,因此不需要任何相关知识或者空间来获取全波函数。"
-#: ../../source/quickstart.rst:78
+#: ../../source/quickstart.rst:82
msgid "See the example below:"
msgstr "请看下面的例子:"
-#: ../../source/quickstart.rst:96
+#: ../../source/quickstart.rst:100
msgid ""
"To compute expectation values for local observables, we have "
"``c.expectation([tc.gates.z(), [0]], [tc.gates.z(), [1]])`` for "
@@ -218,35 +229,35 @@ msgstr ""
"``c.expectation([tc.gates.x(), [0]])`` 对应的期望为 :math:`\\langle X_0 "
"\\rangle`时."
-#: ../../source/quickstart.rst:98
+#: ../../source/quickstart.rst:102
msgid ""
"This expectation API is rather flexible, as one can measure an m on "
"several qubits as ``c.expectation([m, [0, 1, 2]])``."
msgstr "因为可以在几个量子比特上测量一个 m,这种计算期望值的 API 相当灵活:``c.expectation([m, [0, 1, 2]])``。"
-#: ../../source/quickstart.rst:100
+#: ../../source/quickstart.rst:104
msgid ""
"We can also extract the unitary matrix underlying the whole circuit as "
"follows:"
msgstr "我们还可以提取整个电路下面的幺正矩阵,如下所示:"
-#: ../../source/quickstart.rst:113
+#: ../../source/quickstart.rst:117
msgid "**Circuit Transformations:**"
msgstr "**电路变换:**"
-#: ../../source/quickstart.rst:115
+#: ../../source/quickstart.rst:119
msgid ""
"We currently support transform ``tc.Circuit`` from and to Qiskit "
"``QuantumCircuit`` object."
msgstr "我们目前支持 ``tc.Circuit`` 与 Qiskit ``QuantumCircuit`` 对象之间的互相转换。"
-#: ../../source/quickstart.rst:117
+#: ../../source/quickstart.rst:121
msgid ""
"Export to Qiskit (possible for further hardware experiment, compiling, "
"and visualization): ``c.to_qiskit()``."
msgstr "导出到 Qiskit(可能用于进一步的硬件实验、编译和可视化):``c.to_qiskit()``。"
-#: ../../source/quickstart.rst:119
+#: ../../source/quickstart.rst:123
msgid ""
"Import from Qiskit: ``c = tc.Circuit.from_qiskit(QuantumCircuit, n)``. "
"Parameterized Qiskit circuit is supported by passing the parameters to "
@@ -254,11 +265,11 @@ msgid ""
"similar to the ``assign_parameters`` function in Qiskit."
msgstr ""
-#: ../../source/quickstart.rst:123
+#: ../../source/quickstart.rst:127
msgid "**Circuit Visualization:**"
msgstr "**电路可视化**"
-#: ../../source/quickstart.rst:125
+#: ../../source/quickstart.rst:129
msgid ""
"``c.vis_tex()`` can generate tex code for circuit visualization based on "
"LaTeX `quantikz `__ package."
@@ -266,14 +277,14 @@ msgstr ""
"``c.vis_tex()`` 可以基于 `quantikz `__ "
"package 生成用于电路可视化的 tex 代码。"
-#: ../../source/quickstart.rst:127
+#: ../../source/quickstart.rst:131
msgid ""
"There are also some automatic pipeline helper functions to directly "
"generate figures from tex code, but they require extra installations in "
"the environment."
msgstr "还有一些自动辅助函数可以直接从 tex 代码生成图形,但它们需要在环境中进行额外安装。"
-#: ../../source/quickstart.rst:129
+#: ../../source/quickstart.rst:133
msgid ""
"``render_pdf(tex)`` function requires full installation of LaTeX locally."
" And in the Jupyter environment, we may prefer ``render_pdf(tex, "
@@ -285,7 +296,7 @@ msgstr ""
"``render_pdf(tex, notebook=True)`` 来返回 jpg 图形,这需要安装 wand magicwand 库,请参阅 "
"`这里 `__ 。"
-#: ../../source/quickstart.rst:131
+#: ../../source/quickstart.rst:135
msgid ""
"Or since we can transform ``tc.Circuit`` into QuantumCircuit easily, we "
"have a simple pipeline to first transform ``tc.Circuit`` into Qiskit and "
@@ -296,18 +307,18 @@ msgstr ""
"或者因为我们可以轻松地将 ``tc.Circuit`` 转换为 QuantumCircuit,我们有一个简单的管道来首先转换 "
"``tc.Circuit`` 为 Qiskit,然后调用 Qiskit 中内置的可视化。 也就是说,我们有 ``c.draw()`` API。"
-#: ../../source/quickstart.rst:133
+#: ../../source/quickstart.rst:137
msgid "**Circuit Intermediate Representation:**"
msgstr "**电路中间表示:**"
-#: ../../source/quickstart.rst:135
+#: ../../source/quickstart.rst:139
msgid ""
"TensorCircuit provides its own circuit IR as a python list of dicts. This"
" IR can be further utilized to run compiling, generate serialization "
"qasm, or render circuit figures."
msgstr "TensorCircuit 以元素为字典的 python 列表形式提供自己的电路中间表示。此中间表示可进一步用于运行编译、生成序列化 qasm 或渲染电路图。"
-#: ../../source/quickstart.rst:137
+#: ../../source/quickstart.rst:141
msgid ""
"The IR is given as a list, each element is a dict containing information "
"on one gate that is applied to the circuit. Note gate attr in the dict is"
@@ -316,18 +327,18 @@ msgstr ""
"中间表示以列表形式给出,每个元素都是一个字典,其中包含应用于电路的一个量子门的信息。 注意字典中的 gate attr "
"实际上是一个返回此量子门的节点的 python 函数。"
-#: ../../source/quickstart.rst:149
+#: ../../source/quickstart.rst:153
msgid "Programming Paradigm"
msgstr "编程范式"
-#: ../../source/quickstart.rst:151
+#: ../../source/quickstart.rst:155
msgid ""
"The most common case and the most typical programming paradigm for "
"TensorCircuit are to evaluate the circuit output and the corresponding "
"quantum gradients, which is common in variational quantum algorithms."
msgstr "TensorCircuit 最常见的情况和最典型的编程范式是评估电路的输出以及相应的量子梯度,这在变分量子算法中很常见。"
-#: ../../source/quickstart.rst:178
+#: ../../source/quickstart.rst:182
#, fuzzy
msgid ""
"Also for a non-quantum example (linear regression) demonstrating the "
@@ -344,7 +355,7 @@ msgstr ""
"dev/blob/master/examples/universal_lr.py>`_ 。 "
"这个例子可能对机器学习的用户更友好,因为它纯粹是经典的,同时也展示了 TensorCircuit 的主要特征和范式。"
-#: ../../source/quickstart.rst:181
+#: ../../source/quickstart.rst:185
msgid ""
"If the user has no intention to maintain the application code in a "
"backend agnostic fashion, the API for ML frameworks can be more handily "
@@ -353,11 +364,11 @@ msgstr ""
"如果用户无意以与后端无关的方式维护应用程序代码,则可以更方便地使用用于机器学习框架的 API 并将其与 TensorCircuit API "
"交替使用。"
-#: ../../source/quickstart.rst:216
+#: ../../source/quickstart.rst:220
msgid "Automatic Differentiation, JIT, and Vectorized Parallelism"
msgstr "自动微分、即时编译和矢量化并行 "
-#: ../../source/quickstart.rst:218
+#: ../../source/quickstart.rst:222
msgid ""
"For concepts of AD, JIT and VMAP, please refer to `Jax documentation "
"`__ ."
@@ -365,7 +376,7 @@ msgstr ""
"关于自动微分、即时编译和向量并行化,请参考 `Jax 文档 "
"`__ 。"
-#: ../../source/quickstart.rst:220
+#: ../../source/quickstart.rst:224
msgid ""
"The related API design in TensorCircuit closely follows the functional "
"programming design pattern in Jax with some slight differences. So we "
@@ -375,21 +386,21 @@ msgstr ""
"TensorCircuit 中的相关 API 设计与 Jax 中的函数式编程的设计模式密切相关,但是略有不同。因此,我们强烈建议用户学习一些有关 "
"Jax 的基础知识,无论他们打算使用哪种机器学习后端。"
-#: ../../source/quickstart.rst:222
+#: ../../source/quickstart.rst:226
msgid "**AD Support:**"
msgstr "**自动微分支持**"
-#: ../../source/quickstart.rst:224
+#: ../../source/quickstart.rst:228
msgid ""
"Gradients, vjps, jvps, natural gradients, Jacobians, and Hessians. AD is "
"the base for all modern machine learning libraries."
msgstr "梯度、矢量雅可比乘积、自然梯度、 Jacobian 矩阵和 Hessian 矩阵。自动微分是所有现代机器学习库的基础。"
-#: ../../source/quickstart.rst:228
+#: ../../source/quickstart.rst:232
msgid "**JIT Support:**"
msgstr "**即时编译支持:**"
-#: ../../source/quickstart.rst:230
+#: ../../source/quickstart.rst:234
msgid ""
"Parameterized quantum circuits can run in a blink. Always use jit if the "
"circuit will get evaluations multiple times, it can greatly boost the "
@@ -404,11 +415,11 @@ msgstr ""
" 即时编译,否则,即时编译的函数可能会返回意外结果或每次在点击时都重新编译(浪费大量时间)。要了解更多关于即时编译机制的信息,可以参考关于 "
"``tf.function`` 或 ``jax.jit`` 的文档或博客,即使这两者仍然存在细微差别。"
-#: ../../source/quickstart.rst:234
+#: ../../source/quickstart.rst:238
msgid "**VMAP Support:**"
msgstr "**矢量化并行支持:**"
-#: ../../source/quickstart.rst:236
+#: ../../source/quickstart.rst:240
msgid ""
"Inputs, parameters, measurements, circuit structures, and Monte Carlo "
"noise can all be evaluated in parallel. To learn more about vmap "
@@ -418,11 +429,11 @@ msgstr ""
"输入、参数、测量、电路结构、蒙特卡洛噪声都可以并行测算。 要了解有关矢量并行化机制的更多信息,可以参考 ``tf.vectorized_map``"
" 或 ``jax.vmap`` 上的文档或博客。"
-#: ../../source/quickstart.rst:241
+#: ../../source/quickstart.rst:245
msgid "Backend Agnosticism"
msgstr "后端无关特性"
-#: ../../source/quickstart.rst:243
+#: ../../source/quickstart.rst:247
msgid ""
"TensorCircuit supports TensorFlow, Jax, and PyTorch backends. We "
"recommend using TensorFlow or Jax backend since PyTorch lacks advanced "
@@ -431,7 +442,7 @@ msgstr ""
"TensorCircuit 支持 TensorFlow、Jax 和 PyTorch 后端。 我们建议使用 TensorFlow 或 Jax "
"后端,因为 PyTorch 缺乏高级 jit 和 vmap 功能。"
-#: ../../source/quickstart.rst:245
+#: ../../source/quickstart.rst:249
msgid ""
"The backend can be set as ``K=tc.set_backend(\"jax\")`` and ``K`` is the "
"backend with a full set of APIs as a conventional ML framework, which can"
@@ -440,7 +451,7 @@ msgstr ""
"后端可以设置为 ``K=tc.set_backend(\"jax\")`` ,``K``作为常规机器学习框架的全套API的后端,也可以通过``tc"
" .backend`` 被访问。"
-#: ../../source/quickstart.rst:268
+#: ../../source/quickstart.rst:272
#, fuzzy
msgid ""
"The supported APIs in the backend come from two sources, one part is "
@@ -455,11 +466,15 @@ msgstr ""
" 另一个来自 `TensorCircuit package `__。"
-#: ../../source/quickstart.rst:422
+#: ../../source/quickstart.rst:427
+msgid ""
+msgstr ""
+
+#: ../../source/quickstart.rst:430
msgid "Switch the Dtype"
msgstr "转换 dtype"
-#: ../../source/quickstart.rst:424
+#: ../../source/quickstart.rst:432
msgid ""
"TensorCircuit supports simulation using 32/64 bit precession. The default"
" dtype is 32-bit as \"complex64\". Change this by "
@@ -469,24 +484,24 @@ msgstr ""
"\"complex64\"。可以通过 ``tc.set_dtype(\"complex128\")`` 把 dtype 改为 \"complex"
" 128\" 。"
-#: ../../source/quickstart.rst:427
+#: ../../source/quickstart.rst:435
msgid ""
"``tc.dtypestr`` always returns the current dtype string: either "
"\"complex64\" or \"complex128\"."
msgstr "``tc.dtypestr`` 总会返回当前的 dtype 字符串: 不是 \"complex64\" 就是 \"complex128\"."
-#: ../../source/quickstart.rst:431
+#: ../../source/quickstart.rst:439
msgid "Setup the Contractor"
msgstr "设置 contractor"
-#: ../../source/quickstart.rst:433
+#: ../../source/quickstart.rst:441
msgid ""
"TensorCircuit is a tensornetwork contraction-based quantum circuit "
"simulator. A contractor is for searching for the optimal contraction path"
" of the circuit tensornetwork."
msgstr "TensorCircuit 是一个基于张量网络收缩的量子电路模拟器。 contractor 用于搜索电路张量网络的最佳收缩路径。"
-#: ../../source/quickstart.rst:435
+#: ../../source/quickstart.rst:443
msgid ""
"There are various advanced contractors provided by third-party packages, "
"such as `opt-einsum `__ and "
@@ -496,7 +511,7 @@ msgstr ""
"`__ 和 `cotengra "
"`__ 。"
-#: ../../source/quickstart.rst:437
+#: ../../source/quickstart.rst:445
msgid ""
"`opt-einsum` is shipped with TensorNetwork package. To use cotengra, one "
"needs to pip install it; kahypar is also recommended to install with "
@@ -505,11 +520,11 @@ msgstr ""
"`opt-einsum` 随 TensorNetwork 软件包一起提供。如要使用 cotengra,则需要 pip 安装它;还建议同时安装 "
"kahypar 以配合 cotengra 使用。"
-#: ../../source/quickstart.rst:439
+#: ../../source/quickstart.rst:447
msgid "Some setup cases:"
msgstr "一些设置案例:"
-#: ../../source/quickstart.rst:465
+#: ../../source/quickstart.rst:473
#, fuzzy
msgid ""
"For advanced configurations on cotengra contractors, please refer to "
@@ -524,25 +539,25 @@ msgstr ""
"`contractor 教程 `__."
-#: ../../source/quickstart.rst:467
+#: ../../source/quickstart.rst:475
msgid "**Setup in Function or Context Level**"
msgstr "**函数和上下文级别的设置**"
-#: ../../source/quickstart.rst:469
+#: ../../source/quickstart.rst:477
msgid ""
"Beside global level setup, we can also setup the backend, the dtype, and "
"the contractor at the function level or context manager level:"
msgstr "除了全局级别设置,我们还可以在函数级别或上下文管理器级别设置后端、dtype 和contractor:"
-#: ../../source/quickstart.rst:487
+#: ../../source/quickstart.rst:495
msgid "Noisy Circuit Simulation"
msgstr "噪声电路模拟"
-#: ../../source/quickstart.rst:489
+#: ../../source/quickstart.rst:497
msgid "**Monte Carlo State Simulator:**"
msgstr "**蒙特卡洛态模拟器**"
-#: ../../source/quickstart.rst:491
+#: ../../source/quickstart.rst:499
msgid ""
"For the Monte Carlo trajectory noise simulator, the unitary Kraus channel"
" can be handled easily. TensorCircuit also supports fully jittable and "
@@ -551,40 +566,40 @@ msgstr ""
"对于蒙特卡洛轨迹噪声模拟器,可以轻松处理幺正的 Kraus 通道。 不过,TensorCircuit 还支持完全可即时编译和可微分的通用 "
"Kraus 通道蒙特卡罗模拟。"
-#: ../../source/quickstart.rst:518
+#: ../../source/quickstart.rst:526
msgid "**Density Matrix Simulator:**"
msgstr "**密度矩阵模拟器**"
-#: ../../source/quickstart.rst:520
+#: ../../source/quickstart.rst:528
msgid ""
"Density matrix simulator ``tc.DMCircuit`` simulates the noise in a full "
"form, but takes twice qubits to do noiseless simulation. The API is the "
"same as ``tc.Circuit``."
msgstr "密度矩阵模拟器``tc.DMCircuit`` 以完整形式模拟噪声,但需要两倍的量子比特。API 与 ``tc.Circuit`` 基本相同。"
-#: ../../source/quickstart.rst:539
+#: ../../source/quickstart.rst:547
msgid "**Experiment with quantum errors:**"
msgstr ""
-#: ../../source/quickstart.rst:541
+#: ../../source/quickstart.rst:549
msgid "Multiple quantum errors can be added on circuit."
msgstr ""
-#: ../../source/quickstart.rst:557
+#: ../../source/quickstart.rst:565
msgid "**Experiment with readout error:**"
msgstr ""
-#: ../../source/quickstart.rst:559
+#: ../../source/quickstart.rst:567
msgid ""
"Readout error can be added in experiments for sampling and expectation "
"value calculation."
msgstr ""
-#: ../../source/quickstart.rst:585
+#: ../../source/quickstart.rst:593
msgid "MPS and MPO"
msgstr "矩阵乘积状态和矩阵乘积算子"
-#: ../../source/quickstart.rst:587
+#: ../../source/quickstart.rst:595
msgid ""
"TensorCircuit has its class for MPS and MPO originally defined in "
"TensorNetwork as ``tc.QuVector``, ``tc.QuOperator``."
@@ -592,7 +607,7 @@ msgstr ""
"TensorCircuit 有自己的 MPS 和 MPO 类,起初在 TensorNetwork 中定义为“tc.QuVector” 和 "
"“tc.QuOperator”。"
-#: ../../source/quickstart.rst:589
+#: ../../source/quickstart.rst:597
msgid ""
"``tc.QuVector`` can be extracted from ``tc.Circuit`` as the tensor "
"network form for the output state (uncontracted) by ``c.quvector()``."
@@ -600,7 +615,7 @@ msgstr ""
"作为``c.quvector()`` 的输出状态(未收缩)的张量网络形式,``tc.QuVector`` 可以从``tc.Circuit`` "
"中提取。"
-#: ../../source/quickstart.rst:591
+#: ../../source/quickstart.rst:599
msgid ""
"The QuVector forms a wavefunction w, which can also be fed into Circuit "
"as the inputs state as ``c=tc.Circuit(n, mps_inputs=w)``."
@@ -608,61 +623,61 @@ msgstr ""
"QuVector 形成一个波函数 w,它也可以作为 ``c=tc.Circuit(n, mps_inputs=w)`` 的输入状态输入到 "
"Circuit 中。"
-#: ../../source/quickstart.rst:593
+#: ../../source/quickstart.rst:601
msgid "MPS as input state for circuit"
msgstr "MPS 作为电路的输入状态"
-#: ../../source/quickstart.rst:595
+#: ../../source/quickstart.rst:603
msgid ""
"The MPS/QuVector representation of the input state has a more efficient "
"and compact form."
msgstr "输入状态的 MPS/QuVector 表示具有更高效和紧凑的形式。"
-#: ../../source/quickstart.rst:607
+#: ../../source/quickstart.rst:615
msgid "MPS as (uncomputed) output state for circuit"
msgstr "MPS 作为电路的(未计算的)输出状态"
-#: ../../source/quickstart.rst:609
+#: ../../source/quickstart.rst:617
msgid ""
"For example, a quick way to calculate the wavefunction overlap without "
"explicitly computing the state amplitude is given as below:"
msgstr "例如,在不显式计算状态幅度的情况下,计算波函数重叠的快速方法如下:"
-#: ../../source/quickstart.rst:626
+#: ../../source/quickstart.rst:634
msgid "MPO as the gate on the circuit"
msgstr "MPO 作为电路上的门"
-#: ../../source/quickstart.rst:628
+#: ../../source/quickstart.rst:636
msgid ""
"Instead of a common quantum gate in matrix/node format, we can directly "
"apply a gate in MPO/QuOperator format."
msgstr "代替矩阵/节点格式的普通量子门,我们可以直接应用 MPO/QuOperator 格式的门。"
-#: ../../source/quickstart.rst:639
+#: ../../source/quickstart.rst:647
msgid ""
"The representative gate defined in MPO format is the ``multicontrol`` "
"gate."
msgstr "以 MPO 格式定义的代表门是 ``multicontrol`` 门。"
-#: ../../source/quickstart.rst:641
+#: ../../source/quickstart.rst:649
msgid "MPO as the operator for expectation evaluation on a circuit"
msgstr "MPO作为电路期望估测算子"
-#: ../../source/quickstart.rst:643
+#: ../../source/quickstart.rst:651
msgid ""
"We can also measure operator expectation on the circuit output state "
"where the operator is in MPO/QuOperator format."
msgstr "我们还可以测量运算符对 MPO/QuOperator 格式的电路输出状态的期望。"
-#: ../../source/quickstart.rst:655
+#: ../../source/quickstart.rst:663
msgid "Interfaces"
msgstr "接口"
-#: ../../source/quickstart.rst:657
+#: ../../source/quickstart.rst:665
msgid "**PyTorch Interface to Hybrid with PyTorch Modules:**"
msgstr "**与 PyTorch 模块混合的 PyTorch 接口:**"
-#: ../../source/quickstart.rst:659
+#: ../../source/quickstart.rst:667
msgid ""
"As we have mentioned in the backend section, the PyTorch backend may lack"
" advanced features. This doesn't mean we cannot hybrid the advanced "
@@ -673,7 +688,7 @@ msgstr ""
"正如我们在后端部分提到的,PyTorch 后端可能缺少高级功能。 这并不意味着我们不能将高级量子电路模块与 PyTorch 神经模块混合。 "
"我们可以在 TensorFlow 或 Jax 后端运行量子函数,同时使用 Torch 接口包装它。 "
-#: ../../source/quickstart.rst:686
+#: ../../source/quickstart.rst:694
msgid ""
"For a GPU/CPU, torch/tensorflow, quantum/classical hybrid machine "
"learning pipeline enabled by tensorcircuit, see `example script "
@@ -681,96 +696,102 @@ msgid ""
"lab/tensorcircuit/blob/master/examples/hybrid_gpu_pipeline.py>`__."
msgstr ""
-#: ../../source/quickstart.rst:688
+#: ../../source/quickstart.rst:696
+msgid ""
+"There is also a more flexible torch interface that support static non-"
+"tensor inputs as keyword arguments, which can be utilized as below:"
+msgstr ""
+
+#: ../../source/quickstart.rst:710
msgid ""
"We also provider wrapper of quantum function for torch module as "
":py:meth:`tensorcircuit.TorchLayer` alias to "
":py:meth:`tensorcircuit.torchnn.QuantumNet`."
msgstr ""
-#: ../../source/quickstart.rst:690
+#: ../../source/quickstart.rst:712
msgid ""
"For ``TorchLayer``, ``use_interface=True`` is by default, which natively "
"allow the quantum function defined on other tensorcircuit backends, such "
"as jax or tf for speed consideration."
msgstr ""
-#: ../../source/quickstart.rst:692
+#: ../../source/quickstart.rst:714
msgid ""
"``TorchLayer`` can process multiple input arguments as multiple function "
"inputs, following torch practice."
msgstr ""
-#: ../../source/quickstart.rst:720
+#: ../../source/quickstart.rst:742
msgid "**TensorFlow interfaces:**"
msgstr ""
-#: ../../source/quickstart.rst:722
+#: ../../source/quickstart.rst:744
msgid ""
"Similar rules apply similar as torch interface. The interface can even be"
" used within jit environment outside. See "
":py:meth:`tensorcircuit.interfaces.tensorflow.tensorflow_interface`."
msgstr ""
-#: ../../source/quickstart.rst:725
+#: ../../source/quickstart.rst:747
msgid ""
"We also provider ``enable_dlpack=True`` option in torch and tf "
"interfaces, which allow the tensor transformation happen without memory "
"transfer via dlpack, higher version of tf or torch package required."
msgstr ""
-#: ../../source/quickstart.rst:728
+#: ../../source/quickstart.rst:750
msgid ""
"We also provider wrapper of quantum function for keras layer as "
":py:meth:`tensorcircuit.KerasLayer` alias to "
":py:meth:`tensorcircuit.keras.KerasLayer`."
msgstr ""
-#: ../../source/quickstart.rst:730
+#: ../../source/quickstart.rst:752
msgid ""
"``KerasLayer`` can process multiple input arguments with the input as a "
"dict, following the common keras practice, see example below."
msgstr ""
-#: ../../source/quickstart.rst:752
+#: ../../source/quickstart.rst:774
msgid "**Scipy Interface to Utilize Scipy Optimizers:**"
msgstr "**使用 scipy接口使用scipy优化器:**"
-#: ../../source/quickstart.rst:754
+#: ../../source/quickstart.rst:776
msgid ""
"Automatically transform quantum functions as scipy-compatible values and "
"grad functions as provided for scipy interface with ``jac=True``."
msgstr "为带有 jac=True 的 scipy 接口自动将量子函数转换为与 scipy 兼容的 value 和 grad 函数。"
-#: ../../source/quickstart.rst:780
+#: ../../source/quickstart.rst:802
msgid "Templates as Shortcuts"
msgstr "捷径模板"
-#: ../../source/quickstart.rst:782
+#: ../../source/quickstart.rst:804
msgid "**Measurements:**"
msgstr "**测量**"
-#: ../../source/quickstart.rst:784
+#: ../../source/quickstart.rst:806
msgid "Ising type Hamiltonian defined on a general graph"
msgstr "在一般图上定义的伊辛型哈密顿量"
-#: ../../source/quickstart.rst:786
+#: ../../source/quickstart.rst:808
msgid ""
"See "
":py:meth:`tensorcircuit.templates.measurements.spin_glass_measurements`"
msgstr "参考 :py:meth:`tensorcircuit.templates.measurements.spin_glass_measurements`"
-#: ../../source/quickstart.rst:788
+#: ../../source/quickstart.rst:810
msgid "Heisenberg Hamiltonian on a general graph with possible external fields"
msgstr "具有可能存在的外场的一般图上的海森堡哈密顿量"
-#: ../../source/quickstart.rst:790
+#: ../../source/quickstart.rst:812
msgid ""
"See "
":py:meth:`tensorcircuit.templates.measurements.heisenberg_measurements`"
msgstr "参考 :py:meth:`tensorcircuit.templates.measurements.heisenberg_measurements`"
-#: ../../source/quickstart.rst:792
+#: ../../source/quickstart.rst:814
msgid "**Circuit Blocks:**"
msgstr "**电路块**"
@@ -842,3 +863,13 @@ msgstr "**电路块**"
#~ msgid "Import from Qiskit: ``c = tc.Circuit.from_qiskit(QuantumCircuit, n)``"
#~ msgstr "从 Qiskit 导入:``c = tc.Circuit.from_qiskit(QuantumCircuit, n)``"
+#~ msgid "For x86 Linux or Mac,"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "For Mac with M series chips (arm"
+#~ " architecture), please refer to `TC "
+#~ "on Mac M series "
+#~ "`_."
+#~ msgstr ""
+
diff --git a/docs/source/locale/zh/LC_MESSAGES/sharpbits.po b/docs/source/locale/zh/LC_MESSAGES/sharpbits.po
index 95459bb9..5b2bfadc 100644
--- a/docs/source/locale/zh/LC_MESSAGES/sharpbits.po
+++ b/docs/source/locale/zh/LC_MESSAGES/sharpbits.po
@@ -9,7 +9,7 @@ msgid ""
msgstr ""
"Project-Id-Version: tensorcircuit \n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2022-06-27 20:10+0800\n"
+"POT-Creation-Date: 2023-05-07 10:47+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME \n"
"Language-Team: LANGUAGE \n"
@@ -27,9 +27,7 @@ msgid ""
"Be fast is never for free, though much cheaper in TensorCircuit, but you "
"have to be cautious especially in terms of AD, JIT compatibility. We will"
" go through the main sharp edges 🔪 in this note."
-msgstr ""
-"虽然在TensorCircuit中速度很快,但是你必须小心,尤其是在AD和JIT兼容性"
-"方面。"
+msgstr "虽然在TensorCircuit中速度很快,但是你必须小心,尤其是在AD和JIT兼容性方面。"
#: ../../source/sharpbits.rst:9
msgid "Jit Compatibility"
@@ -45,10 +43,7 @@ msgid ""
" otherwise the recompilation is incurred which is time-consuming. "
"Therefore, if there are input args that are non-tensor or varying shape "
"tensors and frequently change, jit is not recommend."
-msgstr ""
-"输入必须是张量形式,且输入张量的形状必须固定,否则会重新编译,这是非常耗"
-"时的。因此,如果有输入参数是非张量或者变化形状的张量,且经常变化,不建议"
-"使用jit。"
+msgstr "输入必须是张量形式,且输入张量的形状必须固定,否则会重新编译,这是非常耗时的。因此,如果有输入参数是非张量或者变化形状的张量,且经常变化,不建议使用jit。"
#: ../../source/sharpbits.rst:38
msgid "Mix use of numpy and ML backend APIs"
@@ -63,17 +58,14 @@ msgid ""
"For numpy ops, they will be only called in jit staging time (the first "
"run)."
msgstr ""
-"为了使函数可jit和可AD,函数中的每个操作都应该通过ML后端(``tc.backend`` API"
-"或者直接调用后端API ``tf`` 或者 ``jax``)。这是因为ML后端必须创建计算"
-"图来"进行AD和JIT转换。对于numpy操作,它们只会在jit编译阶段被调用(第一"
-"次运行)。"
+"为了使函数可jit和可AD,函数中的每个操作都应该通过ML后端(``tc.backend`` API或者直接调用后端API ``tf`` 或者 "
+"``jax``)。这是因为ML后端必须创建计算图来\"进行AD和JIT转换。对于numpy操作,它们只会在jit编译阶段被调用(第一次运行)。"
#: ../../source/sharpbits.rst:54
msgid ""
"Numpy call inside jitted function can be helpful if you are sure of the "
"behavior is what you expect."
-msgstr ""
-"如果你确定numpy调用的行为是你期望的,那么在jit函数中调用numpy是有帮助的。"
+msgstr "如果你确定numpy调用的行为是你期望的,那么在jit函数中调用numpy是有帮助的。"
#: ../../source/sharpbits.rst:83
msgid "list append under if"
@@ -84,37 +76,77 @@ msgid ""
"Append something to a Python list within if whose condition is based on "
"tensor values will lead to wrong results. Actually values of both branch "
"will be attached to the list. See example below."
-msgstr ""
-"在if条件基于张量值的情况下,将内容附加到Python列表中会导致错误的结果。实际"
-"上,两个分支的值都会被附加到列表中。参见下面的例子。"
+msgstr "在if条件基于张量值的情况下,将内容附加到Python列表中会导致错误的结果。实际上,两个分支的值都会被附加到列表中。参见下面的例子。"
#: ../../source/sharpbits.rst:108
msgid ""
"The above code raise ``ConcretizationTypeError`` exception directly for "
"Jax backend since Jax jit doesn't support tensor value if condition."
-msgstr ""
-"上面的代码直接为Jax后端引发了``ConcretizationTypeError``异常,因为Jax "
-"jit不支持张量值if条件。"
+msgstr "上面的代码直接为Jax后端引发了``ConcretizationTypeError``异常,因为Jax jit不支持张量值if条件。"
#: ../../source/sharpbits.rst:110
msgid "Similarly, conditional gate application must be takend carefully."
msgstr "类似地,必须小心地应用条件门。"
#: ../../source/sharpbits.rst:145
+msgid "Tensor variables consistency"
+msgstr ""
+
+#: ../../source/sharpbits.rst:148
+msgid ""
+"All tensor variables' backend (tf vs jax vs ..), dtype (float vs "
+"complex), shape and device (cpu vs gpu) must be compatible/consistent."
+msgstr ""
+
+#: ../../source/sharpbits.rst:150
+msgid "Inspect the backend, dtype, shape and device using the following codes."
+msgstr ""
+
+#: ../../source/sharpbits.rst:162
+msgid ""
+"If the backend is inconsistent, one can convert the tensor backend via "
+":py:meth:`tensorcircuit.interfaces.tensortrans.general_args_to_backend`."
+msgstr ""
+
+#: ../../source/sharpbits.rst:173
+msgid ""
+"If the dtype is inconsistent, one can convert the tensor dtype using "
+"``tc.backend.cast``."
+msgstr ""
+
+#: ../../source/sharpbits.rst:184
+msgid ""
+"Also note the jax issue on float64/complex128, see `jax gotcha "
+"