diff --git a/.all-contributorsrc b/.all-contributorsrc
index 3bacaeb2f..1e46cd5c1 100644
--- a/.all-contributorsrc
+++ b/.all-contributorsrc
@@ -171,9 +171,7 @@
"avatar_url": "https://avatars.githubusercontent.com/u/2626883?v=4",
"profile": "https://www.linkedin.com/in/shengzha/",
"contributions": [
- "ideas",
- "review",
- "talk"
+ "ideas"
]
},
{
@@ -308,6 +306,326 @@
"contributions": [
"ideas"
]
+ },
+ {
+ "login": "lezcano",
+ "name": "Mario Lezcano Casado",
+ "avatar_url": "https://avatars.githubusercontent.com/u/3291265?v=4",
+ "profile": "https://github.com/lezcano",
+ "contributions": [
+ "ideas"
+ ]
+ },
+ {
+ "login": "BvB93",
+ "name": "Bas van Beek",
+ "avatar_url": "https://avatars.githubusercontent.com/u/43369155?v=4",
+ "profile": "https://github.com/BvB93",
+ "contributions": [
+ "ideas"
+ ]
+ },
+ {
+ "login": "seberg",
+ "name": "Sebastian Berg",
+ "avatar_url": "https://avatars.githubusercontent.com/u/61977?v=4",
+ "profile": "https://github.com/seberg",
+ "contributions": [
+ "ideas"
+ ]
+ },
+ {
+ "login": "IsaacBreen",
+ "name": "Isaac Breen",
+ "avatar_url": "https://avatars.githubusercontent.com/u/57783927?v=4",
+ "profile": "https://github.com/IsaacBreen",
+ "contributions": [
+ "ideas"
+ ]
+ },
+ {
+ "login": "kmaehashi",
+ "name": "Kenichi Maehashi",
+ "avatar_url": "https://avatars.githubusercontent.com/u/939877?v=4",
+ "profile": "https://github.com/kmaehashi",
+ "contributions": [
+ "ideas"
+ ]
+ },
+ {
+ "login": "cnpryer",
+ "name": "Chris Pryer",
+ "avatar_url": "https://avatars.githubusercontent.com/u/14341145?v=4",
+ "profile": "https://github.com/cnpryer",
+ "contributions": [
+ "ideas"
+ ]
+ },
+ {
+ "login": "tirthasheshpatel",
+ "name": "Tirth Patel",
+ "avatar_url": "https://avatars.githubusercontent.com/u/43181252?v=4",
+ "profile": "https://github.com/tirthasheshpatel",
+ "contributions": [
+ "ideas"
+ ]
+ },
+ {
+ "login": "kshitij12345",
+ "name": "Kshiteej K",
+ "avatar_url": "https://avatars.githubusercontent.com/u/19503980?v=4",
+ "profile": "https://github.com/kshitij12345",
+ "contributions": [
+ "ideas"
+ ]
+ },
+ {
+ "login": "AnirudhDagar",
+ "name": "Anirudh Dagar",
+ "avatar_url": "https://avatars.githubusercontent.com/u/23621655?v=4",
+ "profile": "https://anirudhdagar.ml/",
+ "contributions": [
+ "ideas"
+ ]
+ },
+ {
+ "login": "tomwhite",
+ "name": "Tom White",
+ "avatar_url": "https://avatars.githubusercontent.com/u/85085?v=4",
+ "profile": "http://tom-e-white.com/",
+ "contributions": [
+ "ideas"
+ ]
+ },
+ {
+ "login": "honno",
+ "name": "Matthew Barber",
+ "avatar_url": "https://avatars.githubusercontent.com/u/8246949?v=4",
+ "profile": "https://github.com/honno",
+ "contributions": [
+ "ideas",
+ "content"
+ ]
+ },
+ {
+ "login": "pmeier",
+ "name": "Philip Meier",
+ "avatar_url": "https://avatars.githubusercontent.com/u/6849766?v=4",
+ "profile": "https://github.com/pmeier",
+ "contributions": [
+ "research",
+ "code"
+ ]
+ },
+ {
+ "login": "Zac-HD",
+ "name": "Zac Hatfield-Dodds",
+ "avatar_url": "https://avatars.githubusercontent.com/u/12229877?v=4",
+ "profile": "https://github.com/Zac-HD",
+ "contributions": [
+ "ideas",
+ "code"
+ ]
+ },
+ {
+ "login": "djl11",
+ "name": "Daniel Lenton",
+ "avatar_url": "https://avatars.githubusercontent.com/u/22750088?v=4",
+ "profile": "https://github.com/djl11",
+ "contributions": [
+ "code"
+ ]
+ },
+ {
+ "login": "simonetgordon",
+ "name": "Simone G",
+ "avatar_url": "https://avatars.githubusercontent.com/u/74716948?v=4",
+ "profile": "https://github.com/simonetgordon",
+ "contributions": [
+ "code",
+ "ideas"
+ ]
+ },
+ {
+ "login": "tylerjereddy",
+ "name": "Tyler Reddy",
+ "avatar_url": "https://avatars.githubusercontent.com/u/7903078?v=4",
+ "profile": "https://github.com/tylerjereddy",
+ "contributions": [
+ "ideas"
+ ]
+ },
+ {
+ "login": "mattbarrett98",
+ "name": "Matt Barrett",
+ "avatar_url": "https://avatars.githubusercontent.com/u/83289589?v=4",
+ "profile": "https://github.com/mattbarrett98",
+ "contributions": [
+ "ideas"
+ ]
+ },
+ {
+ "login": "bicycleman15",
+ "name": "Jatin Prakash",
+ "avatar_url": "https://avatars.githubusercontent.com/u/47978882?v=4",
+ "profile": "https://github.com/bicycleman15",
+ "contributions": [
+ "ideas"
+ ]
+ },
+ {
+ "login": "Ishticode",
+ "name": "Ishtiaq Hussain",
+ "avatar_url": "https://avatars.githubusercontent.com/u/53497039?v=4",
+ "profile": "https://github.com/Ishticode",
+ "contributions": [
+ "ideas"
+ ]
+ },
+ {
+ "login": "sherry30",
+ "name": "sherry30",
+ "avatar_url": "https://avatars.githubusercontent.com/u/65318415?v=4",
+ "profile": "https://github.com/sherry30",
+ "contributions": [
+ "ideas"
+ ]
+ },
+ {
+ "login": "juaolobo",
+ "name": "JoΓ£o Lobo",
+ "avatar_url": "https://avatars.githubusercontent.com/u/49628984?v=4",
+ "profile": "https://github.com/juaolobo",
+ "contributions": [
+ "ideas"
+ ]
+ },
+ {
+ "login": "NeilGirdhar",
+ "name": "Neil Girdhar",
+ "avatar_url": "https://avatars.githubusercontent.com/u/730137?v=4",
+ "profile": "https://github.com/NeilGirdhar",
+ "contributions": [
+ "ideas"
+ ]
+ },
+ {
+ "login": "nstarman",
+ "name": "Nathaniel Starkman",
+ "avatar_url": "https://avatars.githubusercontent.com/u/8949649?v=4",
+ "profile": "https://github.com/nstarman",
+ "contributions": [
+ "ideas"
+ ]
+ },
+ {
+ "login": "jakirkham",
+ "name": "jakirkham",
+ "avatar_url": "https://avatars.githubusercontent.com/u/3019665?v=4",
+ "profile": "https://github.com/jakirkham",
+ "contributions": [
+ "ideas"
+ ]
+ },
+ {
+ "login": "RickSanchezStoic",
+ "name": "RickSanchezStoic",
+ "avatar_url": "https://avatars.githubusercontent.com/u/57310695?v=4",
+ "profile": "https://github.com/RickSanchezStoic",
+ "contributions": [
+ "ideas"
+ ]
+ },
+ {
+ "login": "tlambert03",
+ "name": "Talley Lambert",
+ "avatar_url": "https://avatars.githubusercontent.com/u/1609449?v=4",
+ "profile": "https://github.com/tlambert03",
+ "contributions": [
+ "ideas"
+ ]
+ },
+ {
+ "login": "jni",
+ "name": "Juan Nunez-Iglesias",
+ "avatar_url": "https://avatars.githubusercontent.com/u/492549?v=4",
+ "profile": "http://ilovesymposia.com/",
+ "contributions": [
+ "ideas"
+ ]
+ },
+ {
+ "login": "chkothe",
+ "name": "Christian Kothe",
+ "avatar_url": "https://avatars.githubusercontent.com/u/5318120?v=4",
+ "profile": "https://github.com/chkothe",
+ "contributions": [
+ "ideas"
+ ]
+ },
+ {
+ "login": "vnmabus",
+ "name": "Carlos Ramos CarreΓ±o",
+ "avatar_url": "https://avatars.githubusercontent.com/u/2364173?v=4",
+ "profile": "https://github.com/vnmabus",
+ "contributions": [
+ "ideas"
+ ]
+ },
+ {
+ "login": "gilfree",
+ "name": "Gilad",
+ "avatar_url": "https://avatars.githubusercontent.com/u/88031955?v=4",
+ "profile": "https://github.com/gilfree",
+ "contributions": [
+ "ideas"
+ ]
+ },
+ {
+ "login": "thomasjpfan",
+ "name": "Thomas J. Fan",
+ "avatar_url": "https://avatars.githubusercontent.com/u/5402633?v=4",
+ "profile": "https://github.com/thomasjpfan",
+ "contributions": [
+ "ideas"
+ ]
+ },
+ {
+ "login": "Conchylicultor",
+ "name": "Conchylicultor",
+ "avatar_url": "https://avatars.githubusercontent.com/u/9047355?v=4",
+ "profile": "http://e-pot.xyz/",
+ "contributions": [
+ "ideas"
+ ]
+ },
+ {
+ "login": "fcharras",
+ "name": "Franck Charras",
+ "avatar_url": "https://avatars.githubusercontent.com/u/29153872?v=4",
+ "profile": "https://github.com/fcharras",
+ "contributions": [
+ "ideas"
+ ]
+ },
+ {
+ "login": "kkraus14",
+ "name": "Keith Kraus",
+ "avatar_url": "https://avatars.githubusercontent.com/u/3665167?v=4",
+ "profile": "https://github.com/kkraus14",
+ "contributions": [
+ "ideas"
+ ]
+ },
+ {
+ "login": "lucascolley",
+ "name": "Lucas Colley",
+ "avatar_url": "https://avatars.githubusercontent.com/u/51488791?v=4",
+ "profile": "https://github.com/lucascolley",
+ "contributions": [
+ "maintenance",
+ "bug"
+ ]
}
],
"contributorsPerLine": 7
diff --git a/.circleci/config.yml b/.circleci/config.yml
index aab5a1744..b35ab8506 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -19,10 +19,10 @@ jobs:
name: build docs
no_output_timeout: 25m
command: |
- pip install -r requirements.txt
- sphinx-build -b html -WT --keep-going spec build/draft -d doctrees
+ pip install -r doc-requirements.txt
+ make spec
- store_artifacts:
- path: build/draft
+ path: _site/
workflows:
version: 2
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
index 64a94ebe7..3d9e87daa 100644
--- a/.git-blame-ignore-revs
+++ b/.git-blame-ignore-revs
@@ -1,7 +1,8 @@
# Migrate code style to Black
162034b12711dad54589c5dc9e75942695a7957f
+678f9eab5a593005e7bb80a46156c27b210cfcea
# Move special cases to notes sections
816fba3b75c38cbb1bb6fe5b1342adc5eab694f3
0a2fa71a32b924cc92718db29910a6cbbc5e9341
-931144e7d7d5c8b23393aa730ef28962a35b113b
\ No newline at end of file
+931144e7d7d5c8b23393aa730ef28962a35b113b
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 000000000..6d3ba5c27
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,46 @@
+#/
+# @license MIT
+#
+# Copyright (c) 2022 Python Data APIs Consortium.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+#/
+
+# Workflow name:
+name: ci
+
+# Workflow triggers:
+on:
+ pull_request:
+ push:
+ branches: [main,]
+
+# Workflow jobs:
+jobs:
+
+ main:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - uses: actions/setup-python@v4
+ with:
+ python-version: 3.8
+ - uses: pre-commit/action@v3.0.0
+ - uses: pre-commit-ci/lite-action@v1.0.1
+ if: always()
diff --git a/.github/workflows/pages.yml b/.github/workflows/pages.yml
index fc8a97015..7576076ef 100644
--- a/.github/workflows/pages.yml
+++ b/.github/workflows/pages.yml
@@ -45,10 +45,10 @@ jobs:
# Avoid running this workflow for forks and allow skipping CI:
if: "github.repository == 'data-apis/array-api' && !contains(github.event.head_commit.message, '[ci skip]') && !contains(github.event.head_commit.message, '[skip ci]') && !contains(github.event.head_commit.message, '[skip github]')"
-
+
# Define a sequence of job steps...
steps:
-
+
# Checkout the repository:
- name: 'Checkout repository'
uses: actions/checkout@v2
@@ -76,22 +76,14 @@ jobs:
# Install dependencies:
- name: 'Install dependencies'
run: |
- pip install -r ./requirements.txt
+ pip install -r doc-requirements.txt
# Generate the documentation:
- name: 'Build documentation'
run: |
# Turn warnings into errors and ensure .doctrees is not deployed:
- sphinx-build -b html -WT --keep-going spec build/draft -d doctrees
-
- # Upload the build artifact:
- - name: 'Upload build artifact'
- uses: actions/upload-artifact@v2
- if: ${{ github.event_name == 'pull_request'}}
- with:
- name: html
- path: build/
- if-no-files-found: error
+ export SPHINXOPTS="-b html -WT --keep-going -d doctrees"
+ make spec
# Configure Git:
- name: 'Configure Git'
@@ -107,10 +99,10 @@ jobs:
git checkout gh-pages
timeout-minutes: 5
- # Copy build artifact:
- - name: 'Copy build artifact'
+ - name: 'Copy build to root'
run: |
- rm -rf ./draft && cp -R ./build/draft ./draft
+ cp -R ./_site/* .
+ cp ./_site/.gitignore .
timeout-minutes: 10
# Commit changes to:
diff --git a/.github/workflows/preview.yml b/.github/workflows/preview.yml
index cdfa3c57b..43d8c0b68 100644
--- a/.github/workflows/preview.yml
+++ b/.github/workflows/preview.yml
@@ -11,6 +11,7 @@ jobs:
uses: larsoner/circleci-artifacts-redirector-action@master
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- artifact-path: 0/build/draft/index.html
+ api-token: ${{ secrets.CIRCLECI_TOKEN }}
+ artifact-path: 0/_site/draft/index.html
circleci-jobs: build_page
job-title: Check the rendered docs here!
diff --git a/.gitignore b/.gitignore
index 86bab2717..cc40a3b43 100644
--- a/.gitignore
+++ b/.gitignore
@@ -22,7 +22,7 @@
# SOFTWARE.
#/
-spec/_build/
+_site/
doctrees/
build/
.vscode/
@@ -30,4 +30,8 @@ node_modules/
__pycache__/
*.pyc
spec/**/generated
-tmp/
\ No newline at end of file
+tmp/
+*.egg-info/
+*.egg
+dist/
+.DS_Store
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 000000000..7ec7f02a8
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,50 @@
+repos:
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.4.0
+ hooks:
+ - id: check-added-large-files
+ # Prevent giant files from being committed.
+ - id: check-ast
+ # Simply check whether files parse as valid python.
+ - id: check-case-conflict
+ # Check for files with names that would conflict on a case-insensitive
+ # filesystem like MacOS HFS+ or Windows FAT.
+ - id: check-json
+ # Attempts to load all json files to verify syntax.
+ - id: check-merge-conflict
+ # Check for files that contain merge conflict strings.
+ - id: check-symlinks
+ # Checks for symlinks which do not point to anything.
+ - id: check-toml
+ # Attempts to load all TOML files to verify syntax.
+ - id: check-xml
+ # Attempts to load all xml files to verify syntax.
+ - id: check-yaml
+ # Attempts to load all yaml files to verify syntax.
+ exclude: ".*(.github.*)$"
+ - id: debug-statements
+ # Check for debugger imports and py37+ breakpoint() calls in python
+ # source.
+ - id: detect-private-key
+ # Checks for the existence of private keys.
+ - id: end-of-file-fixer
+ # Makes sure files end in a newline and only a newline.
+ - id: trailing-whitespace
+ # Trims trailing whitespace.
+ exclude_types: [python]
+
+ - repo: https://github.com/pre-commit/pygrep-hooks
+ rev: v1.10.0
+ hooks:
+ - id: python-check-mock-methods
+ # Prevent common mistakes of assert mck.not_called(), assert
+ # mck.called_once_with(...) and mck.assert_called.
+ - id: text-unicode-replacement-char
+ # Forbid files which have a UTF-8 Unicode replacement character.
+ - id: python-check-blanket-noqa
+ # Enforce that all noqa annotations always occur with specific codes.
+
+ - repo: https://github.com/psf/black-pre-commit-mirror
+ rev: 23.7.0
+ hooks:
+ - id: black
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 002d4fec9..ca883c3b4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,304 @@
This changelog is organized by specification version and notes all changes with respect to the previous version. Within the section for a specific version (e.g., v2022.12), separate sections are used for (a) changes to existing APIs and requirements, (b) new APIs and new requirements, and (c) errata.
+## v2024.12
+
+### Updates
+
+> Updates to existing APIs and requirements.
+
+#### Normative
+
+- Clarify that conforming implementations may support additional arguments beyond those described in the Array API specification ([gh-870](https://github.com/data-apis/array-api/pull/870))
+- Clarify accuracy requirements for operations involving complex numbers ([gh-882](https://github.com/data-apis/array-api/pull/882))
+- Clarify expected results for in-place operations in conforming array libraries which do not support array mutation ([gh-895](https://github.com/data-apis/array-api/pull/895))
+
+#### APIs
+
+- `__dlpack__`: clarify the expected behavior of the `copy` keyword argument when `copy=True` ([gh-906](https://github.com/data-apis/array-api/pull/906))
+- `__eq__`: clarify that cross-kind comparisons are unspecified ([gh-822](https://github.com/data-apis/array-api/pull/822))
+- `__ge__`: clarify that cross-kind comparisons are unspecified ([gh-822](https://github.com/data-apis/array-api/pull/822))
+- `__getitem__`: clarify that iteration is defined for one-dimensional arrays ([gh-821](https://github.com/data-apis/array-api/pull/821))
+- `__gt__`: clarify that cross-kind comparisons are unspecified ([gh-822](https://github.com/data-apis/array-api/pull/822))
+- `__le__`: clarify that cross-kind comparisons are unspecified ([gh-822](https://github.com/data-apis/array-api/pull/822))
+- `__lt__`: clarify that cross-kind comparisons are unspecified ([gh-822](https://github.com/data-apis/array-api/pull/822))
+- `__ne__`: clarify that cross-kind comparisons are unspecified ([gh-822](https://github.com/data-apis/array-api/pull/822))
+- `asarray`: clarify the expected behavior of the `copy` keyword argument when `copy=True` ([gh-906](https://github.com/data-apis/array-api/pull/906))
+- `astype`: clarify the expected behavior of the `copy` keyword argument when `copy=True` ([gh-906](https://github.com/data-apis/array-api/pull/906))
+- `clip`: specify behavior when one of the operands is `NaN` ([gh-813](https://github.com/data-apis/array-api/pull/813); backported to v2023.12 revision of the Array API specification)
+- `clip`: clarify behavior when arguments have different data types ([gh-896](https://github.com/data-apis/array-api/pull/896))
+- `conj`: add support for real-valued arrays ([gh-884](https://github.com/data-apis/array-api/pull/884))
+- `cumulative_sum`: clarify that behavior when providing a zero-dimensional array is unspecified ([gh-851](https://github.com/data-apis/array-api/pull/851))
+- `equal`: clarify that cross-kind comparisons are unspecified ([gh-822](https://github.com/data-apis/array-api/pull/822))
+- `greater`: clarify that cross-kind comparisons are unspecified ([gh-822](https://github.com/data-apis/array-api/pull/822))
+- `greater_equal`: clarify that cross-kind comparisons are unspecified ([gh-822](https://github.com/data-apis/array-api/pull/822))
+- `less`: clarify that cross-kind comparisons are unspecified ([gh-822](https://github.com/data-apis/array-api/pull/822))
+- `less_equal`: clarify that cross-kind comparisons are unspecified ([gh-822](https://github.com/data-apis/array-api/pull/822))
+- `mean`: add support for complex floating-point data types ([gh-850](https://github.com/data-apis/array-api/pull/850))
+- `not_equal`: clarify that cross-kind comparisons are unspecified ([gh-822](https://github.com/data-apis/array-api/pull/822))
+- `real`: add support for real-valued arrays ([gh-884](https://github.com/data-apis/array-api/pull/884))
+- `reshape`: clarify the expected behavior of the `copy` keyword argument when `copy=True` ([gh-906](https://github.com/data-apis/array-api/pull/906))
+- `sqrt`: clarify that results must be correctly rounded according to IEEE 754 ([gh-882](https://github.com/data-apis/array-api/pull/882))
+- `take`: clarify that behavior when provided a zero-dimensional input array is unspecified ([gh-876](https://github.com/data-apis/array-api/pull/876))
+- `take`: clarify support for negative indices ([gh-894](https://github.com/data-apis/array-api/pull/894))
+
+##### Scalar Argument Support
+
+The following APIs were updated to support both scalar and array arguments for one or more arguments:
+
+- `add` ([gh-862](https://github.com/data-apis/array-api/pull/862))
+- `atan2` ([gh-862](https://github.com/data-apis/array-api/pull/862))
+- `bitwise_and` ([gh-862](https://github.com/data-apis/array-api/pull/862))
+- `bitwise_left_shift` ([gh-862](https://github.com/data-apis/array-api/pull/862))
+- `bitwise_or` ([gh-862](https://github.com/data-apis/array-api/pull/862))
+- `bitwise_right_shift` ([gh-862](https://github.com/data-apis/array-api/pull/862))
+- `bitwise_xor` ([gh-862](https://github.com/data-apis/array-api/pull/862))
+- `copysign` ([gh-862](https://github.com/data-apis/array-api/pull/862))
+- `divide` ([gh-862](https://github.com/data-apis/array-api/pull/862))
+- `equal` ([gh-862](https://github.com/data-apis/array-api/pull/862))
+- `floor_divide` ([gh-862](https://github.com/data-apis/array-api/pull/862))
+- `greater` ([gh-862](https://github.com/data-apis/array-api/pull/862))
+- `greater_equal` ([gh-862](https://github.com/data-apis/array-api/pull/862))
+- `hypot` ([gh-862](https://github.com/data-apis/array-api/pull/862))
+- `less` ([gh-862](https://github.com/data-apis/array-api/pull/862))
+- `less_equal` ([gh-862](https://github.com/data-apis/array-api/pull/862))
+- `logaddexp` ([gh-862](https://github.com/data-apis/array-api/pull/862))
+- `logical_and` ([gh-862](https://github.com/data-apis/array-api/pull/862))
+- `logical_or` ([gh-862](https://github.com/data-apis/array-api/pull/862))
+- `logical_xor` ([gh-862](https://github.com/data-apis/array-api/pull/862))
+- `maximum` ([gh-862](https://github.com/data-apis/array-api/pull/862))
+- `minimum` ([gh-862](https://github.com/data-apis/array-api/pull/862))
+- `multiply` ([gh-862](https://github.com/data-apis/array-api/pull/862))
+- `nextafter` ([gh-862](https://github.com/data-apis/array-api/pull/862))
+- `not_equal` ([gh-862](https://github.com/data-apis/array-api/pull/862))
+- `pow` ([gh-862](https://github.com/data-apis/array-api/pull/862))
+- `remainder` ([gh-862](https://github.com/data-apis/array-api/pull/862))
+- `result_type` ([gh-873](https://github.com/data-apis/array-api/pull/873))
+- `subtract` ([gh-862](https://github.com/data-apis/array-api/pull/862))
+- `where` ([gh-860](https://github.com/data-apis/array-api/pull/860))
+
+#### Extensions
+
+> Updates to APIs and requirements included as part of specification extensions.
+
+- `fft.fftfreq`: add `dtype` keyword argument ([gh-885](https://github.com/data-apis/array-api/pull/885))
+- `fft.rfftfreq`: add `dtype` keyword argument ([gh-885](https://github.com/data-apis/array-api/pull/885))
+
+* * *
+
+### Additions
+
+> New APIs and requirements added to the specification.
+
+#### Normative
+
+- Add support for integer array indexing ([gh-900](https://github.com/data-apis/array-api/pull/900))
+
+#### APIs
+
+The following APIs were added to the specification:
+
+- `count_nonzero`: count the number of array elements which are non-zero ([gh-803](https://github.com/data-apis/array-api/pull/803))
+- `cumulative_prod`: calculate the cumulative product ([gh-793](https://github.com/data-apis/array-api/pull/793))
+- `diff`: calculate the n-th discrete forward difference along a specified axis ([gh-791](https://github.com/data-apis/array-api/pull/791), [gh-881](https://github.com/data-apis/array-api/pull/881))
+- `nextafter`: return the next representable floating-point value for each element in an array ([gh-792](https://github.com/data-apis/array-api/pull/792))
+- `reciprocal`: return the reciprocal for each element in an array ([gh-802](https://github.com/data-apis/array-api/pull/802))
+- `take_along_axis`: return elements from an array at locations specified by one-dimensional indices along an axis ([gh-816](https://github.com/data-apis/array-api/pull/816))
+
+#### Inspection APIs
+
+The following inspection APIs were added to the specification:
+
+- `max dimensions`: return the maximum number of supported dimensions ([gh-763](https://github.com/data-apis/array-api/pull/763) and [gh-809](https://github.com/data-apis/array-api/pull/809))
+
+* * *
+
+### Breaking Changes
+
+The following is a list of breaking changes relative to the previous version of the specification:
+
+#### Normative
+
+- An operation involving a Python `complex` scalar and a real-valued floating-point array must be equivalent to an operation involving a zero-dimensional array having a complex floating-point data type and a real-valued floating-point array ([gh-871](https://github.com/data-apis/array-api/pull/871))
+
+#### APIs
+
+- `can_cast`: application of type promotion rules must account for device context ([gh-889](https://github.com/data-apis/array-api/pull/889))
+- `result_type`: application of type promotion rules must account for device context ([gh-889](https://github.com/data-apis/array-api/pull/889))
+
+* * *
+
+### Errata
+
+The following is a list of fixes and points of clarification with regard to the previous version of the specification:
+
+- `__add__`: fix typing for `other` argument ([gh-905](https://github.com/data-apis/array-api/pull/905))
+- `__bool__`: fix typo in special case notes ([gh-785](https://github.com/data-apis/array-api/pull/785))
+- `__dlpack__`: resolve conflicting exception guidance ([gh-887](https://github.com/data-apis/array-api/pull/887))
+- `__eq__`: fix typing for `other` argument ([gh-905](https://github.com/data-apis/array-api/pull/905))
+- `__getitem__`: clarify required indexing semantics ([gh-821](https://github.com/data-apis/array-api/pull/821))
+- `__mul__`: fix typing for `other` argument ([gh-905](https://github.com/data-apis/array-api/pull/905))
+- `__ne__`: fix typing for `other` argument ([gh-905](https://github.com/data-apis/array-api/pull/905))
+- `__pow__`: fix typing for `other` argument ([gh-905](https://github.com/data-apis/array-api/pull/905))
+- `__setitem__`: clarify required indexing semantics ([gh-821](https://github.com/data-apis/array-api/pull/821))
+- `__setitem__`: fix typing for `value` argument ([gh-905](https://github.com/data-apis/array-api/pull/905))
+- `__sub__`: fix typing for `other` argument ([gh-905](https://github.com/data-apis/array-api/pull/905))
+- `__truediv__`: fix typing for `other` argument ([gh-905](https://github.com/data-apis/array-api/pull/905))
+- `broadcast_to`: clarify broadcast behavior ([gh-888](https://github.com/data-apis/array-api/pull/888))
+- `broadcast_to`: clarify required exception behavior ([gh-897](https://github.com/data-apis/array-api/pull/897))
+- `clip`: clarify that the operation is only defined when elements in `min` and `max` are inside the bounds of the input array data type ([gh-814](https://github.com/data-apis/array-api/pull/814))
+- `clip`: fix typo in parameter description ([gh-896](https://github.com/data-apis/array-api/pull/896))
+- `copysign`: fix formatting of special cases ([gh-806](https://github.com/data-apis/array-api/pull/806))
+- `fft.fft`: fix typo in function description ([gh-806](https://github.com/data-apis/array-api/pull/806))
+- `fft.ifft`: fix typo in function description ([gh-806](https://github.com/data-apis/array-api/pull/806))
+- `fft.fftn`: fix typo in function description ([gh-806](https://github.com/data-apis/array-api/pull/806))
+- `fft.ifftn`: fix typo in function description ([gh-806](https://github.com/data-apis/array-api/pull/806))
+- `fft.irfft`: fix typo in function description ([gh-806](https://github.com/data-apis/array-api/pull/806))
+- `fft.irfftn`: fix typo in function description ([gh-806](https://github.com/data-apis/array-api/pull/806))
+- `fft.hfft`: fix typo in function description ([gh-806](https://github.com/data-apis/array-api/pull/806))
+- `linalg.solve`: clarify broadcasting semantics and output shape ([gh-810](https://github.com/data-apis/array-api/pull/810))
+- `nonzero`: fix return type ([gh-803](https://github.com/data-apis/array-api/pull/803) and [gh-904](https://github.com/data-apis/array-api/pull/904))
+- `searchsorted`: fix incorrect boundary conditions ([gh-898](https://github.com/data-apis/array-api/pull/898))
+- `sign`: fix equation in function description ([gh-844](https://github.com/data-apis/array-api/pull/844))
+- `tile`: fix missing return type ([gh-798](https://github.com/data-apis/array-api/pull/798))
+- `unstack`: fix typo in function description ([gh-810](https://github.com/data-apis/array-api/pull/810))
+- `vecdot`: fix regression in default value for `axis` keyword argument ([gh-880](https://github.com/data-apis/array-api/pull/880))
+- `where`: clarify that the `condition` argument should have a boolean data type ([gh-868](https://github.com/data-apis/array-api/pull/868))
+
+* * *
+
+## v2023.12
+
+### Updates
+
+> Updates to existing APIs and requirements.
+
+#### Normative
+
+- Clarify expectations concerning exception handling ([gh-613](https://github.com/data-apis/array-api/pull/613))
+- Clarify that the constant `newaxis` is an alias of `None` ([gh-687](https://github.com/data-apis/array-api/pull/687))
+- Add design discussion on lazy versus eager implementations ([gh-708](https://github.com/data-apis/array-api/pull/708))
+- Revise guidance to require a minimum upper bound for supported ranks ([gh-702](https://github.com/data-apis/array-api/pull/702))
+- Relax design requirements for positional and keyword-only arguments ([gh-730](https://github.com/data-apis/array-api/pull/730))
+- Add recommendation to `__dlpack__` for handling read-only arrays ([gh-749](https://github.com/data-apis/array-api/pull/749))
+
+#### APIs
+
+- `__bool__`: allow lazy implementations to raise from intrinsically eager functions ([gh-652](https://github.com/data-apis/array-api/pull/652))
+- `__complex__`: allow lazy implementations to raise from intrinsically eager functions ([gh-652](https://github.com/data-apis/array-api/pull/652))
+- `__dlpack__`: add `max_version` keyword argument to support versioning ([gh-602](https://github.com/data-apis/array-api/pull/602))
+- `__dlpack__`: add `dl_device` and `copy` keyword arguments ([gh-741](https://github.com/data-apis/array-api/pull/741))
+- `__float__`: allow lazy implementations to raise from intrinsically eager functions ([gh-652](https://github.com/data-apis/array-api/pull/652))
+- `__index__`: allow lazy implementations to raise from intrinsically eager functions ([gh-652](https://github.com/data-apis/array-api/pull/652))
+- `__int__`: allow lazy implementations to raise from intrinsically eager functions ([gh-652](https://github.com/data-apis/array-api/pull/652))
+- `astype`: add support for an optional `device` keyword argument ([gh-665](https://github.com/data-apis/array-api/pull/665))
+- `from_dlpack`: require exceptions to address unsupported use cases ([gh-709](https://github.com/data-apis/array-api/pull/709))
+- `from_dlpack`: add support for `copy` and `device` keywords ([gh-741](https://github.com/data-apis/array-api/pull/741))
+- `max`: clarify that the order of signed zeros is unspecified ([gh-751](https://github.com/data-apis/array-api/pull/751))
+- `min`: clarify that the order of signed zeros is unspecified ([gh-751](https://github.com/data-apis/array-api/pull/751))
+- `take`: explicitly leave out-of-bounds behavior unspecified ([gh-701](https://github.com/data-apis/array-api/pull/701))
+- `tensordot`: allow negative axes ([gh-625](https://github.com/data-apis/array-api/pull/625))
+- `to_device`: clarify behavior when a provided `device` object corresponds to the same device on which an array instance resides ([gh-742](https://github.com/data-apis/array-api/pull/742))
+- `unique_all`: clarify the shape of the array containing unique values and the order of returned counts ([gh-752](https://github.com/data-apis/array-api/pull/752))
+- `unique_counts`: clarify the shape of the array containing unique values and the order of returned counts ([gh-752](https://github.com/data-apis/array-api/pull/752))
+- `unique_inverse`: clarify the shape of the array containing unique values ([gh-752](https://github.com/data-apis/array-api/pull/752))
+- `unique_values`: clarify the shape of the returned array ([gh-752](https://github.com/data-apis/array-api/pull/752))
+
+#### Extensions
+
+> Updates to APIs and requirements included as part of specification extensions.
+
+- `fft.*`: clarify behavior of the `n` and `s` keyword arguments and the expected output array shape ([gh-720](https://github.com/data-apis/array-api/pull/720) and [gh-746](https://github.com/data-apis/array-api/pull/746); backported to v2022.12 revision of Array API specification)
+
+* * *
+
+### Additions
+
+> New APIs and requirements added to the specification.
+
+#### APIs
+
+The following APIs were added to the specification:
+
+- `__array_namespace_info__`: namespace with Array API namespace inspection utilities ([gh-689](https://github.com/data-apis/array-api/pull/689))
+- `clip`: clamp each element of an input array to a specified range ([gh-715](https://github.com/data-apis/array-api/pull/715))
+- `copysign`: compose a floating-point value with the magnitude of `x1_i` and the sign of `x2_i` ([gh-693](https://github.com/data-apis/array-api/pull/693))
+- `cumulative_sum`: calculate the cumulative sum ([gh-653](https://github.com/data-apis/array-api/pull/653))
+- `hypot`: compute the square root of the sum of squares for each element in an array ([gh-703](https://github.com/data-apis/array-api/pull/703))
+- `maximum`: compute the maximum value for each element of an array relative to the respective element in another array ([gh-713](https://github.com/data-apis/array-api/pull/713))
+- `minimum`: compute the minimum value for each element of an array relative to the respective element in another array ([gh-713](https://github.com/data-apis/array-api/pull/713))
+- `moveaxis`: move array axes to new positions, while leaving other axes in their original positions ([gh-656](https://github.com/data-apis/array-api/pull/656))
+- `repeat`: repeat each element of an array a specified number of times ([gh-690](https://github.com/data-apis/array-api/pull/690))
+- `searchsorted`: find the indices into `x1` such that, if the corresponding elements in `x2` were inserted before the indices, the order of `x1`, when sorted in ascending order, would be preserved ([gh-699](https://github.com/data-apis/array-api/pull/699))
+- `signbit`: determine whether the sign bit is set for each element of an array ([gh-705](https://github.com/data-apis/array-api/pull/705))
+- `tile`: construct an array by tiling an input array ([gh-692](https://github.com/data-apis/array-api/pull/692))
+- `unstack`: split an array into a sequence of arrays along a given axis ([gh-604](https://github.com/data-apis/array-api/pull/604))
+
+#### Inspection APIs
+
+The following inspection APIs were added to the specification:
+
+- `capabilities`: return a dictionary of array library capabilities ([gh-689](https://github.com/data-apis/array-api/pull/689))
+- `default_device`: return the default device ([gh-689](https://github.com/data-apis/array-api/pull/689))
+- `default_dtypes`: return a dictionary containing default data types ([gh-689](https://github.com/data-apis/array-api/pull/689))
+- `dtypes`: return a dictionary of supported Array API data types ([gh-689](https://github.com/data-apis/array-api/pull/689))
+- `devices`: return a list of supported devices ([gh-689](https://github.com/data-apis/array-api/pull/689))
+
+* * *
+
+### Breaking Changes
+
+The following is a list of breaking changes relative to the previous version of the specification:
+
+- `prod`: when provided a floating-point array, the function must return a floating-point array having the same data type ([gh-744](https://github.com/data-apis/array-api/pull/744))
+- `sum`: when provided a floating-point array, the function must return a floating-point array having the same data type ([gh-744](https://github.com/data-apis/array-api/pull/744))
+- `vecdot`: only require a negative integer for the `axis` keyword argument ([gh-740](https://github.com/data-apis/array-api/pull/740))
+
+#### Extensions
+
+The following is a list of breaking changes in specification extensions relative to the previous version of the specification:
+
+- `fft.fft`: require the input array to have a complex-valued floating-point data type and require that the output array have the same data type as the input array ([gh-720](https://github.com/data-apis/array-api/pull/720); backported to v2022.12 revision of Array API specification)
+- `fft.fftn`: require the input array to have a complex-valued floating-point data type and require that the output array have the same data type as the input array ([gh-720](https://github.com/data-apis/array-api/pull/720); backported to v2022.12 revision of Array API specification)
+- `fft.hfft`: require the input array to have a complex-valued floating-point data type and require that the output array have a real-valued data type having the same precision as the input array ([gh-720](https://github.com/data-apis/array-api/pull/720); backported to v2022.12 revision of Array API specification)
+- `fft.ifft`: require the input array to have a complex-valued floating-point data type and require that the output array have the same data type as the input array ([gh-720](https://github.com/data-apis/array-api/pull/720); backported to v2022.12 revision of Array API specification)
+- `fft.ifftn`: require the input array to have a complex-valued floating-point data type and require that the output array have the same data type as the input array ([gh-720](https://github.com/data-apis/array-api/pull/720); backported to v2022.12 revision of Array API specification)
+- `fft.irfft`: require the output array have a real-valued floating-point data type having the same precision as the input array ([gh-720](https://github.com/data-apis/array-api/pull/720); backported to v2022.12 revision of Array API specification)
+- `fft.irfftn`: require the output array have a real-valued floating-point data type having the same precision as the input array ([gh-720](https://github.com/data-apis/array-api/pull/720); backported to v2022.12 revision of Array API specification)
+- `fft.fftfreq`: require the output array have the default real-valued floating-point data type ([gh-720](https://github.com/data-apis/array-api/pull/720); backported to v2022.12 revision of Array API specification)
+- `fft.rfftfreq`: require the output array have the default real-valued floating-point data type ([gh-720](https://github.com/data-apis/array-api/pull/720); backported to v2022.12 revision of Array API specification)
+- `linalg.cross`: broadcast only along non-compute axes and only require a negative integer for the `axis` keyword argument ([gh-740](https://github.com/data-apis/array-api/pull/740))
+- `linalg.trace`: when provided a floating-point array, the function must return a floating-point array having the same data type ([gh-744](https://github.com/data-apis/array-api/pull/744))
+
+* * *
+
+### Errata
+
+The following is a list of fixes and points of clarification with regard to the previous version of the specification:
+
+- `__getitem__`: clarify typing to allow `None` in indexing ([gh-674](https://github.com/data-apis/array-api/pull/674) and [gh-687](https://github.com/data-apis/array-api/pull/687))
+- `__ge__`: clarify that the operation is only defined for arrays having real-valued data types ([gh-736](https://github.com/data-apis/array-api/pull/736))
+- `__gt__`: clarify that the operation is only defined for arrays having real-valued data types ([gh-736](https://github.com/data-apis/array-api/pull/736))
+- `__le__`: clarify that the operation is only defined for arrays having real-valued data types ([gh-736](https://github.com/data-apis/array-api/pull/736))
+- `__lt__`: clarify that the operation is only defined for arrays having real-valued data types ([gh-736](https://github.com/data-apis/array-api/pull/736))
+- `abs`: fix typo in return value description ([gh-633](https://github.com/data-apis/array-api/pull/633))
+- `asarray`: fix typo in `device` keyword argument description ([gh-681](https://github.com/data-apis/array-api/pull/681))
+- `conj`: fix typo in parameter description ([gh-706](https://github.com/data-apis/array-api/pull/706))
+- `finfo_object`: fix missing `dtype` attribute ([gh-639](https://github.com/data-apis/array-api/pull/639))
+- `fft.*`: fix various typing issues ([gh-720](https://github.com/data-apis/array-api/pull/720))
+- `iinfo_object`: fix missing `dtype` attribute ([gh-639](https://github.com/data-apis/array-api/pull/639))
+- `linalg.qr`: fix typo in function description ([gh-661](https://github.com/data-apis/array-api/pull/661))
+- `linalg.cholesky`: fix typo in function description ([gh-677](https://github.com/data-apis/array-api/pull/677))
+- `linalg.svd`: fix return type ([gh-619](https://github.com/data-apis/array-api/pull/619))
+- `prod`: clarify type promotion behavior when `dtype=None` ([gh-666](https://github.com/data-apis/array-api/pull/666))
+- `sum`: clarify type promotion behavior when `dtype=None` ([gh-666](https://github.com/data-apis/array-api/pull/666))
+- `take`: fix typing for optional `axis` keyword argument ([gh-644](https://github.com/data-apis/array-api/pull/644))
+- `tensordot`: fix typo in parameter description ([gh-622](https://github.com/data-apis/array-api/pull/622))
+- `trace`: clarify type promotion behavior when `dtype=None` ([gh-666](https://github.com/data-apis/array-api/pull/666))
+- `vecdot`: fix definition of complex inner product ([gh-723](https://github.com/data-apis/array-api/pull/723))
+
+* * *
+
## v2022.12
### Updates
@@ -196,4 +494,4 @@ The following is a list of fixes and points of clarification with regard to the
- `linspace`: conversion of `start` and `stop` should follow type promotion rules ([gh-568](https://github.com/data-apis/array-api/pull/568))
- `nonzero`: clarify that, for arrays having a boolean data type, non-zero elements are those elements which equal `True` ([gh-441](https://github.com/data-apis/array-api/pull/441))
- `trunc`: fix description ([gh-511](https://github.com/data-apis/array-api/pull/511))
-- `vecdot`: clarify broadcasting behavior ([gh-417](https://github.com/data-apis/array-api/pull/417) and [gh-473](https://github.com/data-apis/array-api/pull/473))
\ No newline at end of file
+- `vecdot`: clarify broadcasting behavior ([gh-417](https://github.com/data-apis/array-api/pull/417) and [gh-473](https://github.com/data-apis/array-api/pull/473))
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index 3ceb0ad58..9e9419abe 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -1,3 +1,3 @@
Please note that the Consortium for Python Data API Standards has a Code of
Conduct that we ask everyone to respect, see:
-https://github.com/data-apis/.github/blob/master/CODE_OF_CONDUCT.md
\ No newline at end of file
+https://github.com/data-apis/.github/blob/master/CODE_OF_CONDUCT.md
diff --git a/LICENSE b/LICENSE
index e861ffccf..8d3ac90e9 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
MIT License
-Copyright (c) 2020-2022 Consortium for Python Data API Standards contributors
+Copyright (c) 2020-2025 Consortium for Python Data API Standards contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 000000000..7616b26fd
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,3 @@
+exclude README.md
+exclude src/_array_api_conf.py
+include PACKAGE.md
diff --git a/Makefile b/Makefile
new file mode 100644
index 000000000..aa2212611
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,29 @@
+# You can set these variables from the command line.
+SPHINXOPTS ?= -W --keep-going
+SOURCEDIR = spec
+BUILDDIR = _site
+
+.PHONY: default clean draft spec
+
+default: clean spec
+
+clean:
+ rm -rf $(BUILDDIR)
+ find . -type d -name generated -exec rm -rf {} +
+
+draft:
+ mkdir -p $(BUILDDIR)
+ sphinx-build "$(SOURCEDIR)/draft" "$(BUILDDIR)/draft" $(SPHINXOPTS)
+
+spec:
+ mkdir -p $(BUILDDIR)
+ cp "$(SOURCEDIR)/_ghpages/_gitignore.txt" "$(BUILDDIR)/.gitignore"
+ cp "$(SOURCEDIR)/_ghpages/versions.json" "$(BUILDDIR)/versions.json"
+ cp "$(SOURCEDIR)/_ghpages/index.html" "$(BUILDDIR)/index.html"
+ touch "$(BUILDDIR)/.nojekyll"
+ sphinx-build "$(SOURCEDIR)/2021.12" "$(BUILDDIR)/2021.12" $(SPHINXOPTS)
+ sphinx-build "$(SOURCEDIR)/2022.12" "$(BUILDDIR)/2022.12" $(SPHINXOPTS)
+ sphinx-build "$(SOURCEDIR)/2023.12" "$(BUILDDIR)/2023.12" $(SPHINXOPTS)
+ sphinx-build "$(SOURCEDIR)/2024.12" "$(BUILDDIR)/2024.12" $(SPHINXOPTS)
+ cp -r "$(BUILDDIR)/2024.12" "$(BUILDDIR)/latest"
+ sphinx-build "$(SOURCEDIR)/draft" "$(BUILDDIR)/draft" $(SPHINXOPTS)
diff --git a/PACKAGE.md b/PACKAGE.md
new file mode 100644
index 000000000..dc5079d2a
--- /dev/null
+++ b/PACKAGE.md
@@ -0,0 +1,9 @@
+# Stubs for the array API standard
+
+Documentation specific to singular Python objects in the spec (i.e., functions,
+methods and attributes) are in fact represented by stub objects in the package
+`array-api-stubs`. These stubs ultimately get rendered via the autodoc
+capabilities in Sphinx.
+
+TODO: describe how `array-api-stubs` can be used for tooling, once it actually
+has the capacity to do so.
diff --git a/README.md b/README.md
index d0d88ae4c..fef3099ff 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
# Array API standard
-[](#contributors-)
+[](#contributors-)
This repository contains documents, tooling and other content related to the
@@ -15,6 +15,134 @@ These are relevant documents related to the content in this repository:
See [CONTRIBUTING.md](CONTRIBUTING.md) for how to go about contributing to
this array API standard.
+
+## Building docs locally
+
+### Quickstart
+
+To install the local stubs and additional dependencies of the Sphinx docs, you
+can use `pip install -r doc-requirements.txt`. Then just running `make` at the
+root of the repository should build the whole spec website.
+
+```sh
+$ pip install -r doc-requirements.txt
+$ make
+$ ls _site/
+2021.12/ draft/ index.html latest/ versions.json
+```
+
+### The nitty-gritty
+
+The spec website is composed of multiple Sphinx docs (one for each spec version),
+all of which exist in `spec/` and rely on the modules found in `src/` (most
+notably `array_api_stubs`). For purposes of building the docs, these `src/`
+modules do not need to be installed as they are added to the `sys.path` at
+runtime.
+
+To build specific versions of the spec, run `sphinx-build` on the respective
+folder in `spec/`, e.g.
+
+```sh
+$ sphinx-build spec/2021.12/ _site/2021.12/
+```
+
+Additionally, `make draft` aliases
+
+```sh
+$ sphinx-build spec/draft/ _site/draft/
+```
+
+To build the whole website, which includes every version of the spec, you can
+utilize `make spec`.
+
+
+## Making a spec release
+
+The Sphinx doc at `spec/draft/` should be where the in-development spec resides,
+with `src/array_api_stubs/_draft/` containing its respective stubs. A spec
+release should involve:
+
+* Renaming `src/array_api_stubs/_draft/` to `src/array_api_stubs/_YYYY_MM`
+* Renaming `spec/draft/` to `spec/YYYY.MM`
+* Updating `spec/YYYY.MM/conf.py`
+
+ ```diff
+ ...
+ - from array_api_stubs import _draft as stubs_mod
+ + from array_api_stubs import _YYYY_MM as stubs_mod
+ ...
+ - release = "DRAFT"
+ + release = "YYYY.MM"
+ ...
+ ```
+
+* Updating `spec/_ghpages/versions.json`
+
+ ```diff
+ {
+ + "YYYY.MM": "YYYY.MM",
+ ...
+ ```
+
+* Updating `Makefile`
+
+ ```diff
+ ...
+ -sphinx-build "$(SOURCEDIR)/PREVIOUS.VER" "$(BUILDDIR)/PREVIOUS.VER" $(SPHINXOPTS)
+ + -sphinx-build "$(SOURCEDIR)/YYYY.MM" "$(BUILDDIR)/YYYY.MM" $(SPHINXOPTS)
+ - -cp -r "$(BUILDDIR)/PREVIOUS.VER" "$(BUILDDIR)/latest"
+ + -cp -r "$(BUILDDIR)/YYYY.MM" "$(BUILDDIR)/latest"
+ ...
+ ```
+
+These changes should be committed and tagged. The next draft should then be
+created. To preserve git history for both the new release and the next draft:
+
+1. Create and checkout to a new temporary branch.
+
+ ```sh
+ $ git checkout -b tmp
+ ```
+
+2. Make an empty commit. This is required so merging the temporary branch
+ (4.) is not automatic.
+
+ ```sh
+ $ git commit --allow-empty -m "Empty commit for draft at YYYY.MM"
+ ```
+
+3. Checkout back to the branch you are making a spec release in.
+
+ ```sh
+ $ git checkout YYYY.MM-release
+ ```
+
+4. Merge the temporary branch, specifying no commit and no fast-forwarding.
+
+ ```sh
+ $ git merge --no-commit --no-ff tmp
+ Automatic merge went well; stopped before committing as requested
+ ```
+
+5. Checkout the `spec/draft/` files from the temporary branch.
+
+ ```sh
+ $ git checkout tmp -- spec/draft/
+ ```
+
+6. Commit your changes.
+
+ ```sh
+ $ git commit -m "Copy YYYY.MM as draft with preserved git history"
+ ```
+
+You can run `git blame` on both `spec/YYYY.MM` and `spec/draft` files to verify
+we've preserved history. See this [StackOverflow question](https://stackoverflow.com/q/74365771/5193926)
+for more background on the approach we use.
+
+
+
+
## Contributors β¨
Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/docs/en/emoji-key)):
@@ -23,45 +151,92 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d
diff --git a/doc-requirements.txt b/doc-requirements.txt
new file mode 100644
index 000000000..15ef0b245
--- /dev/null
+++ b/doc-requirements.txt
@@ -0,0 +1,8 @@
+sphinx==7.2.6
+sphinx-material==0.0.36
+myst-parser
+sphinx-markdown-tables
+sphinx-copybutton
+sphinx-favicon
+sphinx-math-dollar
+sphinxcontrib-jquery
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 000000000..57af04207
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,34 @@
+[project]
+name = "array-api-stubs"
+version = "0.0.2"
+description = "Stubs for the array API standard"
+authors = []
+license = {file = "LICENSE"}
+readme = "PACKAGE.md"
+requires-python = ">=3.8"
+keywords = []
+classifiers = []
+
+[project.urls]
+Source = "https://github.com/data-apis/array-api/"
+Documentation = "https://data-apis.org/array-api/"
+Homepage = "https://data-apis.org/"
+
+[project.optional-dependencies]
+doc = [
+ "sphinx==4.3.0",
+ "sphinx-material==0.0.30",
+ "myst-parser",
+ "sphinx_markdown_tables",
+ "sphinx_copybutton",
+ "docutils<0.18",
+ "sphinx-math-dollar",
+ "sphinx-favicon",
+]
+
+[build-system]
+requires = ["setuptools"]
+build-backend = "setuptools.build_meta"
+
+[tool.black]
+line-length = 88
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index 230413784..000000000
--- a/requirements.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-sphinx==4.3.0
-sphinx-material==0.0.30
-myst-parser
-sphinx_markdown_tables
-sphinx_copybutton
-docutils<0.18
-sphinx-math-dollar
diff --git a/spec/2021.12/API_specification/array_object.rst b/spec/2021.12/API_specification/array_object.rst
new file mode 100644
index 000000000..32b775b6a
--- /dev/null
+++ b/spec/2021.12/API_specification/array_object.rst
@@ -0,0 +1,313 @@
+.. _array-object:
+
+Array object
+============
+
+ Array API specification for array object attributes and methods.
+
+A conforming implementation of the array API standard must provide and support an array object having the following attributes and methods adhering to the following conventions.
+
+* Positional parameters must be `positional-only `_ parameters. Positional-only parameters have no externally-usable name. When a method accepting positional-only parameters is called, positional arguments are mapped to these parameters based solely on their order.
+* Optional parameters must be `keyword-only `_ arguments.
+* Broadcasting semantics must follow the semantics defined in :ref:`broadcasting`.
+* Unless stated otherwise, methods must support the data types defined in :ref:`data-types`.
+* Unless stated otherwise, methods must adhere to the type promotion rules defined in :ref:`type-promotion`.
+* Unless stated otherwise, floating-point operations must adhere to IEEE 754-2019.
+
+Furthermore, a conforming implementation of the array API standard must support array objects of arbitrary rank ``N`` (i.e., number of dimensions), where ``N`` is greater than or equal to zero.
+
+.. note::
+ Conforming implementations must support zero-dimensional arrays.
+
+ Apart from array object attributes, such as ``ndim``, ``device``, and ``dtype``, all operations in this standard return arrays (or tuples of arrays), including those operations, such as ``mean``, ``var``, and ``std``, from which some common array libraries (e.g., NumPy) return scalar values.
+
+ *Rationale: always returning arrays is necessary to (1) support accelerator libraries where non-array return values could force device synchronization and (2) support delayed execution models where an array represents a future value.*
+
+-------------------------------------------------
+
+.. _operators:
+
+Operators
+---------
+
+A conforming implementation of the array API standard must provide and support an array object supporting the following Python operators.
+
+Arithmetic Operators
+~~~~~~~~~~~~~~~~~~~~
+
+A conforming implementation of the array API standard must provide and support an array object supporting the following Python arithmetic operators.
+
+- ``+x``: :meth:`.array.__pos__`
+
+ - `operator.pos(x) `_
+ - `operator.__pos__(x) `_
+
+- `-x`: :meth:`.array.__neg__`
+
+ - `operator.neg(x) `_
+ - `operator.__neg__(x) `_
+
+- `x1 + x2`: :meth:`.array.__add__`
+
+ - `operator.add(x1, x2) `_
+ - `operator.__add__(x1, x2) `_
+
+- `x1 - x2`: :meth:`.array.__sub__`
+
+ - `operator.sub(x1, x2) `_
+ - `operator.__sub__(x1, x2) `_
+
+- `x1 * x2`: :meth:`.array.__mul__`
+
+ - `operator.mul(x1, x2) `_
+ - `operator.__mul__(x1, x2) `_
+
+- `x1 / x2`: :meth:`.array.__truediv__`
+
+ - `operator.truediv(x1,x2) `_
+ - `operator.__truediv__(x1, x2) `_
+
+- `x1 // x2`: :meth:`.array.__floordiv__`
+
+ - `operator.floordiv(x1, x2) `_
+ - `operator.__floordiv__(x1, x2) `_
+
+- `x1 % x2`: :meth:`.array.__mod__`
+
+ - `operator.mod(x1, x2) `_
+ - `operator.__mod__(x1, x2) `_
+
+- `x1 ** x2`: :meth:`.array.__pow__`
+
+ - `operator.pow(x1, x2) `_
+ - `operator.__pow__(x1, x2) `_
+
+Arithmetic operators should be defined for arrays having numeric data types.
+
+Array Operators
+~~~~~~~~~~~~~~~
+
+A conforming implementation of the array API standard must provide and support an array object supporting the following Python array operators.
+
+- `x1 @ x2`: :meth:`.array.__matmul__`
+
+ - `operator.matmul(x1, x2) `_
+ - `operator.__matmul__(x1, x2) `_
+
+The matmul ``@`` operator should be defined for arrays having numeric data types.
+
+Bitwise Operators
+~~~~~~~~~~~~~~~~~
+
+A conforming implementation of the array API standard must provide and support an array object supporting the following Python bitwise operators.
+
+- `~x`: :meth:`.array.__invert__`
+
+ - `operator.inv(x) `_
+ - `operator.invert(x) `_
+ - `operator.__inv__(x) `_
+ - `operator.__invert__(x) `_
+
+- `x1 & x2`: :meth:`.array.__and__`
+
+ - `operator.and(x1, x2) `_
+ - `operator.__and__(x1, x2) `_
+
+- `x1 | x2`: :meth:`.array.__or__`
+
+ - `operator.or(x1, x2) `_
+ - `operator.__or__(x1, x2) `_
+
+- `x1 ^ x2`: :meth:`.array.__xor__`
+
+ - `operator.xor(x1, x2) `_
+ - `operator.__xor__(x1, x2) `_
+
+- `x1 << x2`: :meth:`.array.__lshift__`
+
+ - `operator.lshift(x1, x2) `_
+ - `operator.__lshift__(x1, x2) `_
+
+- `x1 >> x2`: :meth:`.array.__rshift__`
+
+ - `operator.rshift(x1, x2) `_
+ - `operator.__rshift__(x1, x2) `_
+
+Bitwise operators should be defined for arrays having integer and boolean data types.
+
+Comparison Operators
+~~~~~~~~~~~~~~~~~~~~
+
+A conforming implementation of the array API standard must provide and support an array object supporting the following Python comparison operators.
+
+- `x1 < x2`: :meth:`.array.__lt__`
+
+ - `operator.lt(x1, x2) `_
+ - `operator.__lt__(x1, x2) `_
+
+- `x1 <= x2`: :meth:`.array.__le__`
+
+ - `operator.le(x1, x2) `_
+ - `operator.__le__(x1, x2) `_
+
+- `x1 > x2`: :meth:`.array.__gt__`
+
+ - `operator.gt(x1, x2) `_
+ - `operator.__gt__(x1, x2) `_
+
+- `x1 >= x2`: :meth:`.array.__ge__`
+
+ - `operator.ge(x1, x2) `_
+ - `operator.__ge__(x1, x2) `_
+
+- `x1 == x2`: :meth:`.array.__eq__`
+
+ - `operator.eq(x1, x2) `_
+ - `operator.__eq__(x1, x2) `_
+
+- `x1 != x2`: :meth:`.array.__ne__`
+
+ - `operator.ne(x1, x2) `_
+ - `operator.__ne__(x1, x2) `_
+
+Comparison operators should be defined for arrays having any data type.
+
+In-place Operators
+~~~~~~~~~~~~~~~~~~
+
+A conforming implementation of the array API standard must provide and support an array object supporting the following in-place Python operators.
+
+An in-place operation must not change the data type or shape of the in-place array as a result of :ref:`type-promotion` or :ref:`broadcasting`.
+
+An in-place operation must have the same behavior (including special cases) as its respective binary (i.e., two operand, non-assignment) operation. For example, after in-place addition ``x1 += x2``, the modified array ``x1`` must always equal the result of the equivalent binary arithmetic operation ``x1 = x1 + x2``.
+
+.. note::
+ In-place operators must be supported as discussed in :ref:`copyview-mutability`.
+
+Arithmetic Operators
+""""""""""""""""""""
+
+- ``+=``. May be implemented via ``__iadd__``.
+- ``-=``. May be implemented via ``__isub__``.
+- ``*=``. May be implemented via ``__imul__``.
+- ``/=``. May be implemented via ``__itruediv__``.
+- ``//=``. May be implemented via ``__ifloordiv__``.
+- ``**=``. May be implemented via ``__ipow__``.
+- ``%=``. May be implemented via ``__imod__``.
+
+Array Operators
+"""""""""""""""
+
+- ``@=``. May be implemented via ``__imatmul__``.
+
+Bitwise Operators
+"""""""""""""""""
+
+- ``&=``. May be implemented via ``__iand__``.
+- ``|=``. May be implemented via ``__ior__``.
+- ``^=``. May be implemented via ``__ixor__``.
+- ``<<=``. May be implemented via ``__ilshift__``.
+- ``>>=``. May be implemented via ``__irshift__``.
+
+Reflected Operators
+~~~~~~~~~~~~~~~~~~~
+
+A conforming implementation of the array API standard must provide and support an array object supporting the following reflected operators.
+
+The results of applying reflected operators must match their non-reflected equivalents.
+
+.. note::
+ All operators for which ``array scalar`` is implemented must have an equivalent reflected operator implementation.
+
+Arithmetic Operators
+""""""""""""""""""""
+
+- ``__radd__``
+- ``__rsub__``
+- ``__rmul__``
+- ``__rtruediv__``
+- ``__rfloordiv__``
+- ``__rpow__``
+- ``__rmod__``
+
+Array Operators
+"""""""""""""""
+
+- ``__rmatmul__``
+
+Bitwise Operators
+"""""""""""""""""
+
+- ``__rand__``
+- ``__ror__``
+- ``__rxor__``
+- ``__rlshift__``
+- ``__rrshift__``
+
+-------------------------------------------------
+
+.. currentmodule:: array_api
+
+Attributes
+----------
+..
+ NOTE: please keep the attributes in alphabetical order
+
+
+.. autosummary::
+ :toctree: generated
+ :template: property.rst
+
+ array.dtype
+ array.device
+ array.mT
+ array.ndim
+ array.shape
+ array.size
+ array.T
+
+-------------------------------------------------
+
+Methods
+-------
+..
+ NOTE: please keep the methods in alphabetical order
+
+
+.. autosummary::
+ :toctree: generated
+ :template: property.rst
+
+ array.__abs__
+ array.__add__
+ array.__and__
+ array.__array_namespace__
+ array.__bool__
+ array.__dlpack__
+ array.__dlpack_device__
+ array.__eq__
+ array.__float__
+ array.__floordiv__
+ array.__ge__
+ array.__getitem__
+ array.__gt__
+ array.__index__
+ array.__int__
+ array.__invert__
+ array.__le__
+ array.__lshift__
+ array.__lt__
+ array.__matmul__
+ array.__mod__
+ array.__mul__
+ array.__ne__
+ array.__neg__
+ array.__or__
+ array.__pos__
+ array.__pow__
+ array.__rshift__
+ array.__setitem__
+ array.__sub__
+ array.__truediv__
+ array.__xor__
+ array.to_device
diff --git a/spec/2021.12/API_specification/broadcasting.rst b/spec/2021.12/API_specification/broadcasting.rst
new file mode 100644
index 000000000..ec72fb089
--- /dev/null
+++ b/spec/2021.12/API_specification/broadcasting.rst
@@ -0,0 +1,115 @@
+.. _broadcasting:
+
+Broadcasting
+============
+
+ Array API specification for broadcasting semantics.
+
+Overview
+--------
+
+**Broadcasting** refers to the automatic (implicit) expansion of array dimensions to be of equal sizes without copying array data for the purpose of making arrays with different shapes have compatible shapes for element-wise operations.
+
+Broadcasting facilitates user ergonomics by encouraging users to avoid unnecessary copying of array data and can **potentially** enable more memory-efficient element-wise operations through vectorization, reduced memory consumption, and cache locality.
+
+Algorithm
+---------
+
+Given an element-wise operation involving two compatible arrays, an array having a singleton dimension (i.e., a dimension whose size is one) is broadcast (i.e., virtually repeated) across an array having a corresponding non-singleton dimension.
+
+If two arrays are of unequal rank, the array having a lower rank is promoted to a higher rank by (virtually) prepending singleton dimensions until the number of dimensions matches that of the array having a higher rank.
+
+The results of the element-wise operation must be stored in an array having a shape determined by the following algorithm.
+
+#. Let ``A`` and ``B`` both be arrays.
+
+#. Let ``shape1`` be a tuple describing the shape of array ``A``.
+
+#. Let ``shape2`` be a tuple describing the shape of array ``B``.
+
+#. Let ``N1`` be the number of dimensions of array ``A`` (i.e., the result of ``len(shape1)``).
+
+#. Let ``N2`` be the number of dimensions of array ``B`` (i.e., the result of ``len(shape2)``).
+
+#. Let ``N`` be the maximum value of ``N1`` and ``N2`` (i.e., the result of ``max(N1, N2)``).
+
+#. Let ``shape`` be a temporary list of length ``N`` for storing the shape of the result array.
+
+#. Let ``i`` be ``N-1``.
+
+#. Repeat, while ``i >= 0``
+
+ #. Let ``n1`` be ``N1 - N + i``.
+
+ #. If ``n1 >= 0``, let ``d1`` be the size of dimension ``n1`` for array ``A`` (i.e., the result of ``shape1[n1]``); else, let ``d1`` be ``1``.
+
+ #. Let ``n2`` be ``N2 - N + i``.
+
+ #. If ``n2 >= 0``, let ``d2`` be the size of dimension ``n2`` for array ``B`` (i.e., the result of ``shape2[n2]``); else, let ``d2`` be ``1``.
+
+ #. If ``d1 == 1``, then set the ``i``\th element of ``shape`` to ``d2``.
+
+ #. Else, if ``d2 == 1``, then
+
+ - set the ``i``\th element of ``shape`` to ``d1``.
+
+ #. Else, if ``d1 == d2``, then
+
+ - set the ``i``\th element of ``shape`` to ``d1``.
+
+ #. Else, throw an exception.
+
+ #. Set ``i`` to ``i-1``.
+
+#. Let ``tuple(shape)`` be the shape of the result array.
+
+Examples
+~~~~~~~~
+
+The following examples demonstrate the application of the broadcasting algorithm for two compatible arrays.
+
+::
+
+ A (4d array): 8 x 1 x 6 x 1
+ B (3d array): 7 x 1 x 5
+ ---------------------------------
+ Result (4d array): 8 x 7 x 6 x 5
+ A (2d array): 5 x 4
+ B (1d array): 1
+ -------------------------
+ Result (2d array): 5 x 4
+ A (2d array): 5 x 4
+ B (1d array): 4
+ -------------------------
+ Result (2d array): 5 x 4
+ A (3d array): 15 x 3 x 5
+ B (3d array): 15 x 1 x 5
+ ------------------------------
+ Result (3d array): 15 x 3 x 5
+ A (3d array): 15 x 3 x 5
+ B (2d array): 3 x 5
+ ------------------------------
+ Result (3d array): 15 x 3 x 5
+ A (3d array): 15 x 3 x 5
+ B (2d array): 3 x 1
+ ------------------------------
+ Result (3d array): 15 x 3 x 5
+
+
+The following examples demonstrate array shapes which do **not** broadcast.
+
+::
+
+ A (1d array): 3
+ B (1d array): 4 # dimension does not match
+
+ A (2d array): 2 x 1
+ B (3d array): 8 x 4 x 3 # second dimension does not match
+
+ A (3d array): 15 x 3 x 5
+ B (2d array): 15 x 3 # singleton dimensions can only be prepended, not appended
+
+In-place Semantics
+------------------
+
+As implied by the broadcasting algorithm, in-place element-wise operations must not change the shape of the in-place array as a result of broadcasting.
diff --git a/spec/API_specification/constants.rst b/spec/2021.12/API_specification/constants.rst
similarity index 93%
rename from spec/API_specification/constants.rst
rename to spec/2021.12/API_specification/constants.rst
index abe256533..71cb8688d 100644
--- a/spec/API_specification/constants.rst
+++ b/spec/2021.12/API_specification/constants.rst
@@ -10,7 +10,7 @@ A conforming implementation of the array API standard must provide and support t
Objects in API
--------------
-.. currentmodule:: array_api
+.. currentmodule:: array_api.constants
..
NOTE: please keep the functions in alphabetical order
diff --git a/spec/2021.12/API_specification/creation_functions.rst b/spec/2021.12/API_specification/creation_functions.rst
new file mode 100644
index 000000000..9984ff04c
--- /dev/null
+++ b/spec/2021.12/API_specification/creation_functions.rst
@@ -0,0 +1,38 @@
+Creation Functions
+==================
+
+ Array API specification for creating arrays.
+
+A conforming implementation of the array API standard must provide and support the following functions adhering to the following conventions.
+
+- Positional parameters must be `positional-only <https://www.python.org/dev/peps/pep-0570/>`_ parameters. Positional-only parameters have no externally-usable name. When a function accepting positional-only parameters is called, positional arguments are mapped to these parameters based solely on their order.
+- Optional parameters must be `keyword-only <https://www.python.org/dev/peps/pep-3102/>`_ arguments.
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ arange
+ asarray
+ empty
+ empty_like
+ eye
+ from_dlpack
+ full
+ full_like
+ linspace
+ meshgrid
+ ones
+ ones_like
+ tril
+ triu
+ zeros
+ zeros_like
diff --git a/spec/2021.12/API_specification/data_type_functions.rst b/spec/2021.12/API_specification/data_type_functions.rst
new file mode 100644
index 000000000..bb32d2b7f
--- /dev/null
+++ b/spec/2021.12/API_specification/data_type_functions.rst
@@ -0,0 +1,27 @@
+Data Type Functions
+===================
+
+ Array API specification for data type functions.
+
+A conforming implementation of the array API standard must provide and support the following data type functions.
+
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ astype
+ broadcast_arrays
+ broadcast_to
+ can_cast
+ finfo
+ iinfo
+ result_type
diff --git a/spec/2021.12/API_specification/data_types.rst b/spec/2021.12/API_specification/data_types.rst
new file mode 100644
index 000000000..48a8bd1a0
--- /dev/null
+++ b/spec/2021.12/API_specification/data_types.rst
@@ -0,0 +1,164 @@
+.. _data-types:
+
+Data Types
+==========
+
+ Array API specification for supported data types.
+
+A conforming implementation of the array API standard must provide and support the following data types.
+
+bool
+----
+
+Boolean (``True`` or ``False``).
+
+int8
+----
+
+An 8-bit signed integer whose values exist on the interval ``[-128, +127]``.
+
+int16
+-----
+
+A 16-bit signed integer whose values exist on the interval ``[-32,767, +32,767]``.
+
+int32
+-----
+
+A 32-bit signed integer whose values exist on the interval ``[-2,147,483,647, +2,147,483,647]``.
+
+int64
+-----
+
+A 64-bit signed integer whose values exist on the interval ``[-9,223,372,036,854,775,807, +9,223,372,036,854,775,807]``.
+
+uint8
+-----
+
+An 8-bit unsigned integer whose values exist on the interval ``[0, +255]``.
+
+uint16
+------
+
+A 16-bit unsigned integer whose values exist on the interval ``[0, +65,535]``.
+
+uint32
+------
+
+A 32-bit unsigned integer whose values exist on the interval ``[0, +4,294,967,295]``.
+
+uint64
+------
+
+A 64-bit unsigned integer whose values exist on the interval ``[0, +18,446,744,073,709,551,615]``.
+
+float32
+-------
+
+IEEE 754 single-precision (32-bit) binary floating-point number (see IEEE 754-2019).
+
+float64
+-------
+
+IEEE 754 double-precision (64-bit) binary floating-point number (see IEEE 754-2019).
+
+.. note::
+ IEEE 754-2019 requires support for subnormal (a.k.a., denormal) numbers, which are useful for supporting gradual underflow. However, hardware support for subnormal numbers is not universal, and many platforms (e.g., accelerators) and compilers support toggling denormals-are-zero (DAZ) and/or flush-to-zero (FTZ) behavior to increase performance and to guard against timing attacks.
+
+ Accordingly, subnormal behavior is left unspecified and, thus, implementation-defined. Conforming implementations may vary in their support for subnormal numbers.
+
+.. admonition:: Future extension
+ :class: admonition tip
+
+ ``complex64`` and ``complex128`` data types are expected to be included in the next version of this standard and to have the following casting rules (will be added to :ref:`type-promotion`):
+
+ .. image:: ../../_static/images/dtype_promotion_complex.png
+
+ See `array-api/issues/102 <https://github.com/data-apis/array-api/issues/102>`_ for more details
+
+.. note::
+ A conforming implementation of the array API standard may provide and support additional data types beyond those described in this specification.
+
+.. _data-type-objects:
+
+Data Type Objects
+-----------------
+
+Data types ("dtypes") are objects which are used as ``dtype`` specifiers in functions and methods (e.g., ``zeros((2, 3), dtype=float32)``).
+
+.. note::
+ A conforming implementation may add additional methods or attributes to data type objects beyond those described in this specification.
+
+.. note::
+ Implementations may provide other ways to specify data types (e.g., ``zeros((2, 3), dtype='f4')``) which are not described in this specification; however, in order to ensure portability, array library consumers are recommended to use data type objects as provided by specification conforming array libraries.
+
+A conforming implementation of the array API standard must provide and support data type objects having the following attributes and methods.
+
+Methods
+~~~~~~~
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. currentmodule:: array_api.data_types
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ __eq__
+
+
+.. _data-type-defaults:
+
+Default Data Types
+------------------
+
+A conforming implementation of the array API standard must define the following default data types.
+
+- a default floating-point data type (either ``float32`` or ``float64``).
+- a default integer data type (either ``int32`` or ``int64``).
+- a default array index data type (either ``int32`` or ``int64``).
+
+The default floating-point data type must be the same across platforms.
+
+The default integer data type should be the same across platforms, but the default may vary depending on whether Python is 32-bit or 64-bit.
+
+The default array index data type may be ``int32`` on 32-bit platforms, but the default should be ``int64`` otherwise.
+
+.. note::
+ The default data types should be clearly defined in a conforming library's documentation.
+
+.. _data-type-categories:
+
+Data Type Categories
+--------------------
+
+For the purpose of organizing functions within this specification, the following data type categories are defined.
+
+.. note::
+ Conforming libraries are not required to organize data types according to these categories. These categories are only intended for use within this specification.
+
+.. note::
+ Future versions of the specification will include additional categories for complex data types.
+
+
+Numeric Data Types
+~~~~~~~~~~~~~~~~~~
+
+``int8``, ``int16``, ``int32``, ``int64``, ``uint8``, ``uint16``, ``uint32``, ``uint64``, ``float32``, and ``float64`` (i.e., all data types except for ``bool``).
+
+Integer Data Types
+~~~~~~~~~~~~~~~~~~
+
+``int8``, ``int16``, ``int32``, ``int64``, ``uint8``, ``uint16``, ``uint32``, and ``uint64``.
+
+Floating-point Data Types
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+``float32`` and ``float64``.
+
+Boolean Data Types
+~~~~~~~~~~~~~~~~~~
+
+``bool``.
diff --git a/spec/2021.12/API_specification/elementwise_functions.rst b/spec/2021.12/API_specification/elementwise_functions.rst
new file mode 100644
index 000000000..02e3d50b6
--- /dev/null
+++ b/spec/2021.12/API_specification/elementwise_functions.rst
@@ -0,0 +1,86 @@
+.. _element-wise-functions:
+
+Element-wise Functions
+======================
+
+ Array API specification for element-wise functions.
+
+A conforming implementation of the array API standard must provide and support the following functions adhering to the following conventions.
+
+- Positional parameters must be `positional-only <https://www.python.org/dev/peps/pep-0570/>`_ parameters. Positional-only parameters have no externally-usable name. When a function accepting positional-only parameters is called, positional arguments are mapped to these parameters based solely on their order.
+- Optional parameters must be `keyword-only <https://www.python.org/dev/peps/pep-3102/>`_ arguments.
+- Broadcasting semantics must follow the semantics defined in :ref:`broadcasting`.
+- Unless stated otherwise, functions must support the data types defined in :ref:`data-types`.
+- Functions may only be required for a subset of input data types. Libraries may choose to implement functions for additional data types, but that behavior is not required by the specification. See :ref:`data-type-categories`.
+- Unless stated otherwise, functions must adhere to the type promotion rules defined in :ref:`type-promotion`.
+- Unless stated otherwise, floating-point operations must adhere to IEEE 754-2019.
+- Unless stated otherwise, element-wise mathematical functions must satisfy the minimum accuracy requirements defined in :ref:`accuracy`.
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ abs
+ acos
+ acosh
+ add
+ asin
+ asinh
+ atan
+ atan2
+ atanh
+ bitwise_and
+ bitwise_left_shift
+ bitwise_invert
+ bitwise_or
+ bitwise_right_shift
+ bitwise_xor
+ ceil
+ cos
+ cosh
+ divide
+ equal
+ exp
+ expm1
+ floor
+ floor_divide
+ greater
+ greater_equal
+ isfinite
+ isinf
+ isnan
+ less
+ less_equal
+ log
+ log1p
+ log2
+ log10
+ logaddexp
+ logical_and
+ logical_not
+ logical_or
+ logical_xor
+ multiply
+ negative
+ not_equal
+ positive
+ pow
+ remainder
+ round
+ sign
+ sin
+ sinh
+ square
+ sqrt
+ subtract
+ tan
+ tanh
+ trunc
diff --git a/spec/API_specification/function_and_method_signatures.rst b/spec/2021.12/API_specification/function_and_method_signatures.rst
similarity index 100%
rename from spec/API_specification/function_and_method_signatures.rst
rename to spec/2021.12/API_specification/function_and_method_signatures.rst
diff --git a/spec/2021.12/API_specification/index.rst b/spec/2021.12/API_specification/index.rst
new file mode 100644
index 000000000..1d4c9cfef
--- /dev/null
+++ b/spec/2021.12/API_specification/index.rst
@@ -0,0 +1,26 @@
+.. _api-specification:
+
+API specification
+=================
+
+.. toctree::
+ :caption: API specification
+ :maxdepth: 3
+
+ array_object
+ broadcasting
+ constants
+ creation_functions
+ data_type_functions
+ data_types
+ elementwise_functions
+ function_and_method_signatures
+ indexing
+ linear_algebra_functions
+ manipulation_functions
+ searching_functions
+ set_functions
+ sorting_functions
+ statistical_functions
+ type_promotion
+ utility_functions
diff --git a/spec/API_specification/indexing.rst b/spec/2021.12/API_specification/indexing.rst
similarity index 100%
rename from spec/API_specification/indexing.rst
rename to spec/2021.12/API_specification/indexing.rst
diff --git a/spec/2021.12/API_specification/linear_algebra_functions.rst b/spec/2021.12/API_specification/linear_algebra_functions.rst
new file mode 100644
index 000000000..9bae18e77
--- /dev/null
+++ b/spec/2021.12/API_specification/linear_algebra_functions.rst
@@ -0,0 +1,29 @@
+Linear Algebra Functions
+========================
+
+ Array API specification for linear algebra functions.
+
+A conforming implementation of the array API standard must provide and support the following functions adhering to the following conventions.
+
+* Positional parameters must be `positional-only <https://www.python.org/dev/peps/pep-0570/>`_ parameters. Positional-only parameters have no externally-usable name. When a function accepting positional-only parameters is called, positional arguments are mapped to these parameters based solely on their order.
+* Optional parameters must be `keyword-only <https://www.python.org/dev/peps/pep-3102/>`_ arguments.
+* Broadcasting semantics must follow the semantics defined in :ref:`broadcasting`.
+* Unless stated otherwise, functions must support the data types defined in :ref:`data-types`.
+* Unless stated otherwise, functions must adhere to the type promotion rules defined in :ref:`type-promotion`.
+* Unless stated otherwise, floating-point operations must adhere to IEEE 754-2019.
+
+.. currentmodule:: array_api
+
+Objects in API
+--------------
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ matmul
+ matrix_transpose
+ tensordot
+ vecdot
diff --git a/spec/2021.12/API_specification/manipulation_functions.rst b/spec/2021.12/API_specification/manipulation_functions.rst
new file mode 100644
index 000000000..86ad2697f
--- /dev/null
+++ b/spec/2021.12/API_specification/manipulation_functions.rst
@@ -0,0 +1,31 @@
+Manipulation Functions
+======================
+
+ Array API specification for manipulating arrays.
+
+A conforming implementation of the array API standard must provide and support the following functions adhering to the following conventions.
+
+- Positional parameters must be `positional-only <https://www.python.org/dev/peps/pep-0570/>`_ parameters. Positional-only parameters have no externally-usable name. When a function accepting positional-only parameters is called, positional arguments are mapped to these parameters based solely on their order.
+- Optional parameters must be `keyword-only <https://www.python.org/dev/peps/pep-3102/>`_ arguments.
+- Unless stated otherwise, functions must adhere to the type promotion rules defined in :ref:`type-promotion`.
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ concat
+ expand_dims
+ flip
+ permute_dims
+ reshape
+ roll
+ squeeze
+ stack
diff --git a/spec/2021.12/API_specification/searching_functions.rst b/spec/2021.12/API_specification/searching_functions.rst
new file mode 100644
index 000000000..bf09e4c8a
--- /dev/null
+++ b/spec/2021.12/API_specification/searching_functions.rst
@@ -0,0 +1,31 @@
+.. _searching-functions:
+
+Searching Functions
+===================
+
+ Array API specification for functions for searching arrays.
+
+A conforming implementation of the array API standard must provide and support the following functions adhering to the following conventions.
+
+- Positional parameters must be `positional-only <https://www.python.org/dev/peps/pep-0570/>`_ parameters. Positional-only parameters have no externally-usable name. When a function accepting positional-only parameters is called, positional arguments are mapped to these parameters based solely on their order.
+- Optional parameters must be `keyword-only <https://www.python.org/dev/peps/pep-3102/>`_ arguments.
+- Broadcasting semantics must follow the semantics defined in :ref:`broadcasting`.
+- Unless stated otherwise, functions must support the data types defined in :ref:`data-types`.
+- Unless stated otherwise, functions must adhere to the type promotion rules defined in :ref:`type-promotion`.
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ argmax
+ argmin
+ nonzero
+ where
diff --git a/spec/2021.12/API_specification/set_functions.rst b/spec/2021.12/API_specification/set_functions.rst
new file mode 100644
index 000000000..b7072d100
--- /dev/null
+++ b/spec/2021.12/API_specification/set_functions.rst
@@ -0,0 +1,27 @@
+Set Functions
+=============
+
+ Array API specification for creating and operating on sets.
+
+A conforming implementation of the array API standard must provide and support the following functions adhering to the following conventions.
+
+- Positional parameters must be `positional-only <https://www.python.org/dev/peps/pep-0570/>`_ parameters. Positional-only parameters have no externally-usable name. When a function accepting positional-only parameters is called, positional arguments are mapped to these parameters based solely on their order.
+- Optional parameters must be `keyword-only <https://www.python.org/dev/peps/pep-3102/>`_ arguments.
+- Unless stated otherwise, functions must support the data types defined in :ref:`data-types`.
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ unique_all
+ unique_counts
+ unique_inverse
+ unique_values
diff --git a/spec/2021.12/API_specification/sorting_functions.rst b/spec/2021.12/API_specification/sorting_functions.rst
new file mode 100644
index 000000000..19d7fb439
--- /dev/null
+++ b/spec/2021.12/API_specification/sorting_functions.rst
@@ -0,0 +1,34 @@
+Sorting Functions
+=================
+
+ Array API specification for sorting functions.
+
+A conforming implementation of the array API standard must provide and support the following functions adhering to the following conventions.
+
+* Positional parameters must be `positional-only <https://www.python.org/dev/peps/pep-0570/>`_ parameters. Positional-only parameters have no externally-usable name. When a function accepting positional-only parameters is called, positional arguments are mapped to these parameters based solely on their order.
+* Optional parameters must be `keyword-only <https://www.python.org/dev/peps/pep-3102/>`_ arguments.
+* Unless stated otherwise, functions must support the data types defined in :ref:`data-types`.
+
+.. note::
+
+ For floating-point input arrays, the sort order of NaNs and signed zeros is unspecified and thus implementation-dependent.
+
+ Implementations may choose to sort signed zeros (``-0 < +0``) or may choose to rely solely on value equality (``==``).
+
+ Implementations may choose to sort NaNs (e.g., to the end or to the beginning of a returned array) or leave them in-place. Should an implementation sort NaNs, the sorting convention should be clearly documented in the conforming implementation's documentation.
+
+ While defining a sort order for IEEE 754 floating-point numbers is recommended in order to facilitate reproducible and consistent sort results, doing so is not currently required by this specification.
+
+.. currentmodule:: array_api
+
+Objects in API
+--------------
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ argsort
+ sort
diff --git a/spec/2021.12/API_specification/statistical_functions.rst b/spec/2021.12/API_specification/statistical_functions.rst
new file mode 100644
index 000000000..6734506ed
--- /dev/null
+++ b/spec/2021.12/API_specification/statistical_functions.rst
@@ -0,0 +1,33 @@
+Statistical Functions
+=====================
+
+ Array API specification for statistical functions.
+
+A conforming implementation of the array API standard must provide and support the following functions adhering to the following conventions.
+
+- Positional parameters must be `positional-only <https://www.python.org/dev/peps/pep-0570/>`_ parameters. Positional-only parameters have no externally-usable name. When a function accepting positional-only parameters is called, positional arguments are mapped to these parameters based solely on their order.
+- Optional parameters must be `keyword-only <https://www.python.org/dev/peps/pep-3102/>`_ arguments.
+- Broadcasting semantics must follow the semantics defined in :ref:`broadcasting`.
+- Unless stated otherwise, functions must support the data types defined in :ref:`data-types`.
+- Unless stated otherwise, functions must adhere to the type promotion rules defined in :ref:`type-promotion`.
+- Unless stated otherwise, floating-point operations must adhere to IEEE 754-2019.
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ max
+ mean
+ min
+ prod
+ std
+ sum
+ var
diff --git a/spec/2021.12/API_specification/type_promotion.rst b/spec/2021.12/API_specification/type_promotion.rst
new file mode 100644
index 000000000..2b1a422a0
--- /dev/null
+++ b/spec/2021.12/API_specification/type_promotion.rst
@@ -0,0 +1,136 @@
+.. _type-promotion:
+
+Type Promotion Rules
+====================
+
+ Array API specification for type promotion rules.
+
+Type promotion rules can be understood at a high level from the following diagram:
+
+.. image:: ../../_static/images/dtype_promotion_lattice_no_complex.png
+ :target: Type promotion diagram
+
+*Type promotion diagram. Promotion between any two types is given by their join on this lattice. Only the types of participating arrays matter, not their values. Dashed lines indicate that behavior for Python scalars is undefined on overflow. Boolean, integer and floating-point dtypes are not connected, indicating mixed-kind promotion is undefined.*
+
+Rules
+-----
+
+A conforming implementation of the array API standard must implement the following type promotion rules governing the common result type for two **array** operands during an arithmetic operation.
+
+A conforming implementation of the array API standard may support additional type promotion rules beyond those described in this specification.
+
+.. note::
+ Type codes are used here to keep tables readable; they are not part of the standard. In code, use the data type objects specified in :ref:`data-types` (e.g., ``int16`` rather than ``'i2'``).
+
+..
+ Note: please keep table columns aligned
+
+The following type promotion tables specify the casting behavior for operations involving two array operands. When more than two array operands participate, application of the promotion tables is associative (i.e., the result does not depend on operand order).
+
+Signed integer type promotion table
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
++--------+----+----+----+----+
+| | i1 | i2 | i4 | i8 |
++========+====+====+====+====+
+| **i1** | i1 | i2 | i4 | i8 |
++--------+----+----+----+----+
+| **i2** | i2 | i2 | i4 | i8 |
++--------+----+----+----+----+
+| **i4** | i4 | i4 | i4 | i8 |
++--------+----+----+----+----+
+| **i8** | i8 | i8 | i8 | i8 |
++--------+----+----+----+----+
+
+where
+
+- **i1**: 8-bit signed integer (i.e., ``int8``)
+- **i2**: 16-bit signed integer (i.e., ``int16``)
+- **i4**: 32-bit signed integer (i.e., ``int32``)
+- **i8**: 64-bit signed integer (i.e., ``int64``)
+
+Unsigned integer type promotion table
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
++--------+----+----+----+----+
+| | u1 | u2 | u4 | u8 |
++========+====+====+====+====+
+| **u1** | u1 | u2 | u4 | u8 |
++--------+----+----+----+----+
+| **u2** | u2 | u2 | u4 | u8 |
++--------+----+----+----+----+
+| **u4** | u4 | u4 | u4 | u8 |
++--------+----+----+----+----+
+| **u8** | u8 | u8 | u8 | u8 |
++--------+----+----+----+----+
+
+where
+
+- **u1**: 8-bit unsigned integer (i.e., ``uint8``)
+- **u2**: 16-bit unsigned integer (i.e., ``uint16``)
+- **u4**: 32-bit unsigned integer (i.e., ``uint32``)
+- **u8**: 64-bit unsigned integer (i.e., ``uint64``)
+
+Mixed unsigned and signed integer type promotion table
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
++--------+----+----+----+
+| | u1 | u2 | u4 |
++========+====+====+====+
+| **i1** | i2 | i4 | i8 |
++--------+----+----+----+
+| **i2** | i2 | i4 | i8 |
++--------+----+----+----+
+| **i4** | i4 | i4 | i8 |
++--------+----+----+----+
+| **i8** | i8 | i8 | i8 |
++--------+----+----+----+
+
+Floating-point type promotion table
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
++--------+----+----+
+| | f4 | f8 |
++========+====+====+
+| **f4** | f4 | f8 |
++--------+----+----+
+| **f8** | f8 | f8 |
++--------+----+----+
+
+where
+
+- **f4**: single-precision (32-bit) floating-point number (i.e., ``float32``)
+- **f8**: double-precision (64-bit) floating-point number (i.e., ``float64``)
+
+Notes
+~~~~~
+
+- Type promotion rules must apply when determining the common result type for two **array** operands during an arithmetic operation, regardless of array dimension. Accordingly, zero-dimensional arrays must be subject to the same type promotion rules as dimensional arrays.
+- Type promotion of non-numerical data types to numerical data types is unspecified (e.g., ``bool`` to ``intxx`` or ``floatxx``).
+
+.. note::
+ Mixed integer and floating-point type promotion rules are not specified because behavior varies between implementations.
+
+Mixing arrays with Python scalars
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Using Python scalars (i.e., instances of ``bool``, ``int``, ``float``) together with arrays must be supported for:
+
+- ``array <op> scalar``
+- ``scalar <op> array``
+
+where ``<op>`` is a built-in operator (including in-place operators, but excluding the matmul ``@`` operator; see :ref:`operators` for operators supported by the array object) and ``scalar`` has a type and value compatible with the array data type:
+
+- a Python ``bool`` for a ``bool`` array data type.
+- a Python ``int`` within the bounds of the given data type for integer array :ref:`data-types`.
+- a Python ``int`` or ``float`` for floating-point array data types.
+
+Provided the above requirements are met, the expected behavior is equivalent to:
+
+1. Convert the scalar to zero-dimensional array with the same data type as that of the array used in the expression.
+2. Execute the operation for ``array <op> 0-D array`` (or ``0-D array <op> array`` if ``scalar`` was the left-hand argument).
+
+.. note::
+ Behavior is not specified when mixing a Python ``float`` and an array with an integer data type; this may give ``float32``, ``float64``, or raise an exception. Behavior is implementation-specific.
+
+ The behavior is also not specified for integers outside of the bounds of a given integer data type. Integers outside of bounds may result in overflow or an error.
diff --git a/spec/2021.12/API_specification/utility_functions.rst b/spec/2021.12/API_specification/utility_functions.rst
new file mode 100644
index 000000000..f869b4321
--- /dev/null
+++ b/spec/2021.12/API_specification/utility_functions.rst
@@ -0,0 +1,28 @@
+Utility Functions
+=================
+
+ Array API specification for utility functions.
+
+A conforming implementation of the array API standard must provide and support the following functions adhering to the following conventions.
+
+- Positional parameters must be `positional-only <https://www.python.org/dev/peps/pep-0570/>`_ parameters. Positional-only parameters have no externally-usable name. When a function accepting positional-only parameters is called, positional arguments are mapped to these parameters based solely on their order.
+- Optional parameters must be `keyword-only <https://www.python.org/dev/peps/pep-3102/>`_ arguments.
+- Broadcasting semantics must follow the semantics defined in :ref:`broadcasting`.
+- Unless stated otherwise, functions must support the data types defined in :ref:`data-types`.
+- Unless stated otherwise, functions must adhere to the type promotion rules defined in :ref:`type-promotion`.
+- Unless stated otherwise, floating-point operations must adhere to IEEE 754-2019.
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ all
+ any
diff --git a/spec/assumptions.md b/spec/2021.12/assumptions.md
similarity index 96%
rename from spec/assumptions.md
rename to spec/2021.12/assumptions.md
index 3a315e6cd..b11482c5a 100644
--- a/spec/assumptions.md
+++ b/spec/2021.12/assumptions.md
@@ -26,7 +26,7 @@ of functions to be predictable from input dtypes only rather than input values.
The only dependency that's assumed in this standard is that on Python itself.
Python >= 3.8 is assumed, motivated by the use of positional-only parameters
-(see [function and method signatures](API_specification/function_and_method_signatures.md)).
+(see [function and method signatures](API_specification/function_and_method_signatures.rst)).
Importantly, array libraries are not assumed to be aware of each other, or of
a common array-specific layer. The [use cases](use_cases.md) do not require
@@ -35,11 +35,11 @@ such a coupling. Facilitation support of multiple array types in downstream
libraries is an important use case however, the assumed dependency structure
for that is:
-
+
Array libraries may know how to interoperate with each other, for example by
constructing their own array type from that of another library or by shared
-memory use of an array (see [Data interchange mechanisms](design_topics/data_interchange.md)).
+memory use of an array (see [Data interchange mechanisms](design_topics/data_interchange.rst)).
This can be done without a dependency though - only adherence to a protocol is
enough.
diff --git a/spec/2021.12/benchmark_suite.md b/spec/2021.12/benchmark_suite.md
new file mode 100644
index 000000000..41066c6a4
--- /dev/null
+++ b/spec/2021.12/benchmark_suite.md
@@ -0,0 +1,3 @@
+# Benchmark suite
+
+Adding a benchmark suite is planned in the future.
diff --git a/spec/2021.12/conf.py b/spec/2021.12/conf.py
new file mode 100644
index 000000000..9638670b4
--- /dev/null
+++ b/spec/2021.12/conf.py
@@ -0,0 +1,14 @@
+import sys
+from pathlib import Path
+
+sys.path.insert(0, str(Path(__file__).parents[2] / "src"))
+
+from array_api_stubs import _2021_12 as stubs_mod
+from _array_api_conf import *
+
+release = "2021.12"
+
+nav_title = html_theme_options.get("nav_title") + " v{}".format(release)
+html_theme_options.update({"nav_title": nav_title})
+
+sys.modules["array_api"] = stubs_mod
diff --git a/spec/2021.12/design_topics/C_API.rst b/spec/2021.12/design_topics/C_API.rst
new file mode 100644
index 000000000..ec2b721f8
--- /dev/null
+++ b/spec/2021.12/design_topics/C_API.rst
@@ -0,0 +1,94 @@
+.. _C-API:
+
+C API
+=====
+
+Use of a C API is out of scope for this array API, as mentioned in :ref:`Scope`.
+There are a lot of libraries that do use such an API - in particular via Cython code
+or via direct usage of the NumPy C API. When the maintainers of such libraries
+want to use this array API standard to support multiple types of arrays, they
+need a way to deal with that issue. This section aims to provide some guidance.
+
+The assumption in the rest of this section is that performance matters for the library,
+and hence the goal is to make other array types work without converting to a
+``numpy.ndarray`` or another particular array type. If that's not the case (e.g. for a
+visualization package), then other array types can simply be handled by converting
+to the supported array type.
+
+.. note::
+ Often a zero-copy conversion to ``numpy.ndarray`` is possible, at least for CPU arrays.
+ If that's the case, this may be a good way to support other array types.
+ The main difficulty in that case will be getting the return array type right - however,
+ this standard does provide a Python-level API for array construction that should allow
+ doing this. A relevant question is if it's possible to know with
+ certainty that a conversion will be zero-copy. This may indeed be
+ possible, see :ref:`data-interchange`.
+
+
+Example situations for C/Cython usage
+-------------------------------------
+
+Situation 1: a Python package that is mostly pure Python, with a limited number of Cython extensions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. note::
+ Projects in this situation include Statsmodels, scikit-bio and QuTiP
+
+Main strategy: documentation. The functionality using Cython code will not support other array types (or only with conversion to/from ``numpy.ndarray``), which can be documented per function.
+
+
+Situation 2: a Python package that contains a lot of Cython code
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. note::
+ Projects in this situation include scikit-learn and scikit-image
+
+Main strategy: add support for other array types *per submodule*. This keeps it manageable to explain to the user which functionality does and doesn't have support.
+
+Longer term: specific support for particular array types (e.g. ``cupy.ndarray`` can be supported with Python-only code via ``cupy.ElementwiseKernel``).
+
+
+Situation 3: a Python package that uses the NumPy or Python C API directly
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. note::
+ Projects in this situation include SciPy and Astropy
+
+Strategy: similar to *situation 2*, but the number of submodules that can support all array types may be limited.
+
+
+Device support
+--------------
+
+Supporting non-CPU array types in code using the C API or Cython seems problematic,
+this almost inevitably will require custom device-specific code (e.g., CUDA, ROCm) or
+something like JIT compilation with Numba.
+
+
+Other longer-term approaches
+----------------------------
+
+Further Python API standardization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There may be cases where it makes sense to standardize additional sets of
+functions, because they're important enough that array libraries tend to
+reimplement them. An example of this may be *special functions*, as provided
+by ``scipy.special``. Bessel and gamma functions for example are commonly
+reimplemented by array libraries. This may avoid having to drop into a
+particular implementation that does use a C API (e.g., one can then rely on
+``arraylib.special.gamma`` rather than having to use ``scipy.special.gamma``).
+
+HPy
+~~~
+
+`HPy <https://github.com/hpyproject/hpy>`_ is a new project that will provide a higher-level
+C API and ABI than CPython offers. A Cython backend targeting HPy will be provided as well.
+
+- Better PyPy support
+- Universal ABI - single binary for all supported Python versions
+- Cython backend generating HPy rather than CPython code
+
+HPy isn't quite ready for mainstream usage today, but once it is, it may
+help make supporting multiple array libraries or adding non-CPU device
+support to Cython more feasible.
diff --git a/spec/design_topics/accuracy.rst b/spec/2021.12/design_topics/accuracy.rst
similarity index 98%
rename from spec/design_topics/accuracy.rst
rename to spec/2021.12/design_topics/accuracy.rst
index df417d546..8c97db698 100644
--- a/spec/design_topics/accuracy.rst
+++ b/spec/2021.12/design_topics/accuracy.rst
@@ -74,4 +74,4 @@ This specification does not specify accuracy requirements for statistical functi
Linear Algebra
--------------
-This specification does not specify accuracy requirements for linear algebra functions; however, this specification does expect that a conforming implementation of the array API standard will make a best-effort attempt to ensure that its implementations are theoretically sound and numerically robust.
\ No newline at end of file
+This specification does not specify accuracy requirements for linear algebra functions; however, this specification does expect that a conforming implementation of the array API standard will make a best-effort attempt to ensure that its implementations are theoretically sound and numerically robust.
diff --git a/spec/design_topics/copies_views_and_mutation.rst b/spec/2021.12/design_topics/copies_views_and_mutation.rst
similarity index 100%
rename from spec/design_topics/copies_views_and_mutation.rst
rename to spec/2021.12/design_topics/copies_views_and_mutation.rst
diff --git a/spec/design_topics/data_dependent_output_shapes.rst b/spec/2021.12/design_topics/data_dependent_output_shapes.rst
similarity index 100%
rename from spec/design_topics/data_dependent_output_shapes.rst
rename to spec/2021.12/design_topics/data_dependent_output_shapes.rst
diff --git a/spec/design_topics/data_interchange.rst b/spec/2021.12/design_topics/data_interchange.rst
similarity index 100%
rename from spec/design_topics/data_interchange.rst
rename to spec/2021.12/design_topics/data_interchange.rst
diff --git a/spec/design_topics/device_support.rst b/spec/2021.12/design_topics/device_support.rst
similarity index 100%
rename from spec/design_topics/device_support.rst
rename to spec/2021.12/design_topics/device_support.rst
diff --git a/spec/2021.12/design_topics/index.rst b/spec/2021.12/design_topics/index.rst
new file mode 100644
index 000000000..2729cdbe4
--- /dev/null
+++ b/spec/2021.12/design_topics/index.rst
@@ -0,0 +1,15 @@
+Design topics & constraints
+===========================
+
+.. toctree::
+ :caption: Design topics & constraints
+ :maxdepth: 1
+
+ copies_views_and_mutation
+ data_dependent_output_shapes
+ data_interchange
+ device_support
+ static_typing
+ accuracy
+ C_API
+ parallelism
diff --git a/spec/design_topics/parallelism.rst b/spec/2021.12/design_topics/parallelism.rst
similarity index 95%
rename from spec/design_topics/parallelism.rst
rename to spec/2021.12/design_topics/parallelism.rst
index 77d06c966..f013a9cf9 100644
--- a/spec/design_topics/parallelism.rst
+++ b/spec/2021.12/design_topics/parallelism.rst
@@ -21,4 +21,4 @@ coordination of parallelization behavior in a stack of Python libraries are:
Option (1) may possibly fit in a future version of this array API standard.
`array-api issue 4 <https://github.com/data-apis/array-api/issues/4>`_ contains
-more detailed discussion on the topic of parallelism.
\ No newline at end of file
+more detailed discussion on the topic of parallelism.
diff --git a/spec/design_topics/static_typing.rst b/spec/2021.12/design_topics/static_typing.rst
similarity index 100%
rename from spec/design_topics/static_typing.rst
rename to spec/2021.12/design_topics/static_typing.rst
diff --git a/spec/2021.12/extensions/index.rst b/spec/2021.12/extensions/index.rst
new file mode 100644
index 000000000..1b3b7470f
--- /dev/null
+++ b/spec/2021.12/extensions/index.rst
@@ -0,0 +1,10 @@
+.. _extensions:
+
+Extensions
+==========
+
+.. toctree::
+ :caption: Extensions
+ :maxdepth: 3
+
+ linear_algebra_functions
diff --git a/spec/2021.12/extensions/linear_algebra_functions.rst b/spec/2021.12/extensions/linear_algebra_functions.rst
new file mode 100644
index 000000000..de24d5a0b
--- /dev/null
+++ b/spec/2021.12/extensions/linear_algebra_functions.rst
@@ -0,0 +1,110 @@
+.. _linear-algebra-extension:
+
+Linear Algebra Extension
+========================
+
+ Array API specification for linear algebra functions.
+
+A conforming implementation of the array API standard must provide and support the following functions adhering to the following conventions.
+
+- Positional parameters must be `positional-only <https://www.python.org/dev/peps/pep-0570/>`_ parameters. Positional-only parameters have no externally-usable name. When a function accepting positional-only parameters is called, positional arguments are mapped to these parameters based solely on their order.
+- Optional parameters must be `keyword-only <https://www.python.org/dev/peps/pep-3102/>`_ arguments.
+- Broadcasting semantics must follow the semantics defined in :ref:`broadcasting`.
+- Unless stated otherwise, functions must support the data types defined in :ref:`data-types`.
+- Unless stated otherwise, functions must adhere to the type promotion rules defined in :ref:`type-promotion`.
+- Unless stated otherwise, floating-point operations must adhere to IEEE 754-2019.
+
+Design Principles
+-----------------
+
+A principal goal of this specification is to standardize commonly implemented interfaces among array libraries. While this specification endeavors to avoid straying too far from common practice, this specification does, with due restraint, seek to address design decisions arising more from historical accident than first principles. This is especially true for linear algebra APIs, which have arisen and evolved organically over time and have often been tied to particular underlying implementations (e.g., to BLAS and LAPACK).
+
+Accordingly, the standardization process affords the opportunity to reduce interface complexity among linear algebra APIs by inferring and subsequently codifying common design themes, thus allowing more consistent APIs. What follows is the set of design principles governing the APIs which follow:
+
+1. **Batching**: if an operation is explicitly defined in terms of matrices (i.e., two-dimensional arrays), then the associated interface should support "batching" (i.e., the ability to perform the operation over a "stack" of matrices). Example operations include:
+
+ - ``inv``: computing the multiplicative inverse of a square matrix.
+ - ``cholesky``: performing Cholesky decomposition.
+ - ``matmul``: performing matrix multiplication.
+
+2. **Data types**: if an operation requires decimal operations and :ref:`type-promotion` semantics are undefined (e.g., as is the case for mixed-kind promotions), then the associated interface should be specified as being restricted to floating-point data types. While the specification uses the term "SHOULD" rather than "MUST", a conforming implementation of the array API standard should only ignore the restriction provided overly compelling reasons for doing so. Example operations which should be limited to floating-point data types include:
+
+ - ``inv``: computing the multiplicative inverse.
+ - ``slogdet``: computing the natural logarithm of the absolute value of the determinant.
+ - ``norm``: computing the matrix or vector norm.
+
+ Certain operations are solely comprised of multiplications and additions. Accordingly, associated interfaces need not be restricted to floating-point data types. However, careful consideration should be given to overflow, and use of floating-point data types may be more prudent in practice. Example operations include:
+
+ - ``matmul``: performing matrix multiplication.
+ - ``trace``: computing the sum along the diagonal.
+ - ``cross``: computing the vector cross product.
+
+ Lastly, certain operations may be performed independent of data type, and, thus, the associated interfaces should support all data types specified in this standard. Example operations include:
+
+ - ``matrix_transpose``: computing the transpose.
+ - ``diagonal``: returning the diagonal.
+
+3. **Return values**: if an interface has more than one return value, the interface should return a namedtuple consisting of each value.
+
+ In general, interfaces should avoid polymorphic return values (e.g., returning an array **or** a namedtuple, dependent on, e.g., an optional keyword argument). Dedicated interfaces for each return value type are preferred, as dedicated interfaces are easier to reason about at both the implementation level and user level. Example interfaces which could be combined into a single overloaded interface, but are not, include:
+
+ - ``eigh``: computing both eigenvalues and eigenvectors.
+ - ``eigvalsh``: computing only eigenvalues.
+
+4. **Implementation agnosticism**: a standardized interface should eschew parameterization (including keyword arguments) biased toward particular implementations.
+
+ Historically, at a time when all array computing happened on CPUs, BLAS and LAPACK underpinned most numerical computing libraries and environments. Naturally, language and library abstractions catered to the parameterization of those libraries, often exposing low-level implementation details verbatim in their higher-level interfaces, even if such choices would be considered poor or ill-advised by today's standards (e.g., NumPy's use of `UPLO` in `eigh`). However, the present day is considerably different. While still important, BLAS and LAPACK no longer hold a monopoly over linear algebra operations, especially given the proliferation of devices and hardware on which such operations must be performed. Accordingly, interfaces must be conservative in the parameterization they support in order to best ensure universality. Such conservatism applies even to performance optimization parameters afforded by certain hardware.
+
+5. **Orthogonality**: an interface should have clearly defined and delineated functionality which, ideally, has no overlap with the functionality of other interfaces in the specification. Providing multiple interfaces which can all perform the same operation creates unnecessary confusion regarding interface applicability (i.e., which interface is best at which time) and decreases readability of both library and user code. Where overlap is possible, the specification must be parsimonious in the number of interfaces, ensuring that each interface provides a unique and compelling abstraction. Examples of related interfaces which provide distinct levels of abstraction (and generality) include:
+
+ - ``vecdot``: computing the dot product of two vectors.
+ - ``matmul``: performing matrix multiplication (including between two vectors and thus the dot product).
+ - ``tensordot``: computing tensor contractions (generalized sum-products).
+ - ``einsum``: expressing operations in terms of Einstein summation convention, including dot products and tensor contractions.
+
+ The above can be contrasted with, e.g., NumPy, which provides the following interfaces for computing the dot product or related operations:
+
+ - ``dot``: dot product, matrix multiplication, and tensor contraction.
+ - ``inner``: dot product.
+ - ``vdot``: dot product with flattening and complex conjugation.
+ - ``multi_dot``: chained dot product.
+ - ``tensordot``: tensor contraction.
+ - ``matmul``: matrix multiplication (dot product for two vectors).
+ - ``einsum``: Einstein summation convention.
+
+ where ``dot`` is overloaded based on input array dimensionality and ``vdot`` and ``inner`` exhibit a high degree of overlap with other interfaces. By consolidating interfaces and more clearly delineating behavior, this specification aims to ensure that each interface has a unique purpose and defined use case.
+
+.. currentmodule:: array_api.linalg
+
+Objects in API
+--------------
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ cholesky
+ cross
+ det
+ diagonal
+ eigh
+ eigvalsh
+ inv
+ matmul
+ matrix_norm
+ matrix_power
+ matrix_rank
+ matrix_transpose
+ outer
+ pinv
+ qr
+ slogdet
+ solve
+ svd
+ svdvals
+ tensordot
+ trace
+ vecdot
+ vector_norm
diff --git a/spec/future_API_evolution.md b/spec/2021.12/future_API_evolution.md
similarity index 98%
rename from spec/future_API_evolution.md
rename to spec/2021.12/future_API_evolution.md
index 719b554e3..443f683d5 100644
--- a/spec/future_API_evolution.md
+++ b/spec/2021.12/future_API_evolution.md
@@ -57,4 +57,4 @@ than Python package versioning.
The frequency of releasing a new version of an API standard will likely be at
regular intervals and on the order of one year, however no assumption on
-frequency of new versions appearing must be made.
\ No newline at end of file
+frequency of new versions appearing must be made.
diff --git a/spec/2021.12/index.rst b/spec/2021.12/index.rst
new file mode 100644
index 000000000..706c2f5e6
--- /dev/null
+++ b/spec/2021.12/index.rst
@@ -0,0 +1,30 @@
+Python array API standard
+=========================
+
+Contents
+--------
+
+.. toctree::
+ :caption: Context
+ :maxdepth: 1
+
+ purpose_and_scope
+ use_cases
+ assumptions
+
+.. toctree::
+ :caption: API
+ :maxdepth: 1
+
+ design_topics/index
+ future_API_evolution
+ API_specification/index
+ extensions/index
+
+.. toctree::
+ :caption: Methodology and Usage
+ :maxdepth: 1
+
+ usage_data
+ verification_test_suite
+ benchmark_suite
diff --git a/spec/purpose_and_scope.md b/spec/2021.12/purpose_and_scope.md
similarity index 98%
rename from spec/purpose_and_scope.md
rename to spec/2021.12/purpose_and_scope.md
index 62e9bb8ba..0debbc08a 100644
--- a/spec/purpose_and_scope.md
+++ b/spec/2021.12/purpose_and_scope.md
@@ -111,7 +111,7 @@ Furthermore, meta-topics included in this standard include:
The concrete set of functionality that is in scope for this version of the
standard is shown in this diagram:
-
+
**Goals** for the API standard include:
@@ -151,7 +151,7 @@ standard is shown in this diagram:
_Rationale: this is an important topic for some array-consuming libraries,
but there is no widely shared C/Cython API and hence it doesn't make sense at
this point in time to standardize anything. See
- the [C API section](design_topics/C_API.md) for more details._
+ the [C API section](design_topics/C_API.rst) for more details._
4. Standardization of these dtypes is out of scope: bfloat16, complex, extended
precision floating point, datetime, string, object and void dtypes.
@@ -434,9 +434,6 @@ a (usually fixed-size) multidimensional container of items of the same type and
**axis**:
an array dimension.
-**branch cut**:
-a curve in the complex plane across which a given complex function fails to be continuous.
-
**broadcast**:
automatic (implicit) expansion of array dimensions to be of equal sizes without copying array data for the purpose of making arrays with different shapes have compatible shapes for element-wise operations.
diff --git a/spec/usage_data.md b/spec/2021.12/usage_data.md
similarity index 99%
rename from spec/usage_data.md
rename to spec/2021.12/usage_data.md
index 7963333ff..c2dcd5d65 100644
--- a/spec/usage_data.md
+++ b/spec/2021.12/usage_data.md
@@ -82,5 +82,5 @@ See the [`python-record-api`](https://github.com/data-apis/python-record-api) re
Design and usage data support specification decision-making in the following ways.
- Validate user stories to ensure that proposals satisfy existing needs.
-- Define scope to ensure that proposals address general array library design requirements (i.e., proposals must have broad applicability and be possible to implement with a reasonable amount of effort).
-- Inform technical design discussions to ensure that proposals are grounded in empirical data.
\ No newline at end of file
+- Define scope to ensure that proposals address general array library design requirements (i.e., proposals must have broad applicability and be possible to implement with a reasonable amount of effort).
+- Inform technical design discussions to ensure that proposals are grounded in empirical data.
diff --git a/spec/use_cases.md b/spec/2021.12/use_cases.md
similarity index 99%
rename from spec/use_cases.md
rename to spec/2021.12/use_cases.md
index 50b6bd24d..e24aa50db 100644
--- a/spec/use_cases.md
+++ b/spec/2021.12/use_cases.md
@@ -59,7 +59,7 @@ array implementation as a dependency.
It's clear that SciPy functionality that relies on compiled extensions (C,
C++, Cython, Fortran) directly can't easily be run on another array library
-than NumPy (see [C API](design_topics/C_API.md) for more details about this topic). Pure Python
+than NumPy (see [C API](design_topics/C_API.rst) for more details about this topic). Pure Python
code can work though. There's two main possibilities:
1. Testing with another package, manually or in CI, and simply provide a list
@@ -232,4 +232,4 @@ def check(x, y):
# (this is different from Numpy, whose behaviour depends on
# the *values* of the arguments -- see PyArray_CanCastArrayTo).
self.assertEqual(got.dtype, x.dtype)
-```
\ No newline at end of file
+```
diff --git a/spec/verification_test_suite.md b/spec/2021.12/verification_test_suite.md
similarity index 100%
rename from spec/verification_test_suite.md
rename to spec/2021.12/verification_test_suite.md
diff --git a/spec/API_specification/array_object.rst b/spec/2022.12/API_specification/array_object.rst
similarity index 96%
rename from spec/API_specification/array_object.rst
rename to spec/2022.12/API_specification/array_object.rst
index b15bbdc43..45aec9b34 100644
--- a/spec/API_specification/array_object.rst
+++ b/spec/2022.12/API_specification/array_object.rst
@@ -163,7 +163,8 @@ A conforming implementation of the array API standard must provide and support a
- `operator.ne(x1, x2) `_
- `operator.__ne__(x1, x2) `_
-Comparison operators should be defined for arrays having any data type.
+:meth:`.array.__lt__`, :meth:`.array.__le__`, :meth:`.array.__gt__`, :meth:`.array.__ge__` are only defined for arrays having real-valued data types. Other comparison operators should be defined for arrays having any data type.
+For backward compatibility, conforming implementations may support complex numbers; however, inequality comparison of complex numbers is unspecified and thus implementation-dependent (see :ref:`complex-number-ordering`).
In-place Operators
~~~~~~~~~~~~~~~~~~
diff --git a/spec/API_specification/broadcasting.rst b/spec/2022.12/API_specification/broadcasting.rst
similarity index 100%
rename from spec/API_specification/broadcasting.rst
rename to spec/2022.12/API_specification/broadcasting.rst
diff --git a/spec/2022.12/API_specification/constants.rst b/spec/2022.12/API_specification/constants.rst
new file mode 100644
index 000000000..71cb8688d
--- /dev/null
+++ b/spec/2022.12/API_specification/constants.rst
@@ -0,0 +1,26 @@
+Constants
+=========
+
+ Array API specification for constants.
+
+A conforming implementation of the array API standard must provide and support the following constants adhering to the following conventions.
+
+- Each constant must have a Python floating-point data type (i.e., ``float``) and be provided as a Python scalar value.
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api.constants
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: attribute.rst
+
+ e
+ inf
+ nan
+ newaxis
+ pi
diff --git a/spec/API_specification/creation_functions.rst b/spec/2022.12/API_specification/creation_functions.rst
similarity index 100%
rename from spec/API_specification/creation_functions.rst
rename to spec/2022.12/API_specification/creation_functions.rst
diff --git a/spec/API_specification/data_type_functions.rst b/spec/2022.12/API_specification/data_type_functions.rst
similarity index 100%
rename from spec/API_specification/data_type_functions.rst
rename to spec/2022.12/API_specification/data_type_functions.rst
diff --git a/spec/API_specification/data_types.rst b/spec/2022.12/API_specification/data_types.rst
similarity index 100%
rename from spec/API_specification/data_types.rst
rename to spec/2022.12/API_specification/data_types.rst
diff --git a/spec/API_specification/elementwise_functions.rst b/spec/2022.12/API_specification/elementwise_functions.rst
similarity index 99%
rename from spec/API_specification/elementwise_functions.rst
rename to spec/2022.12/API_specification/elementwise_functions.rst
index bc21e14b9..0e5fd0609 100644
--- a/spec/API_specification/elementwise_functions.rst
+++ b/spec/2022.12/API_specification/elementwise_functions.rst
@@ -44,6 +44,7 @@ Objects in API
floor_divide
greater
greater_equal
+ imag
isfinite
isinf
isnan
diff --git a/spec/2022.12/API_specification/function_and_method_signatures.rst b/spec/2022.12/API_specification/function_and_method_signatures.rst
new file mode 100644
index 000000000..86d0819a6
--- /dev/null
+++ b/spec/2022.12/API_specification/function_and_method_signatures.rst
@@ -0,0 +1,59 @@
+.. _function-and-method-signatures:
+
+Function and method signatures
+==============================
+
+Function signatures in this standard adhere to the following:
+
+1. Positional parameters must be `positional-only <https://www.python.org/dev/peps/pep-0570/>`_ parameters.
+ Positional-only parameters have no externally-usable name. When a function
+ accepting positional-only parameters is called, positional arguments are
+ mapped to these parameters based solely on their order.
+
+ *Rationale: existing libraries have incompatible conventions, and using names
+ of positional parameters is not normal/recommended practice.*
+
+ .. note::
+
+ Positional-only parameters are only available in Python >= 3.8. Libraries
+ still supporting 3.7 or 3.6 may consider making the API standard-compliant
+ namespace >= 3.8. Alternatively, they can add guidance to their users in the
+ documentation to use the functions as if they were positional-only.
+
+2. Optional parameters must be `keyword-only <https://www.python.org/dev/peps/pep-3102/>`_ arguments.
+
+ *Rationale: this leads to more readable code, and it makes it easier to
+ evolve an API over time by adding keywords without having to worry about
+ keyword order.*
+
+3. For functions that have a single positional array parameter, that parameter
+ is called ``x``. For functions that have multiple array parameters, those
+ parameters are called ``xi`` with ``i = 1, 2, ...`` (i.e., ``x1``, ``x2``).
+
+4. Type annotations are left out of the signatures themselves for readability; however,
+ they are added to individual parameter descriptions. For code which aims to
+ adhere to the standard, adding type annotations is strongly recommended.
+
+A function signature and description will look like:
+
+::
+
+ funcname(x1, x2, /, *, key1=-1, key2=None) -> out:
+ Parameters
+
+ x1 : array
+ description
+ x2 : array
+ description
+ key1 : int
+ description
+ key2 : Optional[str]
+ description
+
+ Returns
+
+ out : array
+ description
+
+
+Method signatures will follow the same conventions modulo the addition of ``self``.
diff --git a/spec/API_specification/index.rst b/spec/2022.12/API_specification/index.rst
similarity index 100%
rename from spec/API_specification/index.rst
rename to spec/2022.12/API_specification/index.rst
diff --git a/spec/2022.12/API_specification/indexing.rst b/spec/2022.12/API_specification/indexing.rst
new file mode 100644
index 000000000..6d5e77a5b
--- /dev/null
+++ b/spec/2022.12/API_specification/indexing.rst
@@ -0,0 +1,205 @@
+.. _indexing:
+
+Indexing
+========
+
+ Array API specification for indexing arrays.
+
+A conforming implementation of the array API standard must adhere to the following conventions.
+
+Single-axis Indexing
+--------------------
+
+To index a single array axis, an array must support standard Python indexing rules. Let ``n`` be the axis (dimension) size.
+
+- An integer index must be an object satisfying `operator.index <https://docs.python.org/3/reference/datamodel.html#object.__index__>`_ (e.g., ``int``).
+
+- Nonnegative indices must start at ``0`` (i.e., zero-based indexing).
+
+- **Valid** nonnegative indices must reside on the half-open interval ``[0, n)``.
+
+ .. note::
+ This specification does not require bounds checking. The behavior for out-of-bounds integer indices is left unspecified.
+
+- Negative indices must count backward from the last array index, starting from ``-1`` (i.e., negative-one-based indexing, where ``-1`` refers to the last array index).
+
+ .. note::
+ A negative index ``j`` is equivalent to ``n+j``; the former is syntactic sugar for the latter, providing a shorthand for indexing elements that would otherwise need to be specified in terms of the axis (dimension) size.
+
+- **Valid** negative indices must reside on the closed interval ``[-n, -1]``.
+
+ .. note::
+ This specification does not require bounds checking. The behavior for out-of-bounds integer indices is left unspecified.
+
+- A negative index ``j`` is related to a zero-based nonnegative index ``i`` via ``i = n+j``.
+
+- Colons ``:`` must be used for `slices <https://docs.python.org/3/reference/expressions.html#slicings>`_: ``start:stop:step``, where ``start`` is inclusive and ``stop`` is exclusive.
+
+ .. note::
+ The specification does not support returning scalar (i.e., non-array) values from operations, including indexing. In contrast to standard Python indexing rules, for any index, or combination of indices, which select a single value, the result must be a zero-dimensional array containing the selected value.
+
+Slice Syntax
+~~~~~~~~~~~~
+
+The basic slice syntax is ``i:j:k`` where ``i`` is the starting index, ``j`` is the stopping index, and ``k`` is the step (``k != 0``). A slice may contain either one or two colons, with either an integer value or nothing on either side of each colon. The following are valid slices.
+
+::
+
+ A[:]
+ A[i:]
+ A[:j]
+ A[i:j]
+ A[::]
+ A[i::]
+ A[:j:]
+ A[::k]
+ A[i:j:]
+ A[i::k]
+ A[:j:k]
+ A[i:j:k]
+
+.. note::
+ Slice syntax can be equivalently achieved using the Python built-in `slice() <https://docs.python.org/3/library/functions.html#slice>`_ API. From the perspective of ``A``, the behavior of ``A[i:j:k]`` and ``A[slice(i, j, k)]`` is indistinguishable (i.e., both retrieve the same set of items from ``__getitem__``).
+
+Using a slice to index a single array axis must select ``m`` elements with index values
+
+::
+
+ i, i+k, i+2k, i+3k, ..., i+(m-1)k
+
+where
+
+::
+
+ m = q + (r != 0)
+
+and ``q`` and ``r`` (``r != 0``) are the quotient and remainder obtained by dividing ``j-i`` by ``k``
+
+::
+
+ j - i = qk + r
+
+such that
+
+::
+
+ j > i + (m-1)k
+
+.. note::
+ For ``i`` on the interval ``[0, n)`` (where ``n`` is the axis size), ``j`` on the interval ``(0, n]``, ``i`` less than ``j``, and positive step ``k``, a starting index ``i`` is **always** included, while the stopping index ``j`` is **always** excluded. This preserves ``x[:i]+x[i:]`` always being equal to ``x``.
+
+.. note::
+ Using a slice to index into a single array axis should select the same elements as using a slice to index a Python list of the same size.
+
+Slice syntax must have the following defaults. Let ``n`` be the axis (dimension) size.
+
+- If ``k`` is not provided (e.g., ``0:10``), ``k`` must equal ``1``.
+- If ``k`` is greater than ``0`` and ``i`` is not provided (e.g., ``:10:2``), ``i`` must equal ``0``.
+- If ``k`` is greater than ``0`` and ``j`` is not provided (e.g., ``0::2``), ``j`` must equal ``n``.
+- If ``k`` is less than ``0`` and ``i`` is not provided (e.g., ``:10:-2``), ``i`` must equal ``n-1``.
+- If ``k`` is less than ``0`` and ``j`` is not provided (e.g., ``0::-2``), ``j`` must equal ``-n-1``.
+
+Using a slice to index a single array axis must adhere to the following rules. Let ``n`` be the axis (dimension) size.
+
+- If ``i`` equals ``j``, a slice must return an empty array, whose axis (dimension) size along the indexed axis is ``0``.
+
+- Indexing via ``:`` and ``::`` must be equivalent and have defaults derived from the rules above. Both ``:`` and ``::`` indicate to select all elements along a single axis (dimension).
+
+ .. note::
+ This specification does not require "clipping" out-of-bounds slice indices. This is in contrast to Python slice semantics where ``0:100`` and ``0:10`` are equivalent on a list of length ``10``.
+
+The following ranges for the start and stop values of a slice must be supported. Let ``n`` be the axis (dimension) size being sliced. For a slice ``i:j:k``, the behavior specified above should be implemented for the following:
+
+- ``i`` or ``j`` omitted (``None``).
+- ``-n <= i <= n``.
+- For ``k > 0`` or ``k`` omitted (``None``), ``-n <= j <= n``.
+- For ``k < 0``, ``-n - 1 <= j <= max(0, n - 1)``.
+
+The behavior outside of these bounds is unspecified.
+
+.. note::
+ *Rationale: this is consistent with bounds checking for integer indexing; the behavior of out-of-bounds indices is left unspecified. Implementations may choose to clip (consistent with Python* ``list`` *slicing semantics), raise an exception, return junk values, or some other behavior depending on device requirements and performance considerations.*
+
+Multi-axis Indexing
+-------------------
+
+Multi-dimensional arrays must extend the concept of single-axis indexing to multiple axes by applying single-axis indexing rules along each axis (dimension) and supporting the following additional rules. Let ``N`` be the number of dimensions ("rank") of a multi-dimensional array ``A``.
+
+- Each axis may be independently indexed via single-axis indexing by providing a comma-separated sequence ("selection tuple") of single-axis indexing expressions (e.g., ``A[:, 2:10, :, 5]``).
+
+ .. note::
+ In Python, ``A[(exp1, exp2, ..., expN)]`` is equivalent to ``A[exp1, exp2, ..., expN]``; the latter is syntactic sugar for the former.
+
+ Accordingly, if ``A`` has rank ``1``, then ``A[(2:10,)]`` must be equivalent to ``A[2:10]``. If ``A`` has rank ``2``, then ``A[(2:10, :)]`` must be equivalent to ``A[2:10, :]``. And so on and so forth.
+
+- Providing a single nonnegative integer ``i`` as a single-axis index must index the same elements as the slice ``i:i+1``.
+
+- Providing a single negative integer ``i`` as a single-axis index must index the same elements as the slice ``n+i:n+i+1``, where ``n`` is the axis (dimension) size.
+
+- Providing a single integer as a single-axis index must reduce the number of array dimensions by ``1`` (i.e., the array rank must decrease by one; if ``A`` has rank ``2``, ``rank(A)-1 == rank(A[0, :])``). In particular, a selection tuple with the ``m``\th element an integer (and all other entries ``:``) indexes a sub-array with rank ``N-1``.
+
+ .. note::
+ When providing a single integer as a single-axis index to an array of rank ``1``, the result should be an array of rank ``0``, not a NumPy scalar. Note that this behavior differs from NumPy.
+
+- Providing a slice must retain array dimensions (i.e., the array rank must remain the same; ``rank(A) == rank(A[:])``).
+
+- Providing `ellipsis <https://docs.python.org/3/library/constants.html#Ellipsis>`_ must apply ``:`` to each dimension necessary to index all dimensions (e.g., if ``A`` has rank ``4``, ``A[1:, ..., 2:5] == A[1:, :, :, 2:5]``). Only a single ellipsis must be allowed. An ``IndexError`` exception must be raised if more than one ellipsis is provided.
+
+- Providing an empty tuple or an ellipsis to an array of rank ``0`` must result in an array of the same rank (i.e., if ``A`` has rank ``0``, ``A == A[()]`` and ``A == A[...]``).
+
+ .. note::
+ This behavior differs from NumPy where providing an empty tuple to an array of rank ``0`` returns a NumPy scalar.
+
+- Each ``None`` in the selection tuple must expand the dimensions of the resulting selection by one dimension of size ``1``. The position of the added dimension must be the same as the position of ``None`` in the selection tuple.
+
+ .. note::
+ Expanding dimensions can be equivalently achieved via repeated invocation of :func:`~array_api.expand_dims`.
+
+- Except in the case of providing a single ellipsis (e.g., ``A[2:10, ...]`` or ``A[1:, ..., 2:5]``), the number of provided single-axis indexing expressions (excluding ``None``) should equal ``N``. For example, if ``A`` has rank ``2``, a single-axis indexing expression should be explicitly provided for both axes (e.g., ``A[2:10, :]``). An ``IndexError`` exception should be raised if the number of provided single-axis indexing expressions (excluding ``None``) is less than ``N``.
+
+ .. note::
+ Some libraries, such as SymPy, support flat indexing (i.e., providing a single-axis indexing expression to a higher-dimensional array). That practice is not supported here.
+
+ To perform flat indexing, use ``reshape(x, (-1,))[integer]``.
+
+- An ``IndexError`` exception must be raised if the number of provided single-axis indexing expressions (excluding ``None``) is greater than ``N``.
+
+ .. note::
+ This specification leaves unspecified the behavior of providing a slice which attempts to select elements along a particular axis, but whose starting index is out-of-bounds.
+
+ *Rationale: this is consistent with bounds-checking for single-axis indexing. An implementation may choose to set the axis (dimension) size of the result array to* ``0`` *, raise an exception, return junk values, or some other behavior depending on device requirements and performance considerations.*
+
+Boolean Array Indexing
+----------------------
+
+.. admonition:: Data-dependent output shape
+ :class: admonition important
+
+ For common boolean array use cases (e.g., using a dynamically-sized boolean array mask to filter the values of another array), the shape of the output array is data-dependent; hence, array libraries which build computation graphs (e.g., JAX, Dask, etc.) may find boolean array indexing difficult to implement. Accordingly, such libraries may choose to omit boolean array indexing. See :ref:`data-dependent-output-shapes` section for more details.
+
+An array must support indexing where the **sole index** is an ``M``-dimensional boolean array ``B`` with shape ``S1 = (s1, ..., sM)`` according to the following rules. Let ``A`` be an ``N``-dimensional array with shape ``S2 = (s1, ..., sM, ..., sN)``.
+
+ .. note::
+ The prohibition against combining boolean array indices with other single-axis indexing expressions includes the use of ``None``. To expand dimensions of the returned array, use repeated invocation of :func:`~array_api.expand_dims`.
+
+- If ``N >= M``, then ``A[B]`` must replace the first ``M`` dimensions of ``A`` with a single dimension having a size equal to the number of ``True`` elements in ``B``. The values in the resulting array must be in row-major (C-style) order; this is equivalent to ``A[nonzero(B)]``.
+
+ .. note::
+ For example, if ``N == M == 2``, indexing ``A`` via a boolean array ``B`` will return a one-dimensional array whose size is equal to the number of ``True`` elements in ``B``.
+
+- If ``N < M``, then an ``IndexError`` exception must be raised.
+
+- The size of each dimension in ``B`` must equal the size of the corresponding dimension in ``A`` or be ``0``, beginning with the first dimension in ``A``. If a dimension size does not equal the size of the corresponding dimension in ``A`` and is not ``0``, then an ``IndexError`` exception must be raised.
+
+- The elements of a boolean index array must be iterated in row-major, C-style order, with the exception of zero-dimensional boolean arrays.
+
+- A zero-dimensional boolean index array (equivalent to ``True`` or ``False``) must follow the same axis replacement rules stated above. Namely, a zero-dimensional boolean index array removes zero dimensions and adds a single dimension of length ``1`` if the index array's value is ``True`` and of length ``0`` if the index array's value is ``False``. Accordingly, for a zero-dimensional boolean index array ``B``, the result of ``A[B]`` has shape ``S = (1, s1, ..., sN)`` if the index array's value is ``True`` and has shape ``S = (0, s1, ..., sN)`` if the index array's value is ``False``.
+
+Return Values
+-------------
+
+The result of an indexing operation (e.g., multi-axis indexing, boolean array indexing, etc) must be an array of the same data type as the indexed array.
+
+.. note::
+ The specified return value behavior includes indexing operations which return a single value (e.g., accessing a single element within a one-dimensional array).
diff --git a/spec/API_specification/indexing_functions.rst b/spec/2022.12/API_specification/indexing_functions.rst
similarity index 100%
rename from spec/API_specification/indexing_functions.rst
rename to spec/2022.12/API_specification/indexing_functions.rst
diff --git a/spec/API_specification/linear_algebra_functions.rst b/spec/2022.12/API_specification/linear_algebra_functions.rst
similarity index 100%
rename from spec/API_specification/linear_algebra_functions.rst
rename to spec/2022.12/API_specification/linear_algebra_functions.rst
diff --git a/spec/API_specification/manipulation_functions.rst b/spec/2022.12/API_specification/manipulation_functions.rst
similarity index 100%
rename from spec/API_specification/manipulation_functions.rst
rename to spec/2022.12/API_specification/manipulation_functions.rst
diff --git a/spec/API_specification/searching_functions.rst b/spec/2022.12/API_specification/searching_functions.rst
similarity index 100%
rename from spec/API_specification/searching_functions.rst
rename to spec/2022.12/API_specification/searching_functions.rst
diff --git a/spec/API_specification/set_functions.rst b/spec/2022.12/API_specification/set_functions.rst
similarity index 100%
rename from spec/API_specification/set_functions.rst
rename to spec/2022.12/API_specification/set_functions.rst
diff --git a/spec/API_specification/sorting_functions.rst b/spec/2022.12/API_specification/sorting_functions.rst
similarity index 100%
rename from spec/API_specification/sorting_functions.rst
rename to spec/2022.12/API_specification/sorting_functions.rst
diff --git a/spec/API_specification/statistical_functions.rst b/spec/2022.12/API_specification/statistical_functions.rst
similarity index 100%
rename from spec/API_specification/statistical_functions.rst
rename to spec/2022.12/API_specification/statistical_functions.rst
diff --git a/spec/API_specification/type_promotion.rst b/spec/2022.12/API_specification/type_promotion.rst
similarity index 99%
rename from spec/API_specification/type_promotion.rst
rename to spec/2022.12/API_specification/type_promotion.rst
index fc9f6e1bf..339b90e45 100644
--- a/spec/API_specification/type_promotion.rst
+++ b/spec/2022.12/API_specification/type_promotion.rst
@@ -7,7 +7,7 @@ Type Promotion Rules
Type promotion rules can be understood at a high level from the following diagram:
-.. image:: /_static/images/dtype_promotion_lattice.png
+.. image:: ../../_static/images/dtype_promotion_lattice.png
:target: Type promotion diagram
*Type promotion diagram. Promotion between any two types is given by their join on this lattice. Only the types of participating arrays matter, not their values. Dashed lines indicate that behavior for Python scalars is undefined on overflow. Boolean, integer and floating-point dtypes are not connected, indicating mixed-kind promotion is undefined.*
diff --git a/spec/API_specification/utility_functions.rst b/spec/2022.12/API_specification/utility_functions.rst
similarity index 100%
rename from spec/API_specification/utility_functions.rst
rename to spec/2022.12/API_specification/utility_functions.rst
diff --git a/spec/API_specification/version.rst b/spec/2022.12/API_specification/version.rst
similarity index 100%
rename from spec/API_specification/version.rst
rename to spec/2022.12/API_specification/version.rst
diff --git a/spec/2022.12/assumptions.md b/spec/2022.12/assumptions.md
new file mode 100644
index 000000000..b11482c5a
--- /dev/null
+++ b/spec/2022.12/assumptions.md
@@ -0,0 +1,77 @@
+(Assumptions)=
+
+# Assumptions
+
+## Hardware and software environments
+
+No assumptions on a specific hardware environment are made. It must be possible
+to create an array library adhering to this standard that runs (efficiently) on
+a variety of different hardware: CPUs with different architectures, GPUs,
+distributed systems and TPUs and other emerging accelerators.
+
+The same applies to software environments: it must be possible to create an
+array library adhering to this standard that runs efficiently independent of
+what compilers, build-time or run-time execution environment, or distribution
+and install method is employed. Parallel execution, JIT compilation, and
+delayed (lazy) evaluation must all be possible.
+
+The variety of hardware and software environments puts _constraints_ on choices
+made in the API standard. For example, JIT compilers may require output dtypes
+of functions to be predictable from input dtypes only rather than input values.
+
+
+(assumptions-dependencies)=
+
+## Dependencies
+
+The only dependency that's assumed in this standard is that on Python itself.
+Python >= 3.8 is assumed, motivated by the use of positional-only parameters
+(see [function and method signatures](API_specification/function_and_method_signatures.rst)).
+
+Importantly, array libraries are not assumed to be aware of each other, or of
+a common array-specific layer. The [use cases](use_cases.md) do not require
+such a dependency, and building and evolving an array library is easier without
+such a coupling. Facilitation support of multiple array types in downstream
+libraries is an important use case however, the assumed dependency structure
+for that is:
+
+
+
+Array libraries may know how to interoperate with each other, for example by
+constructing their own array type from that of another library or by shared
+memory use of an array (see [Data interchange mechanisms](design_topics/data_interchange.rst)).
+This can be done without a dependency though - only adherence to a protocol is
+enough.
+
+Array-consuming libraries will have to depend on one or more array libraries.
+That could be a "soft dependency" though, meaning retrieving an array library
+namespace from array instances that are passed in, but not explicitly doing
+`import arraylib_name`.
+
+
+## Backwards compatibility
+
+The assumption made during creation of this standard is that libraries are
+constrained by backwards compatibility guarantees to their users, and are
+likely unwilling to make significant backwards-incompatible changes for the
+purpose of conforming to this standard. Therefore it is assumed that the
+standard will be made available in a new namespace within each library, or the
+library will provide a way to retrieve a module or module-like object that
+adheres to this standard. See {ref}`how-to-adopt-this-api` for more details.
+
+
+## Production code & interactive use
+
+It is assumed that the primary use case is writing production code, for example
+in array-consuming libraries. As a consequence, making it easy to ensure that
+code is written as intended and has unambiguous semantics is preferred - and
+clear exceptions must be raised otherwise.
+
+It is also assumed that this does not significantly detract from the
+interactive user experience. However, in case existing libraries differ in
+behavior, the more strict version of that behavior is typically preferred. A
+good example is array inputs to functions - while NumPy accepts lists, tuples,
+generators, and anything else that could be turned into an array, most other
+libraries only accept their own array types. This standard follows the latter choice.
+It is likely always possible to put a thin "interactive use convenience layer"
+on top of a more strict behavior.
diff --git a/spec/2022.12/benchmark_suite.md b/spec/2022.12/benchmark_suite.md
new file mode 100644
index 000000000..41066c6a4
--- /dev/null
+++ b/spec/2022.12/benchmark_suite.md
@@ -0,0 +1,3 @@
+# Benchmark suite
+
+Adding a benchmark suite is planned in the future.
diff --git a/spec/changelog.rst b/spec/2022.12/changelog.rst
similarity index 76%
rename from spec/changelog.rst
rename to spec/2022.12/changelog.rst
index e0993307d..701a3dbcd 100644
--- a/spec/changelog.rst
+++ b/spec/2022.12/changelog.rst
@@ -1,5 +1,5 @@
Changelog per API standard version
==================================
-.. include:: ../CHANGELOG.md
+.. include:: ../../CHANGELOG.md
:parser: myst_parser.sphinx_
diff --git a/spec/2022.12/conf.py b/spec/2022.12/conf.py
new file mode 100644
index 000000000..e056bb7f2
--- /dev/null
+++ b/spec/2022.12/conf.py
@@ -0,0 +1,13 @@
+import sys
+from pathlib import Path
+
+sys.path.insert(0, str(Path(__file__).parents[2] / "src"))
+
+from array_api_stubs import _2022_12 as stubs_mod
+from _array_api_conf import *
+
+release = "2022.12"
+
+nav_title = html_theme_options.get("nav_title") + " v{}".format(release)
+html_theme_options.update({"nav_title": nav_title})
+sys.modules["array_api"] = stubs_mod
diff --git a/spec/design_topics/C_API.rst b/spec/2022.12/design_topics/C_API.rst
similarity index 100%
rename from spec/design_topics/C_API.rst
rename to spec/2022.12/design_topics/C_API.rst
diff --git a/spec/2022.12/design_topics/accuracy.rst b/spec/2022.12/design_topics/accuracy.rst
new file mode 100644
index 000000000..8c97db698
--- /dev/null
+++ b/spec/2022.12/design_topics/accuracy.rst
@@ -0,0 +1,77 @@
+.. _accuracy:
+
+Accuracy
+========
+
+ Array API specification for minimum accuracy requirements.
+
+Arithmetic Operations
+---------------------
+
+The results of element-wise arithmetic operations
+
+- ``+``
+- ``-``
+- ``*``
+- ``/``
+- ``%``
+
+including the corresponding element-wise array APIs defined in this standard
+
+- add
+- subtract
+- multiply
+- divide
+
+for floating-point operands must return the nearest representable value according to IEEE 754-2019 and a supported rounding mode. By default, the rounding mode should be ``roundTiesToEven`` (i.e., ties rounded toward the nearest value with an even least significant bit).
+
+Mathematical Functions
+----------------------
+
+This specification does **not** precisely define the behavior of the following functions
+
+- acos
+- acosh
+- asin
+- asinh
+- atan
+- atan2
+- atanh
+- cos
+- cosh
+- exp
+- expm1
+- log
+- log1p
+- log2
+- log10
+- pow
+- sin
+- sinh
+- tan
+- tanh
+
+except to require specific results for certain argument values that represent boundary cases of interest.
+
+.. note::
+ To help readers identify functions lacking precisely defined accuracy behavior, this specification uses the phrase "implementation-dependent approximation" in function descriptions.
+
+For other argument values, these functions should compute approximations to the results of respective mathematical functions; however, this specification recognizes that array libraries may be constrained by underlying hardware and/or seek to optimize performance over absolute accuracy and, thus, allows some latitude in the choice of approximation algorithms.
+
+Although the specification leaves the choice of algorithms to the implementation, this specification recommends (but does not specify) that implementations use the approximation algorithms for IEEE 754-2019 arithmetic contained in `FDLIBM <https://www.netlib.org/fdlibm/>`_, the freely distributable mathematical library from Sun Microsystems, or some other comparable IEEE 754-2019 compliant mathematical library.
+
+.. note::
+ With exception of a few mathematical functions, returning results which are indistinguishable from correctly rounded infinitely precise results is difficult, if not impossible, to achieve due to the algorithms involved, the limits of finite-precision, and error propagation. However, this specification recognizes that numerical accuracy alignment among array libraries is desirable in order to ensure portability and reproducibility. Accordingly, for each mathematical function, the specification test suite includes test values which span a function's domain and reports the average and maximum deviation from either a designated standard implementation (e.g., an arbitrary precision arithmetic implementation) or an average computed across a subset of known array library implementations. Such reporting aids users who need to know how accuracy varies among libraries and developers who need to check the validity of their implementations.
+
+Statistical Functions
+---------------------
+
+This specification does not specify accuracy requirements for statistical functions; however, this specification does expect that a conforming implementation of the array API standard will make a best-effort attempt to ensure that its implementations are theoretically sound and numerically robust.
+
+.. note::
+ In order for an array library to pass the specification test suite, an array library's statistical function implementations must satisfy certain bare-minimum accuracy requirements (e.g., accurate summation of a small set of positive integers). Unfortunately, imposing more rigorous accuracy requirements is not possible without severely curtailing possible implementation algorithms and unduly increasing implementation complexity.
+
+Linear Algebra
+--------------
+
+This specification does not specify accuracy requirements for linear algebra functions; however, this specification does expect that a conforming implementation of the array API standard will make a best-effort attempt to ensure that its implementations are theoretically sound and numerically robust.
diff --git a/spec/design_topics/complex_numbers.rst b/spec/2022.12/design_topics/complex_numbers.rst
similarity index 99%
rename from spec/design_topics/complex_numbers.rst
rename to spec/2022.12/design_topics/complex_numbers.rst
index da441499a..0eca79e91 100644
--- a/spec/design_topics/complex_numbers.rst
+++ b/spec/2022.12/design_topics/complex_numbers.rst
@@ -27,7 +27,7 @@ Branch cuts do not arise for single-valued trigonometric, hyperbolic, integer po
In contrast to real-valued floating-point numbers which have well-defined behavior as specified in IEEE 754, complex-valued floating-point numbers have no equivalent specification. Accordingly, this specification chooses to follow C99 conventions for special cases and branch cuts for those functions supporting complex numbers. For those functions which do not have C99 equivalents (e.g., linear algebra APIs), the specification relies on dominant conventions among existing array libraries.
.. warning::
- All branch cuts documented in this specification are considered **provisional**. While conforming implementations of the array API standard should adopt the branch cuts described in this standard, consumers of array API standard implementations should **not** assume that branch cuts are consistent between implementations.
+ All branch cuts documented in this specification are considered **provisional**. While conforming implementations of the array API standard should adopt the branch cuts described in this standard, consumers of array API standard implementations should **not** assume that branch cuts are consistent between implementations.
Provided no issues arise due to the choice of branch cut, the provisional status is likely to be removed in a future revision of this standard.
@@ -58,4 +58,4 @@ Valued-based Promotion
According to the type promotion rules described in this specification (see :ref:`type-promotion`), only the data types of the input arrays participating in an operation matter, not their values. The same principle applies to situations in which one or more results of operations on real-valued arrays are mathematically defined in the complex domain, but not in their real domain.
-By convention, the principal square root of :math:`-1` is :math:`j`, where :math:`j` is the imaginary unit. Despite this convention, for those operations supporting type promotion, conforming implementations must only consider input array data types when determining the data type of the output array. For example, if a real-valued input array is provided to :func:`~array_api.sqrt`, the output array must also be real-valued, even if the input array contains negative values. Accordingly, if a consumer of a conforming implementation of this specification desires for an operation's results to include the complex domain, the consumer should first cast the input array(s) to an appropriate complex floating-point data type before performing the operation.
\ No newline at end of file
+By convention, the principal square root of :math:`-1` is :math:`j`, where :math:`j` is the imaginary unit. Despite this convention, for those operations supporting type promotion, conforming implementations must only consider input array data types when determining the data type of the output array. For example, if a real-valued input array is provided to :func:`~array_api.sqrt`, the output array must also be real-valued, even if the input array contains negative values. Accordingly, if a consumer of a conforming implementation of this specification desires for an operation's results to include the complex domain, the consumer should first cast the input array(s) to an appropriate complex floating-point data type before performing the operation.
diff --git a/spec/2022.12/design_topics/copies_views_and_mutation.rst b/spec/2022.12/design_topics/copies_views_and_mutation.rst
new file mode 100644
index 000000000..52be1c805
--- /dev/null
+++ b/spec/2022.12/design_topics/copies_views_and_mutation.rst
@@ -0,0 +1,77 @@
+.. _copyview-mutability:
+
+Copy-view behaviour and mutability
+==================================
+
+.. admonition:: Mutating views
+ :class: important
+
+ Array API consumers are *strongly* advised to avoid *any* mutating operations when an array object may be either a "view" (i.e., an array whose data refers to memory that belongs to another array) or own memory of which one or more other array objects may be views. This admonition may become more strict in the future (e.g., this specification may require that view mutation be prohibited and trigger an exception). Accordingly, only perform mutation operations (e.g., in-place assignment) when absolutely confident that array data belongs to one, and only one, array object.
+
+Strided array implementations (e.g. NumPy, PyTorch, CuPy, MXNet) typically
+have the concept of a "view", meaning an array containing data in memory that
+belongs to another array (i.e. a different "view" on the original data).
+Views are useful for performance reasons - not copying data to a new location
+saves memory and is faster than copying - but can also affect the semantics
+of code. This happens when views are combined with *mutating* operations.
+This simple example illustrates that:
+
+.. code-block:: python
+
+ x = ones(1)
+ y = x[:] # `y` *may* be a view on the data of `x`
+ y -= 1 # if `y` is a view, this modifies `x`
+
+Code as simple as the above example will not be portable between array
+libraries - for NumPy/PyTorch/CuPy/MXNet ``x`` will contain the value ``0``,
+while for TensorFlow/JAX/Dask it will contain the value ``1``. The combination
+of views and mutability is fundamentally problematic here if the goal is to
+be able to write code with unambiguous semantics.
+
+Views are necessary for getting good performance out of the current strided
+array libraries. It is not always clear however when a library will return a
+view, and when it will return a copy. This API standard does not attempt to
+specify this - libraries can do either.
+
+There are several types of operations that do in-place mutation of data
+contained in arrays. These include:
+
+1. Inplace operators (e.g. ``*=``)
+2. Item assignment (e.g. ``x[0] = 1``)
+3. Slice assignment (e.g., ``x[:2, :] = 3``)
+4. The `out=` keyword present in some strided array libraries (e.g. ``sin(x, out=y)``)
+
+Libraries like TensorFlow and JAX tend to support inplace operators, provide
+alternative syntax for item and slice assignment (e.g. an ``update_index``
+function or ``x.at[idx].set(y)``), and have no need for ``out=``.
+
+A potential solution could be to make views read-only, or use copy-on-write
+semantics. Both are hard to implement and would present significant issues
+for backwards compatibility for current strided array libraries. Read-only
+views would also not be a full solution, given that mutating the original
+(base) array will also result in ambiguous semantics. Hence this API standard
+does not attempt to go down this route.
+
+Both inplace operators and item/slice assignment can be mapped onto
+equivalent functional expressions (e.g. ``x[idx] = val`` maps to
+``x.at[idx].set(val)``), and given that both inplace operators and item/slice
+assignment are very widely used in both library and end user code, this
+standard chooses to include them.
+
+The situation with ``out=`` is slightly different - it's less heavily used, and
+easier to avoid. It's also not an optimal API, because it mixes an
+"efficiency of implementation" consideration ("you're allowed to do this
+inplace") with the semantics of a function ("the output _must_ be placed into
+this array"). There are libraries that do some form of tracing or abstract
+interpretation over a language that does not support mutation (to make
+analysis easier); in those cases implementing ``out=`` with correct handling of
+views may even be impossible to do. There's alternatives, for example the
+donated arguments in JAX or working buffers in LAPACK, that allow the user to
+express "you _may_ overwrite this data, do whatever is fastest". Given that
+those alternatives aren't widely used in array libraries today, this API
+standard chooses to (a) leave out ``out=``, and (b) not specify another method
+of reusing arrays that are no longer needed as buffers.
+
+This leaves the problem of the initial example - with this API standard it
+remains possible to write code that will not work the same for all array
+libraries. This is something that the user must be careful about.
diff --git a/spec/2022.12/design_topics/data_dependent_output_shapes.rst b/spec/2022.12/design_topics/data_dependent_output_shapes.rst
new file mode 100644
index 000000000..43daa9765
--- /dev/null
+++ b/spec/2022.12/design_topics/data_dependent_output_shapes.rst
@@ -0,0 +1,15 @@
+.. _data-dependent-output-shapes:
+
+Data-dependent output shapes
+============================
+
+Array libraries which build computation graphs commonly employ static analysis that relies upon known shapes. For example, JAX requires known array sizes when compiling code, in order to perform static memory allocation. Functions and operations which are value-dependent present difficulties for such libraries, as array sizes cannot be inferred ahead of time without also knowing the contents of the respective arrays.
+
+While value-dependent functions and operations are not impossible to implement for array libraries which build computation graphs, this specification does not want to impose an undue burden on such libraries and permits omission of value-dependent operations. All other array libraries are expected, however, to implement the value-dependent operations included in this specification in order to be array specification compliant.
+
+Value-dependent operations are demarcated in this specification using an admonition similar to the following:
+
+.. admonition:: Data-dependent output shape
+ :class: important
+
+ The shape of the output array for this function/operation depends on the data values in the input array; hence, array libraries which build computation graphs (e.g., JAX, Dask, etc.) may find this function/operation difficult to implement without knowing array values. Accordingly, such libraries may choose to omit this function. See :ref:`data-dependent-output-shapes` section for more details.
diff --git a/spec/2022.12/design_topics/data_interchange.rst b/spec/2022.12/design_topics/data_interchange.rst
new file mode 100644
index 000000000..8686042c8
--- /dev/null
+++ b/spec/2022.12/design_topics/data_interchange.rst
@@ -0,0 +1,86 @@
+.. _data-interchange:
+
+Data interchange mechanisms
+===========================
+
+This section discusses the mechanism to convert one type of array into another.
+As discussed in the :ref:`assumptions-dependencies` section,
+*functions* provided by an array library are not expected to operate on
+*array types* implemented by another library. Instead, the array can be
+converted to a "native" array type.
+
+The interchange mechanism must offer the following:
+
+1. Data access via a protocol that describes the memory layout of the array
+ in an implementation-independent manner.
+
+ *Rationale: any number of libraries must be able to exchange data, and no
+ particular package must be needed to do so.*
+
+2. Support for all dtypes in this API standard (see :ref:`data-types`).
+
+3. Device support. It must be possible to determine on what device the array
+ that is to be converted lives.
+
+ *Rationale: there are CPU-only, GPU-only, and multi-device array types;
+ it's best to support these with a single protocol (with separate
+ per-device protocols it's hard to figure out unambiguous rules for which
+ protocol gets used, and the situation will get more complex over time
+ as TPUs and other accelerators become more widely available).*
+
+4. Zero-copy semantics where possible, making a copy only if needed (e.g.
+ when data is not contiguous in memory).
+
+ *Rationale: performance.*
+
+5. A Python-side and a C-side interface, the latter with a stable C ABI.
+
+ *Rationale: all prominent existing array libraries are implemented in
+ C/C++, and are released independently from each other. Hence a stable C
+ ABI is required for packages to work well together.*
+
+DLPack: An in-memory tensor structure
+-------------------------------------
+
+The best candidate for this protocol is
+`DLPack <https://github.com/dmlc/dlpack>`_, and hence that is what this
+standard has chosen as the primary/recommended protocol. Note that the
+``asarray`` function also supports the Python buffer protocol (CPU-only) to
+support libraries that already implement buffer protocol support.
+
+.. note::
+ The main alternatives to DLPack are device-specific methods:
+
+ - The `buffer protocol <https://docs.python.org/3/c-api/buffer.html>`_ on CPU
+ - ``__cuda_array_interface__`` for CUDA, specified in the Numba documentation
+ `here <https://numba.readthedocs.io/en/stable/cuda/cuda_array_interface.html>`_
+ (Python-side only at the moment)
+
+ An issue with device-specific protocols is: if two libraries both
+ support multiple device types, in which order should the protocols be
+ tried? A growth in the number of protocols to support each time a new
+ device gets supported by array libraries (e.g. TPUs, AMD GPUs, emerging
+ hardware accelerators) also seems undesirable.
+
+ In addition to the above argument, it is also clear from adoption
+ patterns that DLPack has the widest support. The buffer protocol, despite
+ being a lot older and standardized as part of Python itself via PEP 3118,
+ hardly has any support from array libraries. CPU interoperability is
+ mostly dealt with via the NumPy-specific ``__array__`` (which, when called,
+ means the object it is attached to must return a ``numpy.ndarray``
+ containing the data the object holds).
+
+ See the `RFC to adopt DLPack <https://github.com/data-apis/consortium-feedback/issues/1>`_
+ for discussion that preceded the adoption of DLPack.
+
+DLPack's documentation can be found at: https://dmlc.github.io/dlpack/latest/.
+
+The `Python specification of DLPack <https://dmlc.github.io/dlpack/latest/python_spec.html>`__
+page gives a high-level specification for data exchange in Python using DLPack.
+
+.. note::
+ DLPack is a standalone protocol/project and can therefore be used outside of
+ this standard. Python libraries that want to implement only DLPack support
+ are recommended to do so using the same syntax and semantics as outlined
+ below. They are not required to return an array object from ``from_dlpack``
+ which conforms to this standard.
diff --git a/spec/2022.12/design_topics/device_support.rst b/spec/2022.12/design_topics/device_support.rst
new file mode 100644
index 000000000..29f0789bb
--- /dev/null
+++ b/spec/2022.12/design_topics/device_support.rst
@@ -0,0 +1,111 @@
+.. _device-support:
+
+Device support
+==============
+
+For libraries that support execution on more than a single hardware device - e.g. CPU and GPU, or multiple GPUs - it is important to be able to control on which device newly created arrays get placed and where execution happens. Attempting to be fully implicit doesn't always scale well to situations with multiple GPUs.
+
+Existing libraries employ one or more of these three methods to exert such control over data placement:
+
+1. A global default device, which may be fixed or user-switchable.
+2. A context manager to control device assignment within its scope.
+3. Local control for data allocation target device via explicit keywords, and a method to transfer arrays to another device.
+
+Libraries differ in how execution is controlled, via a context manager or with the convention that execution takes place on the same device where all argument arrays are allocated. And they may or may not allow mixing arrays on different devices via implicit data transfers.
+
+This standard chooses to add support for method 3 (local control), with the convention that execution takes place on the same device where all argument arrays are allocated. The rationale for choosing method 3 is because it's the most explicit and granular, with its only downside being verbosity. A context manager may be added in the future - see :ref:`device-out-of-scope` for details.
+
+Intended usage
+--------------
+
+The intended usage for the device support in the current version of the
+standard is *device handling in library code*. The assumed pattern is that
+users create arrays (for which they can use all the relevant device syntax
+that the library they use provides), and that they then pass those arrays
+into library code which may have to do the following:
+
+- Create new arrays on the same device as an array that's passed in.
+- Determine whether two input arrays are present on the same device or not.
+- Move an array from one device to another.
+- Create output arrays on the same device as the input arrays.
+- Pass on a specified device to other library code.
+
+.. note::
+ Given that there is not much that's currently common in terms of
+ device-related syntax between different array libraries, the syntax included
+ in the standard is kept as minimal as possible while enabling the
+ above-listed use cases.
+
+Syntax for device assignment
+----------------------------
+
+The array API will offer the following syntax for device assignment and
+cross-device data transfer:
+
+1. A ``.device`` property on the array object, which returns a ``Device`` object
+ representing the device the data in the array is stored on, and supports
+ comparing devices for equality with ``==`` and ``!=`` within the same library
+ (e.g., by implementing ``__eq__``; comparing device objects from different
+ libraries is out of scope).
+2. A ``device=None`` keyword for array creation functions, which takes an
+ instance of a ``Device`` object.
+3. A ``.to_device`` method on the array object to copy an array to a different device.
+
+.. note::
+ In the current API standard, the only way to obtain a ``Device`` object is from the
+ ``.device`` property on the array object. The standard does **not** include a universal
+ ``Device`` object recognized by all compliant libraries. Accordingly, the standard does
+ not provide a means of instantiating a ``Device`` object to point to a specific physical or
+ logical device.
+
+ The choice to not include a standardized ``Device`` object may be revisited in a future revision of this standard.
+
+ For array libraries which concern themselves with multi-device support, including CPU and GPU,
+ they are free to expose a library-specific device object (e.g., for creating an
+ array on a particular device). While a library-specific device object can be used as input to
+ ``to_device``, beware that this will mean non-portability as code will be specific to
+ that library.
+
+Semantics
+---------
+
+Handling devices is complex, and some frameworks have elaborate policies for
+handling device placement. Therefore this section only gives recommendations,
+rather than hard requirements:
+
+- Respect explicit device assignment (i.e. if the input to the ``device=`` keyword is not ``None``, guarantee that the array is created on the given device, and raise an exception otherwise).
+- Preserve device assignment as much as possible (e.g. output arrays from a function are expected to be on the same device as input arrays to the function).
+- Raise an exception if an operation involves arrays on different devices (i.e. avoid implicit data transfer between devices).
+- Use a default for ``device=None`` which is consistent between functions within the same library.
+- If a library has multiple ways of controlling device placement, the most explicit method should have the highest priority. For example:
+
+ 1. If ``device=`` keyword is specified, that always takes precedence
+
+ 2. If ``device=None``, then use the setting from a context manager, if set.
+
+ 3. If no context manager was used, then use the global default device/strategy
+
+.. _device-out-of-scope:
+
+Out of scope for device support
+-------------------------------
+
+Individual libraries may offer APIs for one or more of the following topics,
+however those are out of scope for this standard:
+
+- Identifying a specific physical or logical device across libraries
+- Setting a default device globally
+- Stream/queue control
+- Distributed allocation
+- Memory pinning
+- A context manager for device control
+
+.. note::
+ A context manager for controlling the default device is present in most existing array
+ libraries (NumPy being the exception). There are concerns with using a
+ context manager however. A context manager can be tricky to use at a high
+ level, since it may affect library code below function calls (non-local
+ effects). See, e.g., `this PyTorch issue <https://github.com/pytorch/pytorch/issues/27878>`_
+ for a discussion on a good context manager API.
+
+ Adding a context manager may be considered in a future version of this API standard.
diff --git a/spec/design_topics/index.rst b/spec/2022.12/design_topics/index.rst
similarity index 100%
rename from spec/design_topics/index.rst
rename to spec/2022.12/design_topics/index.rst
diff --git a/spec/2022.12/design_topics/parallelism.rst b/spec/2022.12/design_topics/parallelism.rst
new file mode 100644
index 000000000..f013a9cf9
--- /dev/null
+++ b/spec/2022.12/design_topics/parallelism.rst
@@ -0,0 +1,24 @@
+Parallelism
+===========
+
+Parallelism is mostly, but not completely, an execution or runtime concern
+rather than an API concern. Execution semantics are out of scope for this API
+standard, and hence won't be discussed further here. The API related part
+involves how libraries allow users to exercise control over the parallelism
+they offer, such as:
+
+- Via environment variables. This is the method of choice for BLAS libraries and libraries using OpenMP.
+- Via a keyword to individual functions or methods. Examples include the ``n_jobs`` keyword used in scikit-learn and the ``workers`` keyword used in SciPy.
+- Build-time settings to enable a parallel or distributed backend.
+- Via letting the user set chunk sizes. Dask uses this approach.
+
+When combining multiple libraries, one has to deal with auto-parallelization
+semantics and nested parallelism. Two things that could help improve the
+coordination of parallelization behavior in a stack of Python libraries are:
+
+1. A common API pattern for enabling parallelism
+2. A common library providing a parallelization layer
+
+Option (1) may possibly fit in a future version of this array API standard.
+`array-api issue 4 <https://github.com/data-apis/array-api/issues/4>`_ contains
+more detailed discussion on the topic of parallelism.
diff --git a/spec/2022.12/design_topics/static_typing.rst b/spec/2022.12/design_topics/static_typing.rst
new file mode 100644
index 000000000..26a1fb901
--- /dev/null
+++ b/spec/2022.12/design_topics/static_typing.rst
@@ -0,0 +1,50 @@
+Static typing
+=============
+
+Good support for static typing both in array libraries and array-consuming
+code is desirable. Therefore the exact type or set of types for each
+parameter, keyword and return value is specified for functions and methods -
+see :ref:`function-and-method-signatures`. That section specifies arrays
+simply as ``array``; what that means is dealt with in this section.
+
+Introducing type annotations in libraries became more relevant only when
+Python 2.7 support was dropped at the start of 2020. As a consequence, using
+type annotations with array libraries is largely still a work in progress.
+This version of the API standard does not deal with trying to type *array
+properties* like shape, dimensionality or dtype, because that's not a solved
+problem in individual array libraries yet.
+
+An ``array`` type annotation can mean either the type of one specific array
+object, or some superclass or typing Protocol - as long as it is consistent
+with the array object specified in :ref:`array-object`. To illustrate by
+example:
+
+.. code-block:: python
+
+ # `Array` is a particular class in the library
+ def sin(x: Array, / ...) -> Array:
+ ...
+
+and
+
+.. code-block:: python
+
+ # There's some base class `_BaseArray`, and there may be multiple
+ # array subclasses inside the library
+ A = TypeVar('A', bound=_BaseArray)
+ def sin(x: A, / ...) -> A:
+ ...
+
+should both be fine. There may be other variations possible. Also note that
+this standard does not require that input and output array types are the same
+(they're expected to be defined in the same library though). Given that
+array libraries don't have to be aware of other types of arrays defined in
+other libraries (see :ref:`assumptions-dependencies`), this should be enough
+for a single array library.
+
+That said, an array-consuming library aiming to support multiple array types
+may need more - for example a protocol to enable structural subtyping. This
+API standard currently takes the position that it does not provide any
+reference implementation or package that can or should be relied on at
+runtime, hence no such protocol is defined here. This may be dealt with in a
+future version of this standard.
diff --git a/spec/extensions/fourier_transform_functions.rst b/spec/2022.12/extensions/fourier_transform_functions.rst
similarity index 100%
rename from spec/extensions/fourier_transform_functions.rst
rename to spec/2022.12/extensions/fourier_transform_functions.rst
diff --git a/spec/extensions/index.rst b/spec/2022.12/extensions/index.rst
similarity index 100%
rename from spec/extensions/index.rst
rename to spec/2022.12/extensions/index.rst
diff --git a/spec/extensions/linear_algebra_functions.rst b/spec/2022.12/extensions/linear_algebra_functions.rst
similarity index 98%
rename from spec/extensions/linear_algebra_functions.rst
rename to spec/2022.12/extensions/linear_algebra_functions.rst
index 6759b2260..938221c79 100644
--- a/spec/extensions/linear_algebra_functions.rst
+++ b/spec/2022.12/extensions/linear_algebra_functions.rst
@@ -51,8 +51,8 @@ Accordingly, the standardization process affords the opportunity to reduce inter
In general, interfaces should avoid polymorphic return values (e.g., returning an array **or** a namedtuple, dependent on, e.g., an optional keyword argument). Dedicated interfaces for each return value type are preferred, as dedicated interfaces are easier to reason about at both the implementation level and user level. Example interfaces which could be combined into a single overloaded interface, but are not, include:
- - ``eig``: computing both eigenvalues and eignvectors.
- - ``eigvals``: computing only eigenvalues.
+ - ``eigh``: computing both eigenvalues and eigenvectors.
+ - ``eigvalsh``: computing only eigenvalues.
4. **Implementation agnosticism**: a standardized interface should eschew parameterization (including keyword arguments) biased toward particular implementations.
diff --git a/spec/2022.12/future_API_evolution.md b/spec/2022.12/future_API_evolution.md
new file mode 100644
index 000000000..443f683d5
--- /dev/null
+++ b/spec/2022.12/future_API_evolution.md
@@ -0,0 +1,60 @@
+(future-API-evolution)=
+
+# Future API standard evolution
+
+## Scope extensions
+
+Proposals for scope extensions in a future version of the API standard will follow
+the process documented at https://github.com/data-apis/governance/blob/master/process_document.md
+
+In summary, proposed new APIs go through several maturity stages, and will only be
+accepted in a future version of this API standard once they have reached the "Final"
+maturity stage, which means multiple array libraries have compliant implementations
+and real-world experience from use of those implementations is available.
+
+
+## Backwards compatibility
+
+Functions, objects, keywords and specified behavior are added to this API standard
+only if those are already present in multiple existing array libraries, and if there is
+data that those APIs are used. Therefore it is highly unlikely that future versions
+of this standard will make backwards-incompatible changes.
+
+The aim is for future versions to be 100% backwards compatible with older versions.
+Any exceptions must have strong rationales and be clearly documented in the updated
+API specification.
+
+
+(api-versioning)=
+
+## Versioning
+
+This API standard uses the following versioning scheme:
+
+- The version is date-based, in the form `yyyy.mm` (e.g., `2020.12`).
+- The version shall not include a standard way to do `alpha`/`beta`/`rc` or
+ `.post`/`.dev` type versions.
+ _Rationale: that's for Python packages, not for a standard._
+- The version must be made available at runtime via an attribute
+ `__array_api_version__` by a compliant implementation, in `'yyyy.mm'` format
+ as a string, in the namespace that implements the API standard.
+ _Rationale: dunder version strings are the standard way of doing this._
+
+No utilities for dealing with version comparisons need to be provided; given
+the format simple string comparisons with Python operators (`==`, `<`, `>=`,
+etc.) will be enough.
+
+```{note}
+
+Rationale for the `yyyy.mm` versioning scheme choice:
+the API will be provided as part of a library, which already has a versioning
+scheme (typically PEP 440 compliant and in the form `major.minor.bugfix`),
+and a way to access it via `module.__version__`. The API standard version is
+completely independent from the package version. Given the standardization
+process, it resembles a C/C++ versioning scheme (e.g. `C99`, `C++14`) more
+than Python package versioning.
+```
+
+The frequency of releasing a new version of an API standard will likely be at
+regular intervals and on the order of one year, however no assumption on
+frequency of new versions appearing must be made.
diff --git a/spec/index.rst b/spec/2022.12/index.rst
similarity index 100%
rename from spec/index.rst
rename to spec/2022.12/index.rst
diff --git a/spec/license.rst b/spec/2022.12/license.rst
similarity index 87%
rename from spec/license.rst
rename to spec/2022.12/license.rst
index 8d4b6d1fd..06ec75dfc 100644
--- a/spec/license.rst
+++ b/spec/2022.12/license.rst
@@ -5,5 +5,5 @@ All content on this website and the corresponding
`GitHub repository `__ is licensed
under the following license:
- .. include:: ../LICENSE
+ .. include:: ../../LICENSE
:parser: myst_parser.sphinx_
diff --git a/spec/2022.12/purpose_and_scope.md b/spec/2022.12/purpose_and_scope.md
new file mode 100644
index 000000000..f375c9512
--- /dev/null
+++ b/spec/2022.12/purpose_and_scope.md
@@ -0,0 +1,470 @@
+# Purpose and scope
+
+## Introduction
+
+Python users have a wealth of choice for libraries and frameworks for
+numerical computing, data science, machine learning, and deep learning. New
+frameworks pushing forward the state of the art in these fields are appearing
+every year. One unintended consequence of all this activity and creativity
+has been fragmentation in multidimensional array (a.k.a. tensor) libraries -
+which are the fundamental data structure for these fields. Choices include
+NumPy, Tensorflow, PyTorch, Dask, JAX, CuPy, MXNet, Xarray, and others.
+
+The APIs of each of these libraries are largely similar, but with enough
+differences that it's quite difficult to write code that works with multiple
+(or all) of these libraries. This array API standard aims to address that
+issue, by specifying an API for the most common ways arrays are constructed
+and used.
+
+Why not simply pick an existing API and bless that as the standard? In short,
+because there are often good reasons for the current inconsistencies between
+libraries. The most obvious candidate for that existing API is NumPy. However
+NumPy was not designed with non-CPU devices, graph-based libraries, or JIT
+compilers in mind. Other libraries often deviate from NumPy for good
+(necessary) reasons. Choices made in this API standard are often the same
+ones NumPy makes, or close to it, but are different where necessary to make
+sure all existing array libraries can adopt this API.
+
+
+### This API standard
+
+This document aims to standardize functionality that exists in most/all array
+libraries and either is commonly used or is needed for
+consistency/completeness. Usage is determined via analysis of downstream
+libraries, see {ref}`usage-data`. An example of consistency is: there are
+functional equivalents for all Python operators (including the rarely used
+ones).
+
+Beyond usage and consistency, there's a set of use cases that inform the API
+design to ensure it's fit for a wide range of users and situations - see
+{ref}`use-cases`.
+
+A question that may arise when reading this document is: _"what about
+functionality that's not present in this document?"_ This:
+
+- means that there is no guarantee the functionality is present in libraries
+ adhering to the standard
+- does _not_ mean that that functionality is unimportant
+- may indicate that that functionality, if present in a particular array
+ library, is unlikely to be present in all other libraries
+
+### History
+
+The first library for numerical and scientific computing in Python was
+Numeric, developed in the mid-1990s. In the early 2000s a second, similar
+library, Numarray, was created. In 2005 NumPy was written, superseding both
+Numeric and Numarray and resolving the fragmentation at that time. For
+roughly a decade, NumPy was the only widely used array library. Over the past
+~5 years, mainly due to the emergence of new hardware and the rise of deep
+learning, many other libraries have appeared, leading to more severe
+fragmentation. Concepts and APIs in newer libraries were often inspired by
+(or copied from) those in older ones - and then changed or improved upon to
+fit new needs and use cases. Individual library authors discussed ideas,
+however there was never (before this array API standard) a serious attempt
+to coordinate between all libraries to avoid fragmentation and arrive at a
+common API standard.
+
+The idea for this array API standard grew gradually out of many conversations
+between maintainers during 2019-2020. It quickly became clear that any
+attempt to write a new "reference library" to fix the current fragmentation
+was infeasible - unlike in 2005, there are now too many different use cases
+and too many stakeholders, and the speed of innovation is too high. In May
+2020 an initial group of maintainers was assembled in the [Consortium for
+Python Data API Standards](https://data-apis.org/) to start drafting a
+specification for an array API that could be adopted by each of the existing
+array and tensor libraries. That resulted in this document, describing that
+API.
+
+
+(Scope)=
+
+## Scope (includes out-of-scope / non-goals)
+
+This section outlines what is in scope and out of scope for this API standard.
+
+### In scope
+
+The scope of the array API standard includes:
+
+- Functionality which needs to be included in an array library for it to adhere
+ to this standard.
+- Names of functions, methods, classes and other objects.
+- Function signatures, including type annotations.
+- Semantics of functions and methods. I.e. expected outputs including precision
+ for and dtypes of numerical results.
+- Semantics in the presence of `nan`'s, `inf`'s, empty arrays (i.e. arrays
+ including one or more dimensions of size `0`).
+- Casting rules, broadcasting, indexing
+- Data interchange. I.e. protocols to convert one type of array into another
+ type, potentially sharing memory.
+- Device support.
+
+Furthermore, meta-topics included in this standard include:
+
+- Use cases for the API standard and assumptions made in it
+- API standard adoption
+- API standard versioning
+- Future API standard evolution
+- Array library and API standard versioning
+- Verification of API standard conformance
+
+The concrete set of functionality that is in scope for this version of the
+standard is shown in this diagram:
+
+
+
+
+**Goals** for the API standard include:
+
+- Make it possible for array-consuming libraries to start using multiple types
+ of arrays as inputs.
+- Enable more sharing and reuse of code built on top of the core functionality
+ in the API standard.
+- For authors of new array libraries, provide a concrete API that can be
+ adopted as is, rather than each author having to decide what to borrow from
+ where and where to deviate.
+- Make the learning curve for users less steep when they switch from one array
+ library to another one.
+
+
+### Out of scope
+
+1. Implementations of the standard are out of scope.
+
+ _Rationale: the standard will consist of a document and an accompanying test
+ suite with which the conformance of an implementation can be verified. Actual
+ implementations will live in array libraries; no reference implementation is
+ planned._
+
+2. Execution semantics are out of scope. This includes single-threaded vs.
+ parallel execution, task scheduling and synchronization, eager vs. delayed
+ evaluation, performance characteristics of a particular implementation of the
+ standard, and other such topics.
+
+ _Rationale: execution is the domain of implementations. Attempting to specify
+ execution behavior in a standard is likely to require much more fine-grained
+ coordination between developers of implementations, and hence is likely to
+ become an obstacle to adoption._
+
+3. Non-Python API standardization (e.g., Cython or NumPy C APIs)
+
+ _Rationale: this is an important topic for some array-consuming libraries,
+ but there is no widely shared C/Cython API and hence it doesn't make sense at
+ this point in time to standardize anything. See
+ the [C API section](design_topics/C_API.rst) for more details._
+
+4. Standardization of these dtypes is out of scope: bfloat16, extended
+ precision floating point, datetime, string, object and void dtypes.
+
+ _Rationale: these dtypes aren't uniformly supported, and their inclusion at
+ this point in time could put a significant implementation burden on
+ libraries. It is expected that some of these dtypes - in particular
+ `bfloat16` - will be included in a future version of the standard._
+
+5. The following topics are out of scope: I/O, polynomials, error handling,
+ testing routines, building and packaging related functionality, methods of
+ binding compiled code (e.g., `cffi`, `ctypes`), subclassing of an array
+ class, masked arrays, and missing data.
+
+ _Rationale: these topics are not core functionality for an array library,
+ and/or are too tied to implementation details._
+
+6. NumPy (generalized) universal functions, i.e. ufuncs and gufuncs.
+
+ _Rationale: these are NumPy-specific concepts, and are mostly just a
+ particular way of building regular functions with a few extra
+ methods/properties._
+
+7. Behaviour for unexpected/invalid input to functions and methods.
+
+ _Rationale: there are a huge amount of ways in which users can provide
+ invalid or unspecified input to functionality in the standard. Exception
+ types or other resulting behaviour cannot be completely covered and would
+ be hard to make consistent between libraries._
+
+
+**Non-goals** for the API standard include:
+
+- Making array libraries identical so they can be merged.
+
+ _Each library will keep having its own particular strength, whether it's
+ offering functionality beyond what's in the standard, performance advantages
+ for a given use case, specific hardware or software environment support, or
+ more._
+
+- Implement a backend or runtime switching system to be able to switch from one
+ array library to another with a single setting or line of code.
+
+ _This may be feasible, however it's assumed that when an array-consuming
+ library switches from one array type to another, some testing and possibly
+ code adjustment for performance or other reasons may be needed._
+
+- Making it possible to mix multiple array libraries in function calls.
+
+ _Most array libraries do not know about other libraries, and the functions
+ they implement may try to convert "foreign" input, or raise an exception.
+ This behaviour is hard to specify; ensuring only a single array type is
+ used is best left to the end user._
+
+
+### Implications of in/out of scope
+
+If something is out of scope and therefore will not be part of (the current
+version of) the API standard, that means that there are no guarantees that that
+functionality works the same way, or even exists at all, across the set of
+array libraries that conform to the standard. It does _not_ imply that this
+functionality is less important or should not be used.
+
+
+## Stakeholders
+
+Arrays are fundamental to scientific computing, data science, and machine
+learning and deep learning. Hence there are many stakeholders for an array API
+standard. The _direct_ stakeholders of this standard are **authors/maintainers of
+Python array libraries**. There are many more types of _indirect_ stakeholders
+though, including:
+
+- maintainers of libraries and other programs which depend on array libraries
+ (called "array-consuming libraries" in the rest of this document)
+- authors of non-Python array libraries
+- developers of compilers and runtimes with array-specific functionality
+- end users
+
+Libraries that are being actively considered - in terms of current behaviour and
+API surface - during the creation of the first version of this standard
+include:
+
+- [NumPy](https://numpy.org)
+- [TensorFlow](https://www.tensorflow.org/)
+- [PyTorch](https://pytorch.org/)
+- [MXNet](https://numpy.mxnet.io/)
+- [JAX](https://github.com/google/jax)
+- [Dask](https://dask.org/)
+- [CuPy](https://cupy.chainer.org/)
+
+Other Python array libraries that are currently under active development and
+could adopt this API standard include:
+
+- [xarray](https://xarray.pydata.org/)
+- [PyData/Sparse](https://sparse.pydata.org)
+- [Weld](https://github.com/weld-project/weld)
+- [Bohrium](https://bohrium.readthedocs.io/)
+- [Arkouda](https://github.com/mhmerrill/arkouda)
+- [Legate](https://research.nvidia.com/publication/2019-11_Legate-NumPy%3A-Accelerated)
+
+There is a huge number of array-consuming libraries; some of the most
+prominent ones that are being taken into account - in terms of current array
+API usage or impact of design decisions on them - include (this list is likely
+to grow over time):
+
+- [Pandas](https://pandas.pydata.org/)
+- [SciPy](https://github.com/scipy/scipy)
+- [scikit-learn](https://scikit-learn.org/)
+- [Matplotlib](https://matplotlib.org/)
+- [scikit-image](https://scikit-image.org/)
+- [NetworkX](https://networkx.github.io/)
+
+Array libraries in other languages, some of which may grow a Python API in the
+future or have taken inspiration from NumPy or other array libraries, include:
+
+- [Xtensor](https://xtensor.readthedocs.io) (C++, cross-language)
+- [XND](https://xnd.io/) (C, cross-language)
+- [stdlib](https://stdlib.io/) (JavaScript)
+- [rust-ndarray](https://github.com/rust-ndarray/ndarray) (Rust)
+- [rray](https://github.com/r-lib/rray) (R)
+- [ND4J](https://github.com/deeplearning4j/nd4j) (JVM)
+- [NumSharp](https://github.com/SciSharp/NumSharp) (C#)
+
+Compilers, runtimes, and dispatching layers for which this API standard may be
+relevant:
+
+- [Cython](https://cython.org/)
+- [Numba](http://numba.pydata.org/)
+- [Pythran](https://pythran.readthedocs.io/en/latest/)
+- [Transonic](https://transonic.readthedocs.io)
+- [ONNX](https://onnx.ai/)
+- [Apache TVM](https://tvm.apache.org/)
+- [MLIR](https://mlir.llvm.org/)
+- [TACO](https://github.com/tensor-compiler/taco)
+- [unumpy](https://github.com/Quansight-Labs/unumpy)
+- [einops](https://github.com/arogozhnikov/einops)
+- [Apache Arrow](https://arrow.apache.org/)
+
+
+
+## How to read this document
+
+For guidance on how to read and understand the type annotations included in this specification, consult the Python [documentation](https://docs.python.org/3/library/typing.html).
+
+
+(how-to-adopt-this-api)=
+
+## How to adopt this API
+
+Most (all) existing array libraries will find something in this API standard
+that is incompatible with a current implementation, and that they cannot
+change due to backwards compatibility concerns. Therefore we expect that each
+of those libraries will want to offer a standard-compliant API in a _new
+namespace_. The question then becomes: how does a user access this namespace?
+
+The simplest method is: document the import to use to directly access the
+namespace (e.g. `import package_name.array_api`). This has two issues though:
+
+1. Array-consuming libraries that want to support multiple array libraries
+ then have to explicitly import each library.
+2. It is difficult to _version_ the array API standard implementation (see
+ {ref}`api-versioning`).
+
+To address both issues, a uniform way must be provided by a conforming
+implementation to access the API namespace, namely a [method on the array
+object](array.__array_namespace__):
+
+```
+xp = x.__array_namespace__()
+```
+
+The method must take one keyword, `api_version=None`, to make it possible to
+request a specific API version:
+
+```
+xp = x.__array_namespace__(api_version='2020.10')
+```
+
+The `xp` namespace must contain all functionality specified in
+{ref}`api-specification`. The namespace may contain other functionality; however,
+including additional functionality is not recommended as doing so may hinder
+portability and inter-operation of array libraries within user code.
+
+### Checking an array object for compliance
+
+Array-consuming libraries are likely to want a mechanism for determining
+whether a provided array is specification compliant. The recommended approach
+to check for compliance is by checking whether an array object has an
+`__array_namespace__` attribute, as this is the one distinguishing feature of
+an array-compliant object.
+
+Checking for an `__array_namespace__` attribute can be implemented as a small
+utility function similar to the following.
+
+```python
+def is_array_api_obj(x):
+ return hasattr(x, '__array_namespace__')
+```
+
+```{note}
+Providing a "reference library" on which people depend is out-of-scope for
+the standard. Hence the standard cannot, e.g., provide an array ABC from
+which libraries can inherit to enable an `isinstance` check. However, note
+that the `numpy.array_api` implementation aims to provide a reference
+implementation with only the behavior specified in this standard - it may
+prove useful for verifying one is writing portable code.
+```
+
+### Discoverability of conforming implementations
+
+It may be useful to have a way to discover all packages in a Python
+environment which provide a conforming array API implementation, and the
+namespace that that implementation resides in.
+To assist array-consuming libraries which need to create arrays originating
+from multiple conforming array implementations, or developers who want to perform
+for example cross-library testing, libraries may provide an
+{pypa}`entry point <specifications/entry-points/>` in order to make an array API
+namespace discoverable.
+
+:::{admonition} Optional feature
+Given that entry points typically require build system & package installer
+specific implementation, this standard chooses to recommend rather than
+mandate providing an entry point.
+:::
+
+The following code is an example for how one can discover installed
+conforming libraries:
+
+```python
+from importlib.metadata import entry_points
+
+try:
+ eps = entry_points()['array_api']
+ ep = next(ep for ep in eps if ep.name == 'package_name')
+except TypeError:
+ # The dict interface for entry_points() is deprecated in py3.10,
+ # supplanted by a new select interface.
+ ep = entry_points(group='array_api', name='package_name')
+
+xp = ep.load()
+```
+
+An entry point must have the following properties:
+
+- **group**: equal to `array_api`.
+- **name**: equal to the package name.
+- **object reference**: equal to the array API namespace import path.
+
+
+* * *
+
+## Conformance
+
+A conforming implementation of the array API standard must provide and support
+all the functions, arguments, data types, syntax, and semantics described in
+this specification.
+
+A conforming implementation of the array API standard may provide additional
+values, objects, properties, data types, and functions beyond those described
+in this specification.
+
+Libraries which aim to provide a conforming implementation but haven't yet
+completed such an implementation may, and are encouraged to, provide details on
+the level of (non-)conformance. For details on how to do this, see
+[Verification - measuring conformance](verification_test_suite.md).
+
+
+* * *
+
+## Terms and Definitions
+
+For the purposes of this specification, the following terms and definitions apply.
+
+
+
+**array**:
+a (usually fixed-size) multidimensional container of items of the same type and size.
+
+**axis**:
+an array dimension.
+
+**branch cut**:
+a curve in the complex plane across which a given complex function fails to be continuous.
+
+**broadcast**:
+automatic (implicit) expansion of array dimensions to be of equal sizes without copying array data for the purpose of making arrays with different shapes have compatible shapes for element-wise operations.
+
+**compatible**:
+two arrays whose dimensions are compatible (i.e., where the size of each dimension in one array is either equal to one or to the size of the corresponding dimension in a second array).
+
+**element-wise**:
+an operation performed element-by-element, in which individual array elements are considered in isolation and independently of other elements within the same array.
+
+**matrix**:
+a two-dimensional array.
+
+**rank**:
+number of array dimensions (not to be confused with the number of linearly independent columns of a matrix).
+
+**shape**:
+a tuple of `N` non-negative integers that specify the sizes of each dimension and where `N` corresponds to the number of dimensions.
+
+**singleton dimension**:
+a dimension whose size is one.
+
+**vector**:
+a one-dimensional array.
+
+* * *
+
+## Normative References
+
+The following referenced documents are indispensable for the application of this specification.
+
+- __IEEE 754-2019: IEEE Standard for Floating-Point Arithmetic.__ Institute of Electrical and Electronic Engineers, New York (2019).
+- Scott Bradner. 1997. "Key words for use in RFCs to Indicate Requirement Levels". RFC 2119. doi:[10.17487/rfc2119](https://tools.ietf.org/html/rfc2119).
diff --git a/spec/2022.12/usage_data.md b/spec/2022.12/usage_data.md
new file mode 100644
index 000000000..c2dcd5d65
--- /dev/null
+++ b/spec/2022.12/usage_data.md
@@ -0,0 +1,86 @@
+(usage-data)=
+
+# Usage Data
+
+> Summary of existing array API design and usage.
+
+## Introduction
+
+With rare exception, technical standardization ("standardization") occurs neither in a vacuum nor from first principles. Instead, standardization finds its origins in two or more, sometimes competing, implementations differing in design and behavior. These differences introduce friction as those (e.g., downstream end-users and library authors) who operate at higher levels of abstraction must either focus on an implementation subset (e.g., only NumPy-like array libraries) or accommodate variation through increased complexity (e.g., if NumPy array, call method `.foo()`; else if Dask array, call method `.bar()`).
+
+Standardization aspires to reduce this friction and is a process which codifies that which is common, while still encouraging experimentation and innovation. Through the process of standardization, implementations can align around a subset of established practices and channel development resources toward that which is new and novel. In short, standardization aims to thwart reinventing the proverbial wheel.
+
+A foundational step in standardization is articulating a subset of established practices and defining those practices in unambiguous terms. To this end, the standardization process must approach the problem from two directions: **design** and **usage**. The former direction seeks to understand
+
+- current implementation design (APIs, names, signatures, classes, and objects)
+- current implementation semantics (calling conventions and behavior)
+
+while the latter direction seeks to quantify API
+
+- consumers (e.g., which downstream libraries utilize an API?)
+- usage frequency (e.g., how often is an API consumed?)
+- consumption patterns (e.g., which optional arguments are provided and in what context?)
+
+By analyzing both design and usage, the standardization process grounds specification decisions in empirical data and analysis.
+
+## Design
+
+To understand API design, standardization proceeds as follows.
+
+- Identify a representative sample of commonly used Python array libraries (e.g., NumPy, Dask Array, CuPy, MXNet, JAX, TensorFlow, and PyTorch).
+- Acquire public APIs (e.g., by analyzing module exports and scraping public documentation).
+- Unify and standardize public API data representation for subsequent analysis.
+- Extract commonalities and differences by analyzing the intersection and complement of available APIs.
+- Derive a common API subset suitable for standardization (based on prevalence and ease of implementation), where such a subset may include attribute names, method names, and positional and keyword arguments.
+- Leverage usage data to validate API need and to inform naming conventions, supported data types, and/or optional arguments.
+- Summarize findings and provide tooling for additional analysis and exploration.
+
+See the [`array-api-comparison`](https://github.com/data-apis/array-api-comparison)
+repository for design data and summary analysis.
+
+## Usage
+
+To understand usage patterns, standardization proceeds as follows.
+
+- Identify a representative sample of commonly used Python libraries ("downstream libraries") which consume the subset of array libraries identified during design analysis (e.g., pandas, Matplotlib, SciPy, Xarray, scikit-learn, and scikit-image).
+- Instrument downstream libraries in order to record Python array API calls.
+- Collect traces while running downstream library test suites.
+- Transform trace data into structured data (e.g., as JSON) for subsequent analysis.
+- Generate empirical APIs based on provided arguments and associated types, noting which downstream library called which empirical API and at what frequency.
+- Derive a single inferred API which unifies the individual empirical API calling semantics.
+- Organize API results in human-readable form as type definition files.
+- Compare the inferred API to the documented API.
+
+The following is an inferred API for `numpy.arange`. The docstring includes the number of lines of code that invoked this function for each downstream library when running downstream library test suites.
+
+```python
+def arange(
+ _0: object,
+ /,
+ *_args: object,
+ dtype: Union[type, str, numpy.dtype, None] = ...,
+ step: Union[int, float] = ...,
+ stop: int = ...,
+):
+ """
+ usage.dask: 347
+ usage.matplotlib: 359
+ usage.pandas: 894
+ usage.sample-usage: 4
+ usage.scipy: 1173
+ usage.skimage: 174
+ usage.sklearn: 373
+ usage.xarray: 666
+ """
+ ...
+```
+
+See the [`python-record-api`](https://github.com/data-apis/python-record-api) repository for source code, usage data, and analysis. To perform a similar analysis on additional downstream libraries, including those not publicly released, see the published PyPI [package](https://pypi.org/project/record_api/).
+
+## Use in Decision-Making
+
+Design and usage data support specification decision-making in the following ways.
+
+- Validate user stories to ensure that proposals satisfy existing needs.
+- Define scope to ensure that proposals address general array library design requirements (i.e., proposals must have broad applicability and be possible to implement with a reasonable amount of effort).
+- Inform technical design discussions to ensure that proposals are grounded in empirical data.
diff --git a/spec/2022.12/use_cases.md b/spec/2022.12/use_cases.md
new file mode 100644
index 000000000..e24aa50db
--- /dev/null
+++ b/spec/2022.12/use_cases.md
@@ -0,0 +1,235 @@
+(use-cases)=
+
+# Use cases
+
+Use cases inform the requirements for, and design choices made in, this array
+API standard. This section first discusses what types of use cases are
+considered, and then works out a few concrete use cases in more detail.
+
+## Types of use cases
+
+- Packages that depend on a specific array library currently, and would like
+ to support multiple of them (e.g. for GPU or distributed array support, for
+ improved performance, or for reaching a wider user base).
+- Writing new libraries/tools that wrap multiple array libraries.
+- Projects that implement new types of arrays with, e.g., hardware-specific
+ optimizations or auto-parallelization behavior, and need an API to put on
+ top that is familiar to end users.
+- End users that want to switch from one library to another without learning
+ about all the small differences between those libraries.
+
+
+## Concrete use cases
+
+- {ref}`use-case-scipy`
+- {ref}`use-case-einops`
+- {ref}`use-case-xtensor`
+- {ref}`use-case-numba`
+
+
+(use-case-scipy)=
+
+### Use case 1: add hardware accelerator and distributed support to SciPy
+
+When surveying a representative set of advanced users and research software
+engineers in 2019 (for [this NSF proposal](https://figshare.com/articles/Mid-Scale_Research_Infrastructure_-_The_Scientific_Python_Ecosystem/8009441)),
+the single most common pain point brought up about SciPy was performance.
+
+SciPy heavily relies on NumPy (its only non-optional runtime dependency).
+NumPy provides an array implementation that's in-memory, CPU-only and
+single-threaded. Common performance-related wishes users have are:
+
+- parallel algorithms (can be multi-threaded or multiprocessing based)
+- support for distributed arrays (with Dask in particular)
+- support for GPUs and other hardware accelerators (shortened to just "GPU"
+ in the rest of this use case)
+
+Some parallelism can be supported in SciPy; it has a `workers` keyword
+(similar to scikit-learn's `n_jobs` keyword) that allows specifying to use
+parallelism in some algorithms. However SciPy itself will not directly start
+depending on a GPU or distributed array implementation, or contain (e.g.)
+CUDA code - that's not maintainable given the resources for development.
+_However_, there is a way to provide distributed or GPU support. Part of the
+solution is provided by NumPy's "array protocols" (see [gh-1](https://github.com/data-apis/array-api/issues/1)), that allow
+dispatching to other array implementations. The main problem then becomes how
+to know whether this will work with a particular distributed or GPU array
+implementation - given that there are zero other array implementations that
+are even close to providing full NumPy compatibility - without adding that
+array implementation as a dependency.
+
+It's clear that SciPy functionality that relies on compiled extensions (C,
+C++, Cython, Fortran) directly can't easily be run on another array library
+than NumPy (see [C API](design_topics/C_API.rst) for more details about this topic). Pure Python
+code can work though. There are two main possibilities:
+
+1. Testing with another package, manually or in CI, and simply provide a list
+ of functionality that is found to work. Then make ad-hoc fixes to expand
+ the set that works.
+2. Start relying on a well-defined subset of the NumPy API (or a new
+ NumPy-like API), for which compatibility is guaranteed.
+
+Option (2) seems strongly preferable, and that "well-defined subset" is _what
+an API standard should provide_. Testing will still be needed, to ensure there
+are no critical corner cases or bugs between array implementations, however
+that's then a very tractable task.
+
+As a concrete example, consider the spectral analysis functions in `scipy.signal`.
+All of those functions (e.g., `periodogram`, `spectrogram`, `csd`, `welch`, `stft`,
+`istft`) are pure Python - with the exception of `lombscargle` which is ~40
+lines of Cython - and use NumPy function calls, array attributes and
+indexing. The beginning of each function could be changed to retrieve the
+module that implements the array API standard for the given input array type,
+and then functions from that module could be used instead of NumPy functions.
+
+If the user has another array type, say a CuPy or PyTorch array `x` on their
+GPU, doing:
+```
+from scipy import signal
+
+signal.welch(x)
+```
+will result in:
+```
+# For CuPy
+ValueError: object __array__ method not producing an array
+
+# For PyTorch
+TypeError: can't convert cuda:0 device type tensor to numpy.
+```
+and therefore the user will have to explicitly convert to and from a
+`numpy.ndarray` (which is quite inefficient):
+```
+# For CuPy
+x_np = cupy.asnumpy(x)
+freq, Pxx = (cupy.asarray(res) for res in signal.welch(x_np))
+
+# For PyTorch
+x_np = x.cpu().numpy()
+# Note: ends up with tensors on CPU, may still have to move them back
+freq, Pxx = (torch.tensor(res) for res in signal.welch(x_np))
+```
+This code will look a little different for each array library. The end goal
+here is to be able to write this instead as:
+```
+freq, Pxx = signal.welch(x)
+```
+and have `freq`, `Pxx` be arrays of the same type and on the same device as `x`.
+
+```{note}
+
+This type of use case applies to many other libraries, from scikit-learn
+and scikit-image to domain-specific libraries like AstroPy and
+scikit-bio, to code written for a single purpose or user.
+```
+
+(use-case-einops)=
+
+### Use case 2: simplify einops by removing the backend system
+
+[einops](https://github.com/arogozhnikov/einops) is a library that provides flexible tensor operations and supports many array libraries (NumPy, TensorFlow, PyTorch, CuPy, MXNet, JAX).
+Most of the code in `einops` is:
+
+- [einops.py](https://github.com/arogozhnikov/einops/blob/master/einops/einops.py)
+ contains the functions it offers as public API (`rearrange`, `reduce`, `repeat`).
+- [_backends.py](https://github.com/arogozhnikov/einops/blob/master/einops/_backends.py)
+ contains the glue code needed to support that many array libraries.
+
+The amount of code in each of those two files is almost the same (~550 LoC each).
+The typical pattern in `einops.py` is:
+```
+def some_func(x):
+ ...
+ backend = get_backend(x)
+ shape = backend.shape(x)
+ result = backend.reduce(x)
+ ...
+```
+With a standard array API, the `_backends.py` glue layer could almost completely disappear,
+because the purpose it serves (providing a unified interface to array operations from each
+of the supported backends) is already addressed by the array API standard.
+Hence the complete `einops` code base could be close to 50% smaller, and easier to maintain or add to.
+
+```{note}
+
+Other libraries that have a similar backend system to support many array libraries
+include [TensorLy](https://github.com/tensorly/tensorly), the (now discontinued)
+multi-backend version of [Keras](https://github.com/keras-team/keras),
+[Unumpy](https://github.com/Quansight-Labs/unumpy) and
+[EagerPy](https://github.com/jonasrauber/eagerpy). Many end users and
+organizations will also have such glue code - it tends to be needed whenever
+one tries to support multiple array types in a single API.
+```
+
+
+(use-case-xtensor)=
+
+### Use case 3: adding a Python API to xtensor
+
+[xtensor](https://github.com/xtensor-stack/xtensor) is a C++ array library
+that is NumPy-inspired and provides lazy arrays. It has Python (and Julia and R)
+bindings, however it does not have a Python array API.
+
+Xtensor aims to follow NumPy closely, however it only implements a subset of functionality
+and documents some API differences in
+[Notable differences with NumPy](https://xtensor.readthedocs.io/en/latest/numpy-differences.html).
+
+Note that other libraries document similar differences, see for example
+[this page for JAX](https://jax.readthedocs.io/en/latest/jax.numpy.html) and
+[this page for TensorFlow](https://www.tensorflow.org/guide/tf_numpy).
+
+Each time an array library author designs a new API, they have to choose (a)
+what subset of NumPy makes sense to implement, and (b) where to deviate
+because NumPy's API for a particular function is suboptimal or the semantics
+don't fit their execution model.
+
+This array API standard aims to provide an API that can be readily adopted,
+without having to make the above-mentioned choices.
+
+```{note}
+
+XND is another array library, written in C, that still needs a Python API.
+Array implementations in other languages are often in a similar situation,
+and could translate this array API standard 1:1 to their language.
+```
+
+
+(use-case-numba)=
+
+### Use case 4: make JIT compilation of array computations easier and more robust
+
+[Numba](https://github.com/numba/numba) is a Just-In-Time (JIT) compiler for
+numerical functions in Python; it is NumPy-aware. [PyPy](https://pypy.org)
+is an implementation of Python with a JIT at its core; its NumPy support relies
+on running NumPy itself through a compatibility layer (`cpyext`), while a
+previous attempt to implement NumPy support directly was unsuccessful.
+
+Other array libraries may have an internal JIT (e.g., TensorFlow, PyTorch,
+JAX, MXNet) or work with an external JIT like
+[XLA](https://www.tensorflow.org/xla) or [VTA](https://tvm.apache.org/docs/vta/index.html).
+
+Numba currently has to jump through some hoops to accommodate NumPy's casting rules
+and may not attain full compatibility with NumPy in some cases - see, e.g.,
+[this](https://github.com/numba/numba/issues/4749) or
+[this](https://github.com/numba/numba/issues/5907) example issue regarding (array) scalar
+return values.
+
+An [explicit suggestion from a Numba developer](https://twitter.com/esc___/status/1295389487485333505)
+for this array API standard was:
+
+> for JIT compilers (e.g. Numba) it will be important, that the type of the
+ returned value(s) depends only on the *types* of the input but not on the
+ *values*.
+
+A concrete goal for this use case is to have better matching between
+JIT-compiled and non-JIT execution. Here is an example from the Numba code
+base, the need for which should be avoided in the future:
+
+```
+def check(x, y):
+ got = cfunc(x, y)
+ np.testing.assert_array_almost_equal(got, pyfunc(x, y))
+ # Check the power operation conserved the input's dtype
+ # (this is different from Numpy, whose behaviour depends on
+ # the *values* of the arguments -- see PyArray_CanCastArrayTo).
+ self.assertEqual(got.dtype, x.dtype)
+```
diff --git a/spec/2022.12/verification_test_suite.md b/spec/2022.12/verification_test_suite.md
new file mode 100644
index 000000000..cbe770e48
--- /dev/null
+++ b/spec/2022.12/verification_test_suite.md
@@ -0,0 +1,62 @@
+# Verification - test suite
+
+## Measuring conformance
+
+In addition to the specification documents, a test suite is being developed to
+help library developers check conformance to the spec. **NOTE: The test suite
+is still a work in progress.** It can be found at
+<https://github.com/data-apis/array-api-tests>.
+
+It is important to note that while the aim of the array API test suite is to
+cover as much of the spec as possible, there are necessarily some aspects of
+the spec that are not covered by the test suite, typically because they are
+impossible to effectively test. Furthermore, if the test suite appears to
+diverge in any way from what the spec documents say, this should be considered
+a bug in the test suite. The specification is the ground source of truth.
+
+## Running the tests
+
+To run the tests, first clone the [test suite
+repo](https://github.com/data-apis/array-api-tests), and install the testing
+dependencies,
+
+ pip install pytest hypothesis
+
+or
+
+ conda install pytest hypothesis
+
+as well as the array libraries that you want to test. To run the tests, you
+need to specify the array library that is to be tested. There are two ways to
+do this. One way is to set the `ARRAY_API_TESTS_MODULE` environment variable.
+For example
+
+ ARRAY_API_TESTS_MODULE=numpy pytest
+
+Alternatively, edit the `array_api_tests/_array_module.py` file and change the
+line
+
+```py
+array_module = None
+```
+
+to
+
+```py
+import numpy as array_module
+```
+
+(replacing `numpy` with the array module namespace to be tested).
+
+In either case, the tests should be run with the `pytest` command.
+
+Aside from the two testing dependencies (`pytest` and `hypothesis`), the test
+suite has no dependencies. In particular, it does not depend on any specific
+array libraries such as NumPy. All tests are run using only the array library
+that is being tested, comparing results against the behavior as defined in the
+spec. The test suite is designed to be standalone so that it can easily be vendored.
+
+See the
+[README](https://github.com/data-apis/array-api-tests/blob/master/README.md)
+in the test suite repo for more information about how to run and interpret the
+test suite results.
diff --git a/spec/2023.12/API_specification/array_object.rst b/spec/2023.12/API_specification/array_object.rst
new file mode 100644
index 000000000..6a41d4016
--- /dev/null
+++ b/spec/2023.12/API_specification/array_object.rst
@@ -0,0 +1,308 @@
+.. _array-object:
+
+Array object
+============
+
+ Array API specification for array object attributes and methods.
+
+A conforming implementation of the array API standard must provide and support an array object having the following attributes and methods.
+
+Furthermore, a conforming implementation of the array API standard must support, at minimum, array objects of rank (i.e., number of dimensions) ``0``, ``1``, ``2``, ``3``, and ``4`` and must explicitly document their maximum supported rank ``N``.
+
+.. note::
+ Conforming implementations must support zero-dimensional arrays.
+
+ Apart from array object attributes, such as ``ndim``, ``device``, and ``dtype``, all operations in this standard return arrays (or tuples of arrays), including those operations, such as ``mean``, ``var``, and ``std``, from which some common array libraries (e.g., NumPy) return scalar values.
+
+ *Rationale: always returning arrays is necessary to (1) support accelerator libraries where non-array return values could force device synchronization and (2) support delayed execution models where an array represents a future value.*
+
+-------------------------------------------------
+
+.. _operators:
+
+Operators
+---------
+
+A conforming implementation of the array API standard must provide and support an array object supporting the following Python operators.
+
+Arithmetic Operators
+~~~~~~~~~~~~~~~~~~~~
+
+A conforming implementation of the array API standard must provide and support an array object supporting the following Python arithmetic operators.
+
+- ``+x``: :meth:`.array.__pos__`
+
+ - `operator.pos(x) `_
+ - `operator.__pos__(x) `_
+
+- `-x`: :meth:`.array.__neg__`
+
+ - `operator.neg(x) `_
+ - `operator.__neg__(x) `_
+
+- `x1 + x2`: :meth:`.array.__add__`
+
+ - `operator.add(x1, x2) `_
+ - `operator.__add__(x1, x2) `_
+
+- `x1 - x2`: :meth:`.array.__sub__`
+
+ - `operator.sub(x1, x2) `_
+ - `operator.__sub__(x1, x2) `_
+
+- `x1 * x2`: :meth:`.array.__mul__`
+
+ - `operator.mul(x1, x2) `_
+ - `operator.__mul__(x1, x2) `_
+
+- `x1 / x2`: :meth:`.array.__truediv__`
+
+  - `operator.truediv(x1, x2) `_
+ - `operator.__truediv__(x1, x2) `_
+
+- `x1 // x2`: :meth:`.array.__floordiv__`
+
+ - `operator.floordiv(x1, x2) `_
+ - `operator.__floordiv__(x1, x2) `_
+
+- `x1 % x2`: :meth:`.array.__mod__`
+
+ - `operator.mod(x1, x2) `_
+ - `operator.__mod__(x1, x2) `_
+
+- `x1 ** x2`: :meth:`.array.__pow__`
+
+ - `operator.pow(x1, x2) `_
+ - `operator.__pow__(x1, x2) `_
+
+Arithmetic operators should be defined for arrays having real-valued data types.
+
+Array Operators
+~~~~~~~~~~~~~~~
+
+A conforming implementation of the array API standard must provide and support an array object supporting the following Python array operators.
+
+- `x1 @ x2`: :meth:`.array.__matmul__`
+
+ - `operator.matmul(x1, x2) `_
+ - `operator.__matmul__(x1, x2) `_
+
+The matmul ``@`` operator should be defined for arrays having numeric data types.
+
+Bitwise Operators
+~~~~~~~~~~~~~~~~~
+
+A conforming implementation of the array API standard must provide and support an array object supporting the following Python bitwise operators.
+
+- `~x`: :meth:`.array.__invert__`
+
+ - `operator.inv(x) `_
+ - `operator.invert(x) `_
+ - `operator.__inv__(x) `_
+ - `operator.__invert__(x) `_
+
+- `x1 & x2`: :meth:`.array.__and__`
+
+  - `operator.and_(x1, x2) `_
+ - `operator.__and__(x1, x2) `_
+
+- `x1 | x2`: :meth:`.array.__or__`
+
+  - `operator.or_(x1, x2) `_
+ - `operator.__or__(x1, x2) `_
+
+- `x1 ^ x2`: :meth:`.array.__xor__`
+
+ - `operator.xor(x1, x2) `_
+ - `operator.__xor__(x1, x2) `_
+
+- `x1 << x2`: :meth:`.array.__lshift__`
+
+ - `operator.lshift(x1, x2) `_
+ - `operator.__lshift__(x1, x2) `_
+
+- `x1 >> x2`: :meth:`.array.__rshift__`
+
+ - `operator.rshift(x1, x2) `_
+ - `operator.__rshift__(x1, x2) `_
+
+Bitwise operators should be defined for arrays having integer and boolean data types.
+
+Comparison Operators
+~~~~~~~~~~~~~~~~~~~~
+
+A conforming implementation of the array API standard must provide and support an array object supporting the following Python comparison operators.
+
+- `x1 < x2`: :meth:`.array.__lt__`
+
+ - `operator.lt(x1, x2) `_
+ - `operator.__lt__(x1, x2) `_
+
+- `x1 <= x2`: :meth:`.array.__le__`
+
+ - `operator.le(x1, x2) `_
+ - `operator.__le__(x1, x2) `_
+
+- `x1 > x2`: :meth:`.array.__gt__`
+
+ - `operator.gt(x1, x2) `_
+ - `operator.__gt__(x1, x2) `_
+
+- `x1 >= x2`: :meth:`.array.__ge__`
+
+ - `operator.ge(x1, x2) `_
+ - `operator.__ge__(x1, x2) `_
+
+- `x1 == x2`: :meth:`.array.__eq__`
+
+ - `operator.eq(x1, x2) `_
+ - `operator.__eq__(x1, x2) `_
+
+- `x1 != x2`: :meth:`.array.__ne__`
+
+ - `operator.ne(x1, x2) `_
+ - `operator.__ne__(x1, x2) `_
+
+:meth:`.array.__lt__`, :meth:`.array.__le__`, :meth:`.array.__gt__`, :meth:`.array.__ge__` are only defined for arrays having real-valued data types. Other comparison operators should be defined for arrays having any data type.
+For backward compatibility, conforming implementations may support complex numbers; however, inequality comparison of complex numbers is unspecified and thus implementation-dependent (see :ref:`complex-number-ordering`).
+
+In-place Operators
+~~~~~~~~~~~~~~~~~~
+
+A conforming implementation of the array API standard must provide and support an array object supporting the following in-place Python operators.
+
+An in-place operation must not change the data type or shape of the in-place array as a result of :ref:`type-promotion` or :ref:`broadcasting`.
+
+An in-place operation must have the same behavior (including special cases) as its respective binary (i.e., two operand, non-assignment) operation. For example, after in-place addition ``x1 += x2``, the modified array ``x1`` must always equal the result of the equivalent binary arithmetic operation ``x1 = x1 + x2``.
+
+.. note::
+ In-place operators must be supported as discussed in :ref:`copyview-mutability`.
+
+Arithmetic Operators
+""""""""""""""""""""
+
+- ``+=``. May be implemented via ``__iadd__``.
+- ``-=``. May be implemented via ``__isub__``.
+- ``*=``. May be implemented via ``__imul__``.
+- ``/=``. May be implemented via ``__itruediv__``.
+- ``//=``. May be implemented via ``__ifloordiv__``.
+- ``**=``. May be implemented via ``__ipow__``.
+- ``%=``. May be implemented via ``__imod__``.
+
+Array Operators
+"""""""""""""""
+
+- ``@=``. May be implemented via ``__imatmul__``.
+
+Bitwise Operators
+"""""""""""""""""
+
+- ``&=``. May be implemented via ``__iand__``.
+- ``|=``. May be implemented via ``__ior__``.
+- ``^=``. May be implemented via ``__ixor__``.
+- ``<<=``. May be implemented via ``__ilshift__``.
+- ``>>=``. May be implemented via ``__irshift__``.
+
+Reflected Operators
+~~~~~~~~~~~~~~~~~~~
+
+A conforming implementation of the array API standard must provide and support an array object supporting the following reflected operators.
+
+The results of applying reflected operators must match their non-reflected equivalents.
+
+.. note::
+   All operators for which ``array <op> scalar`` is implemented must have an equivalent reflected operator implementation.
+
+Arithmetic Operators
+""""""""""""""""""""
+
+- ``__radd__``
+- ``__rsub__``
+- ``__rmul__``
+- ``__rtruediv__``
+- ``__rfloordiv__``
+- ``__rpow__``
+- ``__rmod__``
+
+Array Operators
+"""""""""""""""
+
+- ``__rmatmul__``
+
+Bitwise Operators
+"""""""""""""""""
+
+- ``__rand__``
+- ``__ror__``
+- ``__rxor__``
+- ``__rlshift__``
+- ``__rrshift__``
+
+-------------------------------------------------
+
+.. currentmodule:: array_api
+
+Attributes
+----------
+..
+ NOTE: please keep the attributes in alphabetical order
+
+
+.. autosummary::
+ :toctree: generated
+ :template: property.rst
+
+ array.dtype
+ array.device
+ array.mT
+ array.ndim
+ array.shape
+ array.size
+ array.T
+
+-------------------------------------------------
+
+Methods
+-------
+..
+ NOTE: please keep the methods in alphabetical order
+
+
+.. autosummary::
+ :toctree: generated
+ :template: property.rst
+
+ array.__abs__
+ array.__add__
+ array.__and__
+ array.__array_namespace__
+ array.__bool__
+ array.__complex__
+ array.__dlpack__
+ array.__dlpack_device__
+ array.__eq__
+ array.__float__
+ array.__floordiv__
+ array.__ge__
+ array.__getitem__
+ array.__gt__
+ array.__index__
+ array.__int__
+ array.__invert__
+ array.__le__
+ array.__lshift__
+ array.__lt__
+ array.__matmul__
+ array.__mod__
+ array.__mul__
+ array.__ne__
+ array.__neg__
+ array.__or__
+ array.__pos__
+ array.__pow__
+ array.__rshift__
+ array.__setitem__
+ array.__sub__
+ array.__truediv__
+ array.__xor__
+ array.to_device
diff --git a/spec/2023.12/API_specification/broadcasting.rst b/spec/2023.12/API_specification/broadcasting.rst
new file mode 100644
index 000000000..abb3ed222
--- /dev/null
+++ b/spec/2023.12/API_specification/broadcasting.rst
@@ -0,0 +1,128 @@
+.. _broadcasting:
+
+Broadcasting
+============
+
+ Array API specification for broadcasting semantics.
+
+Overview
+--------
+
+**Broadcasting** refers to the automatic (implicit) expansion of array dimensions to be of equal sizes without copying array data for the purpose of making arrays with different shapes have compatible shapes for element-wise operations.
+
+Broadcasting facilitates user ergonomics by encouraging users to avoid unnecessary copying of array data and can **potentially** enable more memory-efficient element-wise operations through vectorization, reduced memory consumption, and cache locality.
+
+Algorithm
+---------
+
+Given an element-wise operation involving two compatible arrays, an array having a singleton dimension (i.e., a dimension whose size is one) is broadcast (i.e., virtually repeated) across an array having a corresponding non-singleton dimension.
+
+If two arrays are of unequal rank, the array having a lower rank is promoted to a higher rank by (virtually) prepending singleton dimensions until the number of dimensions matches that of the array having a higher rank.
+
+The results of the element-wise operation must be stored in an array having a shape determined by the following algorithm.
+
+#. Let ``A`` and ``B`` both be arrays.
+
+#. Let ``shape1`` be a tuple describing the shape of array ``A``.
+
+#. Let ``shape2`` be a tuple describing the shape of array ``B``.
+
+#. Let ``N1`` be the number of dimensions of array ``A`` (i.e., the result of ``len(shape1)``).
+
+#. Let ``N2`` be the number of dimensions of array ``B`` (i.e., the result of ``len(shape2)``).
+
+#. Let ``N`` be the maximum value of ``N1`` and ``N2`` (i.e., the result of ``max(N1, N2)``).
+
+#. Let ``shape`` be a temporary list of length ``N`` for storing the shape of the result array.
+
+#. Let ``i`` be ``N-1``.
+
+#. Repeat, while ``i >= 0``
+
+ #. Let ``n1`` be ``N1 - N + i``.
+
+ #. If ``n1 >= 0``, let ``d1`` be the size of dimension ``n1`` for array ``A`` (i.e., the result of ``shape1[n1]``); else, let ``d1`` be ``1``.
+
+ #. Let ``n2`` be ``N2 - N + i``.
+
+ #. If ``n2 >= 0``, let ``d2`` be the size of dimension ``n2`` for array ``B`` (i.e., the result of ``shape2[n2]``); else, let ``d2`` be ``1``.
+
+ #. If ``d1 == 1``, then set the ``i``\th element of ``shape`` to ``d2``.
+
+ #. Else, if ``d2 == 1``, then
+
+ - set the ``i``\th element of ``shape`` to ``d1``.
+
+ #. Else, if ``d1 == d2``, then
+
+ - set the ``i``\th element of ``shape`` to ``d1``.
+
+ #. Else, throw an exception.
+
+ #. Set ``i`` to ``i-1``.
+
+#. Let ``tuple(shape)`` be the shape of the result array.
+
+Examples
+~~~~~~~~
+
+The following examples demonstrate the application of the broadcasting algorithm for two compatible arrays.
+
+::
+
+ A (4d array): 8 x 1 x 6 x 1
+ B (3d array): 7 x 1 x 5
+ ---------------------------------
+ Result (4d array): 8 x 7 x 6 x 5
+ A (2d array): 5 x 4
+ B (1d array): 1
+ -------------------------
+ Result (2d array): 5 x 4
+ A (2d array): 5 x 4
+ B (1d array): 4
+ -------------------------
+ Result (2d array): 5 x 4
+ A (3d array): 15 x 3 x 5
+ B (3d array): 15 x 1 x 5
+ ------------------------------
+ Result (3d array): 15 x 3 x 5
+ A (3d array): 15 x 3 x 5
+ B (2d array): 3 x 5
+ ------------------------------
+ Result (3d array): 15 x 3 x 5
+ A (3d array): 15 x 3 x 5
+ B (2d array): 3 x 1
+ ------------------------------
+ Result (3d array): 15 x 3 x 5
+
+
+The following examples demonstrate array shapes which do **not** broadcast.
+
+::
+
+ A (1d array): 3
+ B (1d array): 4 # dimension does not match
+
+ A (2d array): 2 x 1
+ B (3d array): 8 x 4 x 3 # second dimension does not match
+
+ A (3d array): 15 x 3 x 5
+ B (2d array): 15 x 3 # singleton dimensions can only be prepended, not appended
+
+In-place Semantics
+------------------
+
+As implied by the broadcasting algorithm, in-place element-wise operations (including ``__setitem__``) must not change the shape of the in-place array as a result of broadcasting. Such operations should only be supported in the case where the right-hand operand can broadcast to the shape of the left-hand operand, after any indexing operations are performed.
+
+For example:
+
+::
+
+ x = empty((2, 3, 4))
+ a = empty((1, 3, 4))
+
+ # This is OK. The shape of a, (1, 3, 4), can broadcast to the shape of x[...], (2, 3, 4)
+ x[...] = a
+
+ # This is not allowed. The shape of a, (1, 3, 4), can NOT broadcast to the shape of x[1, ...], (3, 4)
+ x[1, ...] = a
diff --git a/spec/2023.12/API_specification/constants.rst b/spec/2023.12/API_specification/constants.rst
new file mode 100644
index 000000000..71cb8688d
--- /dev/null
+++ b/spec/2023.12/API_specification/constants.rst
@@ -0,0 +1,26 @@
+Constants
+=========
+
+ Array API specification for constants.
+
+A conforming implementation of the array API standard must provide and support the following constants adhering to the following conventions.
+
+- Each constant must have a Python floating-point data type (i.e., ``float``) and be provided as a Python scalar value.
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api.constants
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: attribute.rst
+
+ e
+ inf
+ nan
+ newaxis
+ pi
diff --git a/spec/2023.12/API_specification/creation_functions.rst b/spec/2023.12/API_specification/creation_functions.rst
new file mode 100644
index 000000000..ff5c06368
--- /dev/null
+++ b/spec/2023.12/API_specification/creation_functions.rst
@@ -0,0 +1,36 @@
+Creation Functions
+==================
+
+ Array API specification for creating arrays.
+
+A conforming implementation of the array API standard must provide and support the following functions.
+
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ arange
+ asarray
+ empty
+ empty_like
+ eye
+ from_dlpack
+ full
+ full_like
+ linspace
+ meshgrid
+ ones
+ ones_like
+ tril
+ triu
+ zeros
+ zeros_like
diff --git a/spec/2023.12/API_specification/data_type_functions.rst b/spec/2023.12/API_specification/data_type_functions.rst
new file mode 100644
index 000000000..d42968c7b
--- /dev/null
+++ b/spec/2023.12/API_specification/data_type_functions.rst
@@ -0,0 +1,26 @@
+Data Type Functions
+===================
+
+ Array API specification for data type functions.
+
+A conforming implementation of the array API standard must provide and support the following data type functions.
+
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ astype
+ can_cast
+ finfo
+ iinfo
+ isdtype
+ result_type
diff --git a/spec/2023.12/API_specification/data_types.rst b/spec/2023.12/API_specification/data_types.rst
new file mode 100644
index 000000000..5987dd322
--- /dev/null
+++ b/spec/2023.12/API_specification/data_types.rst
@@ -0,0 +1,143 @@
+.. _data-types:
+
+Data Types
+==========
+
+ Array API specification for supported data types.
+
+A conforming implementation of the array API standard must provide and support
+the following data types ("dtypes") in its array object, and as data type
+objects in its main namespace under the specified names:
+
++--------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| dtype object | description |
++==============+============================================================================================================================================================================================+
+| bool | Boolean (``True`` or ``False``). |
++--------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| int8 | An 8-bit signed integer whose values exist on the interval ``[-128, +127]``. |
++--------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| int16        | A 16-bit signed integer whose values exist on the interval ``[-32,767, +32,767]``.                                                                                        |
++--------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| int32        | A 32-bit signed integer whose values exist on the interval ``[-2,147,483,647, +2,147,483,647]``.                                                                          |
++--------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| int64        | A 64-bit signed integer whose values exist on the interval ``[-9,223,372,036,854,775,807, +9,223,372,036,854,775,807]``.                                                  |
++--------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| uint8 | An 8-bit unsigned integer whose values exist on the interval ``[0, +255]``. |
++--------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| uint16 | A 16-bit unsigned integer whose values exist on the interval ``[0, +65,535]``. |
++--------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| uint32 | A 32-bit unsigned integer whose values exist on the interval ``[0, +4,294,967,295]``. |
++--------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| uint64 | A 64-bit unsigned integer whose values exist on the interval ``[0, +18,446,744,073,709,551,615]``. |
++--------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| float32 | IEEE 754 single-precision (32-bit) binary floating-point number (see IEEE 754-2019). |
++--------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| float64 | IEEE 754 double-precision (64-bit) binary floating-point number (see IEEE 754-2019). |
++--------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| complex64 | Single-precision (64-bit) complex floating-point number whose real and imaginary components must be IEEE 754 single-precision (32-bit) binary floating-point numbers (see IEEE 754-2019). |
++--------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| complex128 | Double-precision (128-bit) complex floating-point number whose real and imaginary components must be IEEE 754 double-precision (64-bit) binary floating-point numbers (see IEEE 754-2019). |
++--------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+
+Data type objects must have the following methods (no attributes are required):
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. currentmodule:: array_api.data_types
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ __eq__
+
+
+.. note::
+ A conforming implementation of the array API standard may provide and
+ support additional data types beyond those described in this specification.
+ It may also support additional methods and attributes on dtype objects.
+
+.. note::
+ IEEE 754-2019 requires support for subnormal (a.k.a., denormal) numbers, which are useful for supporting gradual underflow. However, hardware support for subnormal numbers is not universal, and many platforms (e.g., accelerators) and compilers support toggling denormals-are-zero (DAZ) and/or flush-to-zero (FTZ) behavior to increase performance and to guard against timing attacks.
+
+ Accordingly, subnormal behavior is left unspecified and, thus, implementation-defined. Conforming implementations may vary in their support for subnormal numbers.
+
+
+Use of data type objects
+------------------------
+
+Data type objects are used as ``dtype`` specifiers in functions and methods
+(e.g., ``zeros((2, 3), dtype=float32)``), accessible as ``.dtype`` attribute on
+arrays, and used in various casting and introspection functions (e.g.,
+``isdtype(x.dtype, 'integral')``).
+
+``dtype`` keywords in functions specify the data type of arrays returned from
+functions or methods. ``dtype`` keywords are not required to affect the data
+type used for intermediate calculations or results (e.g., implementors are free
+to use a higher-precision data type when accumulating values for reductions, as
+long as the returned array has the specified data type).
+
+.. note::
+ Implementations may provide other ways to specify data types (e.g., ``zeros((2, 3), dtype='f4')``) which are not described in this specification; however, in order to ensure portability, array library consumers are recommended to use data type objects as provided by specification conforming array libraries.
+
+See :ref:`type-promotion` for specification guidance describing the rules governing the interaction of two or more data types or data type objects.
+
+
+.. _data-type-defaults:
+
+Default Data Types
+------------------
+
+A conforming implementation of the array API standard must define the following default data types.
+
+- a default real-valued floating-point data type (either ``float32`` or ``float64``).
+- a default complex floating-point data type (either ``complex64`` or ``complex128``).
+- a default integer data type (either ``int32`` or ``int64``).
+- a default array index data type (either ``int32`` or ``int64``).
+
+The default real-valued floating-point and complex floating-point data types must be the same across platforms.
+
+The default complex floating-point data type should match the default real-valued floating-point data type. For example, if the default real-valued floating-point data type is ``float32``, the default complex floating-point data type must be ``complex64``. If the default real-valued floating-point data type is ``float64``, the default complex floating-point data type must be ``complex128``.
+
+The default integer data type should be the same across platforms, but the default may vary depending on whether Python is 32-bit or 64-bit.
+
+The default array index data type may be ``int32`` on 32-bit platforms, but the default should be ``int64`` otherwise.
+
+Note that it is possible that a library supports multiple devices, with not all
+those device types supporting the same data types. In this case, the default
+integer or floating-point data types may vary with device. If that is the case,
+the library should clearly warn about this in its documentation.
+
+.. note::
+ The default data types should be clearly defined in a conforming library's documentation.
+
+
+.. _data-type-categories:
+
+Data Type Categories
+--------------------
+
+For the purpose of organizing functions within this specification, the following data type categories are defined.
+
++----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+
+| data type category | dtypes |
++============================+========================================================================================================================================================+
+| Numeric | ``int8``, ``int16``, ``int32``, ``int64``, ``uint8``, ``uint16``, ``uint32``, ``uint64``, ``float32``, ``float64``, ``complex64``, and ``complex128``. |
++----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Real-valued | ``int8``, ``int16``, ``int32``, ``int64``, ``uint8``, ``uint16``, ``uint32``, ``uint64``, ``float32``, and ``float64``. |
++----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Integer | ``int8``, ``int16``, ``int32``, ``int64``, ``uint8``, ``uint16``, ``uint32``, and ``uint64``. |
++----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Floating-point | ``float32``, ``float64``, ``complex64``, and ``complex128``. |
++----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Real-valued floating-point | ``float32`` and ``float64``. |
++----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Complex floating-point | ``complex64`` and ``complex128``. |
++----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Boolean | ``bool``. |
++----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+
+
+
+.. note::
+ Conforming libraries are not required to organize data types according to these categories. These categories are only intended for use within this specification.
diff --git a/spec/2023.12/API_specification/elementwise_functions.rst b/spec/2023.12/API_specification/elementwise_functions.rst
new file mode 100644
index 000000000..4919cff98
--- /dev/null
+++ b/spec/2023.12/API_specification/elementwise_functions.rst
@@ -0,0 +1,84 @@
+.. _element-wise-functions:
+
+Element-wise Functions
+======================
+
+ Array API specification for element-wise functions.
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ abs
+ acos
+ acosh
+ add
+ asin
+ asinh
+ atan
+ atan2
+ atanh
+ bitwise_and
+ bitwise_left_shift
+ bitwise_invert
+ bitwise_or
+ bitwise_right_shift
+ bitwise_xor
+ ceil
+ clip
+ conj
+ copysign
+ cos
+ cosh
+ divide
+ equal
+ exp
+ expm1
+ floor
+ floor_divide
+ greater
+ greater_equal
+ hypot
+ imag
+ isfinite
+ isinf
+ isnan
+ less
+ less_equal
+ log
+ log1p
+ log2
+ log10
+ logaddexp
+ logical_and
+ logical_not
+ logical_or
+ logical_xor
+ maximum
+ minimum
+ multiply
+ negative
+ not_equal
+ positive
+ pow
+ real
+ remainder
+ round
+ sign
+ signbit
+ sin
+ sinh
+ square
+ sqrt
+ subtract
+ tan
+ tanh
+ trunc
diff --git a/spec/2023.12/API_specification/function_and_method_signatures.rst b/spec/2023.12/API_specification/function_and_method_signatures.rst
new file mode 100644
index 000000000..0eca2ac69
--- /dev/null
+++ b/spec/2023.12/API_specification/function_and_method_signatures.rst
@@ -0,0 +1,63 @@
+.. _function-and-method-signatures:
+
+Function and method signatures
+==============================
+
+Function signatures in this standard adhere to the following:
+
+1. Positional parameters should be `positional-only `_ parameters.
+ Positional-only parameters have no externally-usable name. When a function
+ accepting positional-only parameters is called, positional arguments are
+ mapped to these parameters based solely on their order.
+
+ *Rationale: existing libraries have incompatible conventions, and using names
+ of positional parameters is not normal/recommended practice.*
+
+ .. note::
+
+ Positional-only parameters are only available in Python >= 3.8. Libraries
+ still supporting 3.7 or 3.6 may consider making the API standard-compliant
+ namespace >= 3.8. Alternatively, they can add guidance to their users in the
+ documentation to use the functions as if they were positional-only.
+
+2. Optional parameters should be `keyword-only `_ arguments.
+
+ *Rationale: this leads to more readable code, and it makes it easier to
+ evolve an API over time by adding keywords without having to worry about
+ keyword order.*
+
+3. For functions that have a single positional array parameter, that parameter
+ is called ``x``. For functions that have multiple array parameters, those
+ parameters are called ``xi`` with ``i = 1, 2, ...`` (i.e., ``x1``, ``x2``).
+
+4. Signatures include type annotations. The type annotations are also added to
+ individual parameter and return value descriptions. For code which aims to
+ adhere to the standard, adding type annotations is strongly recommended.
+
+A function signature and description will look like:
+
+::
+
+ funcname(x1, x2, /, *, key1=-1, key2=None) -> out:
+ Parameters
+
+ x1 : array
+ description
+ x2 : array
+ description
+ key1 : int
+ description
+ key2 : Optional[str]
+ description
+
+ Returns
+
+ out : array
+ description
+
+
+Method signatures will follow the same conventions modulo the addition of ``self``.
+
+Note that there are a few exceptions to rules (1) and (2), in cases where
+it enhances readability or use of the non-default form of the parameter in
+question is commonly used in code written for existing array libraries.
diff --git a/spec/2023.12/API_specification/index.rst b/spec/2023.12/API_specification/index.rst
new file mode 100644
index 000000000..ffc3d3775
--- /dev/null
+++ b/spec/2023.12/API_specification/index.rst
@@ -0,0 +1,41 @@
+.. _api-specification:
+
+API specification
+=================
+
+A conforming implementation of the array API standard must provide and support the APIs and behavior detailed in this specification while adhering to the following conventions.
+
+- When a function signature includes a `/`, positional parameters must be `positional-only <https://www.python.org/dev/peps/pep-0570/>`_ parameters. See :ref:`function-and-method-signatures`.
+- When a function signature includes a `*`, optional parameters must be `keyword-only <https://www.python.org/dev/peps/pep-3102/>`_ arguments. See :ref:`function-and-method-signatures`.
+- Broadcasting semantics must follow the semantics defined in :ref:`broadcasting`.
+- Unless stated otherwise, functions must support the data types defined in :ref:`data-types`.
+- Functions may only be required for a subset of input data types. Libraries may choose to implement functions for additional data types, but that behavior is not required by the specification. See :ref:`data-type-categories`.
+- Unless stated otherwise, functions must adhere to the type promotion rules defined in :ref:`type-promotion`.
+- Unless stated otherwise, floating-point operations must adhere to IEEE 754-2019.
+- Unless stated otherwise, element-wise mathematical functions must satisfy the minimum accuracy requirements defined in :ref:`accuracy`.
+
+
+.. toctree::
+ :caption: API specification
+ :maxdepth: 3
+
+ array_object
+ broadcasting
+ constants
+ creation_functions
+ data_type_functions
+ data_types
+ elementwise_functions
+ function_and_method_signatures
+ indexing
+ indexing_functions
+ inspection
+ linear_algebra_functions
+ manipulation_functions
+ searching_functions
+ set_functions
+ sorting_functions
+ statistical_functions
+ type_promotion
+ utility_functions
+ version
diff --git a/spec/2023.12/API_specification/indexing.rst b/spec/2023.12/API_specification/indexing.rst
new file mode 100644
index 000000000..eb61c26d5
--- /dev/null
+++ b/spec/2023.12/API_specification/indexing.rst
@@ -0,0 +1,208 @@
+.. _indexing:
+
+Indexing
+========
+
+ Array API specification for indexing arrays.
+
+A conforming implementation of the array API standard must adhere to the following conventions.
+
+Single-axis Indexing
+--------------------
+
+To index a single array axis, an array must support standard Python indexing rules. Let ``n`` be the axis (dimension) size.
+
+- An integer index must be an object satisfying `operator.index <https://docs.python.org/3/reference/datamodel.html#object.__index__>`_ (e.g., ``int``).
+
+- Nonnegative indices must start at ``0`` (i.e., zero-based indexing).
+
+- **Valid** nonnegative indices must reside on the half-open interval ``[0, n)``.
+
+ .. note::
+ This specification does not require bounds checking. The behavior for out-of-bounds integer indices is left unspecified.
+
+- Negative indices must count backward from the last array index, starting from ``-1`` (i.e., negative-one-based indexing, where ``-1`` refers to the last array index).
+
+ .. note::
+     A negative index ``j`` is equivalent to ``n+j``; the former is syntactic sugar for the latter, providing a shorthand for indexing elements that would otherwise need to be specified in terms of the axis (dimension) size.
+
+- **Valid** negative indices must reside on the closed interval ``[-n, -1]``.
+
+ .. note::
+ This specification does not require bounds checking. The behavior for out-of-bounds integer indices is left unspecified.
+
+- A negative index ``j`` is related to a zero-based nonnegative index ``i`` via ``i = n+j``.
+
+- Colons ``:`` must be used for `slices <https://docs.python.org/3/reference/expressions.html#slicings>`_: ``start:stop:step``, where ``start`` is inclusive and ``stop`` is exclusive.
+
+ .. note::
+ The specification does not support returning scalar (i.e., non-array) values from operations, including indexing. In contrast to standard Python indexing rules, for any index, or combination of indices, which select a single value, the result must be a zero-dimensional array containing the selected value.
+
+Slice Syntax
+~~~~~~~~~~~~
+
+The basic slice syntax is ``i:j:k`` where ``i`` is the starting index, ``j`` is the stopping index, and ``k`` is the step (``k != 0``). A slice may contain either one or two colons, with either an integer value or nothing on either side of each colon. The following are valid slices.
+
+::
+
+    A[:]
+    A[i:]
+    A[:j]
+    A[i:j]
+    A[::]
+    A[i::]
+    A[:j:]
+    A[::k]
+    A[i:j:]
+    A[i::k]
+    A[:j:k]
+    A[i:j:k]
+
+.. note::
+    Slice syntax can be equivalently achieved using the Python built-in `slice() <https://docs.python.org/3/library/functions.html#slice>`_ API. From the perspective of ``A``, the behavior of ``A[i:j:k]`` and ``A[slice(i, j, k)]`` is indistinguishable (i.e., both retrieve the same set of items from ``__getitem__``).
+
+Using a slice to index a single array axis must select ``m`` elements with index values
+
+::
+
+ i, i+k, i+2k, i+3k, ..., i+(m-1)k
+
+where
+
+::
+
+    m = q + (r != 0)
+
+and ``q`` and ``r`` are the quotient and remainder obtained by dividing ``j-i`` by ``k``
+
+::
+
+ j - i = qk + r
+
+such that
+
+::
+
+ j > i + (m-1)k
+
+.. note::
+ For ``i`` on the interval ``[0, n)`` (where ``n`` is the axis size), ``j`` on the interval ``(0, n]``, ``i`` less than ``j``, and positive step ``k``, a starting index ``i`` is **always** included, while the stopping index ``j`` is **always** excluded. This preserves ``x[:i]+x[i:]`` always being equal to ``x``.
+
+.. note::
+ Using a slice to index into a single array axis should select the same elements as using a slice to index a Python list of the same size.
+
+Slice syntax must have the following defaults. Let ``n`` be the axis (dimension) size.
+
+- If ``k`` is not provided (e.g., ``0:10``), ``k`` must equal ``1``.
+- If ``k`` is greater than ``0`` and ``i`` is not provided (e.g., ``:10:2``), ``i`` must equal ``0``.
+- If ``k`` is greater than ``0`` and ``j`` is not provided (e.g., ``0::2``), ``j`` must equal ``n``.
+- If ``k`` is less than ``0`` and ``i`` is not provided (e.g., ``:10:-2``), ``i`` must equal ``n-1``.
+- If ``k`` is less than ``0`` and ``j`` is not provided (e.g., ``0::-2``), ``j`` must equal ``-n-1``.
+
+Using a slice to index a single array axis must adhere to the following rules. Let ``n`` be the axis (dimension) size.
+
+- If ``i`` equals ``j``, a slice must return an empty array, whose axis (dimension) size along the indexed axis is ``0``.
+
+- Indexing via ``:`` and ``::`` must be equivalent and have defaults derived from the rules above. Both ``:`` and ``::`` indicate to select all elements along a single axis (dimension).
+
+ .. note::
+ This specification does not require "clipping" out-of-bounds slice indices. This is in contrast to Python slice semantics where ``0:100`` and ``0:10`` are equivalent on a list of length ``10``.
+
+The following ranges for the start and stop values of a slice must be supported. Let ``n`` be the axis (dimension) size being sliced. For a slice ``i:j:k``, the behavior specified above should be implemented for the following:
+
+- ``i`` or ``j`` omitted (``None``).
+- ``-n <= i <= n``.
+- For ``k > 0`` or ``k`` omitted (``None``), ``-n <= j <= n``.
+- For ``k < 0``, ``-n - 1 <= j <= max(0, n - 1)``.
+
+The behavior outside of these bounds is unspecified.
+
+.. note::
+ *Rationale: this is consistent with bounds checking for integer indexing; the behavior of out-of-bounds indices is left unspecified. Implementations may choose to clip (consistent with Python* ``list`` *slicing semantics), raise an exception, return junk values, or some other behavior depending on device requirements and performance considerations.*
+
+Multi-axis Indexing
+-------------------
+
+Multi-dimensional arrays must extend the concept of single-axis indexing to multiple axes by applying single-axis indexing rules along each axis (dimension) and supporting the following additional rules. Let ``N`` be the number of dimensions ("rank") of a multi-dimensional array ``A``.
+
+- Each axis may be independently indexed via single-axis indexing by providing a comma-separated sequence ("selection tuple") of single-axis indexing expressions (e.g., ``A[:, 2:10, :, 5]``).
+
+ .. note::
+ In Python, ``A[(exp1, exp2, ..., expN)]`` is equivalent to ``A[exp1, exp2, ..., expN]``; the latter is syntactic sugar for the former.
+
+    Accordingly, if ``A`` has rank ``1``, then ``A[(slice(2, 10),)]`` must be equivalent to ``A[2:10]``. If ``A`` has rank ``2``, then ``A[(slice(2, 10), slice(None))]`` must be equivalent to ``A[2:10, :]``. And so on and so forth.
+
+- Providing a single nonnegative integer ``i`` as a single-axis index must index the same elements as the slice ``i:i+1``.
+
+- Providing a single negative integer ``i`` as a single-axis index must index the same elements as the slice ``n+i:n+i+1``, where ``n`` is the axis (dimension) size.
+
+- Providing a single integer as a single-axis index must reduce the number of array dimensions by ``1`` (i.e., the array rank must decrease by one; if ``A`` has rank ``2``, ``rank(A)-1 == rank(A[0, :])``). In particular, a selection tuple with the ``m``\th element an integer (and all other entries ``:``) indexes a sub-array with rank ``N-1``.
+
+ .. note::
+ When providing a single integer as a single-axis index to an array of rank ``1``, the result should be an array of rank ``0``, not a NumPy scalar. Note that this behavior differs from NumPy.
+
+- Providing a slice must retain array dimensions (i.e., the array rank must remain the same; ``rank(A) == rank(A[:])``).
+
+- Providing `ellipsis <https://docs.python.org/3/library/constants.html#Ellipsis>`_ must apply ``:`` to each dimension necessary to index all dimensions (e.g., if ``A`` has rank ``4``, ``A[1:, ..., 2:5] == A[1:, :, :, 2:5]``). Only a single ellipsis must be allowed. An ``IndexError`` exception must be raised if more than one ellipsis is provided.
+
+- Providing an empty tuple or an ellipsis to an array of rank ``0`` must result in an array of the same rank (i.e., if ``A`` has rank ``0``, ``A == A[()]`` and ``A == A[...]``).
+
+ .. note::
+ This behavior differs from NumPy where providing an empty tuple to an array of rank ``0`` returns a NumPy scalar.
+
+- Each ``None`` in the selection tuple must expand the dimensions of the resulting selection by one dimension of size ``1``. The position of the added dimension must be the same as the position of ``None`` in the selection tuple.
+
+ .. note::
+ Expanding dimensions can be equivalently achieved via repeated invocation of :func:`~array_api.expand_dims`.
+
+ .. note::
+ The constant ``newaxis`` is an alias of ``None`` and can thus be used in a similar manner as ``None``.
+
+- Except in the case of providing a single ellipsis (e.g., ``A[2:10, ...]`` or ``A[1:, ..., 2:5]``), the number of provided single-axis indexing expressions (excluding ``None``) should equal ``N``. For example, if ``A`` has rank ``2``, a single-axis indexing expression should be explicitly provided for both axes (e.g., ``A[2:10, :]``). An ``IndexError`` exception should be raised if the number of provided single-axis indexing expressions (excluding ``None``) is less than ``N``.
+
+ .. note::
+ Some libraries, such as SymPy, support flat indexing (i.e., providing a single-axis indexing expression to a higher-dimensional array). That practice is not supported here.
+
+ To perform flat indexing, use ``reshape(x, (-1,))[integer]``.
+
+- An ``IndexError`` exception must be raised if the number of provided single-axis indexing expressions (excluding ``None``) is greater than ``N``.
+
+ .. note::
+ This specification leaves unspecified the behavior of providing a slice which attempts to select elements along a particular axis, but whose starting index is out-of-bounds.
+
+ *Rationale: this is consistent with bounds-checking for single-axis indexing. An implementation may choose to set the axis (dimension) size of the result array to* ``0`` *, raise an exception, return junk values, or some other behavior depending on device requirements and performance considerations.*
+
+Boolean Array Indexing
+----------------------
+
+.. admonition:: Data-dependent output shape
+ :class: admonition important
+
+ For common boolean array use cases (e.g., using a dynamically-sized boolean array mask to filter the values of another array), the shape of the output array is data-dependent; hence, array libraries which build computation graphs (e.g., JAX, Dask, etc.) may find boolean array indexing difficult to implement. Accordingly, such libraries may choose to omit boolean array indexing. See :ref:`data-dependent-output-shapes` section for more details.
+
+An array must support indexing where the **sole index** is an ``M``-dimensional boolean array ``B`` with shape ``S1 = (s1, ..., sM)`` according to the following rules. Let ``A`` be an ``N``-dimensional array with shape ``S2 = (s1, ..., sM, ..., sN)``.
+
+ .. note::
+ The prohibition against combining boolean array indices with other single-axis indexing expressions includes the use of ``None``. To expand dimensions of the returned array, use repeated invocation of :func:`~array_api.expand_dims`.
+
+- If ``N >= M``, then ``A[B]`` must replace the first ``M`` dimensions of ``A`` with a single dimension having a size equal to the number of ``True`` elements in ``B``. The values in the resulting array must be in row-major (C-style) order; this is equivalent to ``A[nonzero(B)]``.
+
+ .. note::
+ For example, if ``N == M == 2``, indexing ``A`` via a boolean array ``B`` will return a one-dimensional array whose size is equal to the number of ``True`` elements in ``B``.
+
+- If ``N < M``, then an ``IndexError`` exception must be raised.
+
+- The size of each dimension in ``B`` must equal the size of the corresponding dimension in ``A`` or be ``0``, beginning with the first dimension in ``A``. If a dimension size does not equal the size of the corresponding dimension in ``A`` and is not ``0``, then an ``IndexError`` exception must be raised.
+
+- The elements of a boolean index array must be iterated in row-major, C-style order, with the exception of zero-dimensional boolean arrays.
+
+- A zero-dimensional boolean index array (equivalent to ``True`` or ``False``) must follow the same axis replacement rules stated above. Namely, a zero-dimensional boolean index array removes zero dimensions and adds a single dimension of length ``1`` if the index array's value is ``True`` and of length ``0`` if the index array's value is ``False``. Accordingly, for a zero-dimensional boolean index array ``B``, the result of ``A[B]`` has shape ``S = (1, s1, ..., sN)`` if the index array's value is ``True`` and has shape ``S = (0, s1, ..., sN)`` if the index array's value is ``False``.
+
+Return Values
+-------------
+
+The result of an indexing operation (e.g., multi-axis indexing, boolean array indexing, etc) must be an array of the same data type as the indexed array.
+
+.. note::
+ The specified return value behavior includes indexing operations which return a single value (e.g., accessing a single element within a one-dimensional array).
diff --git a/spec/2023.12/API_specification/indexing_functions.rst b/spec/2023.12/API_specification/indexing_functions.rst
new file mode 100644
index 000000000..aef298566
--- /dev/null
+++ b/spec/2023.12/API_specification/indexing_functions.rst
@@ -0,0 +1,23 @@
+.. _indexing-functions:
+
+Indexing Functions
+===================
+
+ Array API specification for functions for indexing arrays.
+
+A conforming implementation of the array API standard must provide and support the following functions.
+
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ take
diff --git a/spec/2023.12/API_specification/inspection.rst b/spec/2023.12/API_specification/inspection.rst
new file mode 100644
index 000000000..89d9c602a
--- /dev/null
+++ b/spec/2023.12/API_specification/inspection.rst
@@ -0,0 +1,42 @@
+.. _inspection:
+
+Inspection
+==========
+
+ Array API specification for namespace inspection utilities.
+
+A conforming implementation of the array API standard must provide and support the following functions and associated inspection APIs.
+
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api.info
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ __array_namespace_info__
+
+
+Inspection APIs
+---------------
+
+In the namespace (or class) returned by ``__array_namespace_info__``, a conforming implementation of the array API standard must provide and support the following functions (or methods) for programmatically querying data type and device support, capabilities, and other specification-defined implementation-specific behavior, as documented in the functions described below.
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ capabilities
+ default_device
+ default_dtypes
+ devices
+ dtypes
diff --git a/spec/2023.12/API_specification/linear_algebra_functions.rst b/spec/2023.12/API_specification/linear_algebra_functions.rst
new file mode 100644
index 000000000..04d36f50a
--- /dev/null
+++ b/spec/2023.12/API_specification/linear_algebra_functions.rst
@@ -0,0 +1,23 @@
+Linear Algebra Functions
+========================
+
+ Array API specification for linear algebra functions.
+
+A conforming implementation of the array API standard must provide and support the following functions.
+
+
+.. currentmodule:: array_api
+
+Objects in API
+--------------
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ matmul
+ matrix_transpose
+ tensordot
+ vecdot
diff --git a/spec/2023.12/API_specification/manipulation_functions.rst b/spec/2023.12/API_specification/manipulation_functions.rst
new file mode 100644
index 000000000..395c1c3e2
--- /dev/null
+++ b/spec/2023.12/API_specification/manipulation_functions.rst
@@ -0,0 +1,34 @@
+Manipulation Functions
+======================
+
+ Array API specification for manipulating arrays.
+
+A conforming implementation of the array API standard must provide and support the following functions.
+
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ broadcast_arrays
+ broadcast_to
+ concat
+ expand_dims
+ flip
+ moveaxis
+ permute_dims
+ repeat
+ reshape
+ roll
+ squeeze
+ stack
+ tile
+ unstack
diff --git a/spec/2023.12/API_specification/searching_functions.rst b/spec/2023.12/API_specification/searching_functions.rst
new file mode 100644
index 000000000..c952f1aad
--- /dev/null
+++ b/spec/2023.12/API_specification/searching_functions.rst
@@ -0,0 +1,27 @@
+.. _searching-functions:
+
+Searching Functions
+===================
+
+ Array API specification for functions for searching arrays.
+
+A conforming implementation of the array API standard must provide and support the following functions.
+
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ argmax
+ argmin
+ nonzero
+ searchsorted
+ where
diff --git a/spec/2023.12/API_specification/set_functions.rst b/spec/2023.12/API_specification/set_functions.rst
new file mode 100644
index 000000000..addf31e1f
--- /dev/null
+++ b/spec/2023.12/API_specification/set_functions.rst
@@ -0,0 +1,24 @@
+Set Functions
+=============
+
+ Array API specification for creating and operating on sets.
+
+A conforming implementation of the array API standard must provide and support the following functions.
+
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ unique_all
+ unique_counts
+ unique_inverse
+ unique_values
diff --git a/spec/2023.12/API_specification/sorting_functions.rst b/spec/2023.12/API_specification/sorting_functions.rst
new file mode 100644
index 000000000..ad3af8857
--- /dev/null
+++ b/spec/2023.12/API_specification/sorting_functions.rst
@@ -0,0 +1,31 @@
+Sorting Functions
+=================
+
+ Array API specification for sorting functions.
+
+A conforming implementation of the array API standard must provide and support the following functions.
+
+
+.. note::
+
+ For floating-point input arrays, the sort order of NaNs and signed zeros is unspecified and thus implementation-dependent.
+
+ Implementations may choose to sort signed zeros (``-0 < +0``) or may choose to rely solely on value equality (``==``).
+
+ Implementations may choose to sort NaNs (e.g., to the end or to the beginning of a returned array) or leave them in-place. Should an implementation sort NaNs, the sorting convention should be clearly documented in the conforming implementation's documentation.
+
+ While defining a sort order for IEEE 754 floating-point numbers is recommended in order to facilitate reproducible and consistent sort results, doing so is not currently required by this specification.
+
+.. currentmodule:: array_api
+
+Objects in API
+--------------
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ argsort
+ sort
diff --git a/spec/2023.12/API_specification/statistical_functions.rst b/spec/2023.12/API_specification/statistical_functions.rst
new file mode 100644
index 000000000..20e02b3f9
--- /dev/null
+++ b/spec/2023.12/API_specification/statistical_functions.rst
@@ -0,0 +1,28 @@
+Statistical Functions
+=====================
+
+ Array API specification for statistical functions.
+
+A conforming implementation of the array API standard must provide and support the following functions.
+
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ cumulative_sum
+ max
+ mean
+ min
+ prod
+ std
+ sum
+ var
diff --git a/spec/2023.12/API_specification/type_promotion.rst b/spec/2023.12/API_specification/type_promotion.rst
new file mode 100644
index 000000000..339b90e45
--- /dev/null
+++ b/spec/2023.12/API_specification/type_promotion.rst
@@ -0,0 +1,148 @@
+.. _type-promotion:
+
+Type Promotion Rules
+====================
+
+ Array API specification for type promotion rules.
+
+Type promotion rules can be understood at a high level from the following diagram:
+
+.. image:: ../../_static/images/dtype_promotion_lattice.png
+   :alt: Type promotion diagram
+
+*Type promotion diagram. Promotion between any two types is given by their join on this lattice. Only the types of participating arrays matter, not their values. Dashed lines indicate that behavior for Python scalars is undefined on overflow. Boolean, integer and floating-point dtypes are not connected, indicating mixed-kind promotion is undefined.*
+
+Rules
+-----
+
+A conforming implementation of the array API standard must implement the following type promotion rules governing the common result type for two **array** operands during an arithmetic operation.
+
+A conforming implementation of the array API standard may support additional type promotion rules beyond those described in this specification.
+
+.. note::
+ Type codes are used here to keep tables readable; they are not part of the standard. In code, use the data type objects specified in :ref:`data-types` (e.g., ``int16`` rather than ``'i2'``).
+
+..
+ Note: please keep table columns aligned
+
+The following type promotion tables specify the casting behavior for operations involving two array operands. When more than two array operands participate, application of the promotion tables is associative (i.e., the result does not depend on operand order).
+
+Signed integer type promotion table
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
++--------+----+----+----+----+
+| | i1 | i2 | i4 | i8 |
++========+====+====+====+====+
+| **i1** | i1 | i2 | i4 | i8 |
++--------+----+----+----+----+
+| **i2** | i2 | i2 | i4 | i8 |
++--------+----+----+----+----+
+| **i4** | i4 | i4 | i4 | i8 |
++--------+----+----+----+----+
+| **i8** | i8 | i8 | i8 | i8 |
++--------+----+----+----+----+
+
+where
+
+- **i1**: 8-bit signed integer (i.e., ``int8``)
+- **i2**: 16-bit signed integer (i.e., ``int16``)
+- **i4**: 32-bit signed integer (i.e., ``int32``)
+- **i8**: 64-bit signed integer (i.e., ``int64``)
+
+Unsigned integer type promotion table
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
++--------+----+----+----+----+
+| | u1 | u2 | u4 | u8 |
++========+====+====+====+====+
+| **u1** | u1 | u2 | u4 | u8 |
++--------+----+----+----+----+
+| **u2** | u2 | u2 | u4 | u8 |
++--------+----+----+----+----+
+| **u4** | u4 | u4 | u4 | u8 |
++--------+----+----+----+----+
+| **u8** | u8 | u8 | u8 | u8 |
++--------+----+----+----+----+
+
+where
+
+- **u1**: 8-bit unsigned integer (i.e., ``uint8``)
+- **u2**: 16-bit unsigned integer (i.e., ``uint16``)
+- **u4**: 32-bit unsigned integer (i.e., ``uint32``)
+- **u8**: 64-bit unsigned integer (i.e., ``uint64``)
+
+Mixed unsigned and signed integer type promotion table
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
++--------+----+----+----+
+| | u1 | u2 | u4 |
++========+====+====+====+
+| **i1** | i2 | i4 | i8 |
++--------+----+----+----+
+| **i2** | i2 | i4 | i8 |
++--------+----+----+----+
+| **i4** | i4 | i4 | i8 |
++--------+----+----+----+
+| **i8** | i8 | i8 | i8 |
++--------+----+----+----+
+
+Floating-point type promotion table
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
++---------+-----+-----+-----+-----+
+| | f4 | f8 | c8 | c16 |
++=========+=====+=====+=====+=====+
+| **f4** | f4 | f8 | c8 | c16 |
++---------+-----+-----+-----+-----+
+| **f8** | f8 | f8 | c16 | c16 |
++---------+-----+-----+-----+-----+
+| **c8** | c8 | c16 | c8 | c16 |
++---------+-----+-----+-----+-----+
+| **c16** | c16 | c16 | c16 | c16 |
++---------+-----+-----+-----+-----+
+
+where
+
+- **f4**: single-precision (32-bit) floating-point number (i.e., ``float32``)
+- **f8**: double-precision (64-bit) floating-point number (i.e., ``float64``)
+- **c8**: single-precision complex floating-point number (i.e., ``complex64``)
+ composed of two single-precision (32-bit) floating-point numbers
+- **c16**: double-precision complex floating-point number (i.e., ``complex128``)
+ composed of two double-precision (64-bit) floating-point numbers
+
+
+Notes
+~~~~~
+
+- Type promotion rules must apply when determining the common result type for two **array** operands during an arithmetic operation, regardless of array dimension. Accordingly, zero-dimensional arrays must be subject to the same type promotion rules as dimensional arrays.
+- Type promotion of non-numerical data types to numerical data types is unspecified (e.g., ``bool`` to ``intxx`` or ``floatxx``).
+
+.. note::
+ Mixed integer and floating-point type promotion rules are not specified because behavior varies between implementations.
+
+Mixing arrays with Python scalars
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Using Python scalars (i.e., instances of ``bool``, ``int``, ``float``, ``complex``) together with arrays must be supported for:
+
+- ``array <op> scalar``
+- ``scalar <op> array``
+
+where ``<op>`` is a built-in operator (including in-place operators, but excluding the matmul ``@`` operator; see :ref:`operators` for operators supported by the array object) and ``scalar`` has a type and value compatible with the array data type:
+
+- a Python ``bool`` for a ``bool`` array data type.
+- a Python ``int`` within the bounds of the given data type for integer array :ref:`data-types`.
+- a Python ``int`` or ``float`` for real-valued floating-point array data types.
+- a Python ``int``, ``float``, or ``complex`` for complex floating-point array data types.
+
+Provided the above requirements are met, the expected behavior is equivalent to:
+
+1. Convert the scalar to a zero-dimensional array with the same data type as that of the array used in the expression.
+2. Execute the operation for ``array <op> 0-D array`` (or ``0-D array <op> array`` if ``scalar`` was the left-hand argument).
+
+.. note::
+ Behavior is not specified when mixing a Python ``float`` and an array with an integer data type; this may give ``float32``, ``float64``, or raise an exception. Behavior is implementation-specific.
+
+ Similarly, behavior is not specified when mixing a Python ``complex`` and an array with a real-valued data type; this may give ``complex64``, ``complex128``, or raise an exception. Behavior is implementation-specific.
+
+ Behavior is also not specified for integers outside of the bounds of a given integer data type. Integers outside of bounds may result in overflow or an error.
diff --git a/spec/2023.12/API_specification/utility_functions.rst b/spec/2023.12/API_specification/utility_functions.rst
new file mode 100644
index 000000000..5105fa3df
--- /dev/null
+++ b/spec/2023.12/API_specification/utility_functions.rst
@@ -0,0 +1,22 @@
+Utility Functions
+=================
+
+ Array API specification for utility functions.
+
+A conforming implementation of the array API standard must provide and support the following functions.
+
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ all
+ any
diff --git a/spec/2023.12/API_specification/version.rst b/spec/2023.12/API_specification/version.rst
new file mode 100644
index 000000000..346395d9a
--- /dev/null
+++ b/spec/2023.12/API_specification/version.rst
@@ -0,0 +1,22 @@
+Version
+=======
+
+ Array API specification for versioning.
+
+A conforming implementation of the array API standard must provide a ``__array_api_version__`` attribute - see :ref:`api-versioning` for details.
+
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: attribute.rst
+ :nosignatures:
+
+ __array_api_version__
diff --git a/spec/2023.12/assumptions.md b/spec/2023.12/assumptions.md
new file mode 100644
index 000000000..b11482c5a
--- /dev/null
+++ b/spec/2023.12/assumptions.md
@@ -0,0 +1,77 @@
+(Assumptions)=
+
+# Assumptions
+
+## Hardware and software environments
+
+No assumptions on a specific hardware environment are made. It must be possible
+to create an array library adhering to this standard that runs (efficiently) on
+a variety of different hardware: CPUs with different architectures, GPUs,
+distributed systems and TPUs and other emerging accelerators.
+
+The same applies to software environments: it must be possible to create an
+array library adhering to this standard that runs efficiently independent of
+what compilers, build-time or run-time execution environment, or distribution
+and install method is employed. Parallel execution, JIT compilation, and
+delayed (lazy) evaluation must all be possible.
+
+The variety of hardware and software environments puts _constraints_ on choices
+made in the API standard. For example, JIT compilers may require output dtypes
+of functions to be predictable from input dtypes only rather than input values.
+
+
+(assumptions-dependencies)=
+
+## Dependencies
+
+The only dependency that's assumed in this standard is that on Python itself.
+Python >= 3.8 is assumed, motivated by the use of positional-only parameters
+(see [function and method signatures](API_specification/function_and_method_signatures.rst)).
+
+Importantly, array libraries are not assumed to be aware of each other, or of
+a common array-specific layer. The [use cases](use_cases.md) do not require
+such a dependency, and building and evolving an array library is easier without
+such a coupling. Facilitating support of multiple array types in downstream
+libraries is an important use case; however, the assumed dependency structure
+for that is:
+
+
+
+Array libraries may know how to interoperate with each other, for example by
+constructing their own array type from that of another library or by shared
+memory use of an array (see [Data interchange mechanisms](design_topics/data_interchange.rst)).
+This can be done without a dependency though - only adherence to a protocol is
+enough.
+
+Array-consuming libraries will have to depend on one or more array libraries.
+That could be a "soft dependency" though, meaning retrieving an array library
+namespace from array instances that are passed in, but not explicitly doing
+`import arraylib_name`.
+
+
+## Backwards compatibility
+
+The assumption made during creation of this standard is that libraries are
+constrained by backwards compatibility guarantees to their users, and are
+likely unwilling to make significant backwards-incompatible changes for the
+purpose of conforming to this standard. Therefore it is assumed that the
+standard will be made available in a new namespace within each library, or the
+library will provide a way to retrieve a module or module-like object that
+adheres to this standard. See {ref}`how-to-adopt-this-api` for more details.
+
+
+## Production code & interactive use
+
+It is assumed that the primary use case is writing production code, for example
+in array-consuming libraries. As a consequence, making it easy to ensure that
+code is written as intended and has unambiguous semantics is preferred - and
+clear exceptions must be raised otherwise.
+
+It is also assumed that this does not significantly detract from the
+interactive user experience. However, in case existing libraries differ in
+behavior, the more strict version of that behavior is typically preferred. A
+good example is array inputs to functions - while NumPy accepts lists, tuples,
+generators, and anything else that could be turned into an array, most other
+libraries only accept their own array types. This standard follows the latter choice.
+It is likely always possible to put a thin "interactive use convenience layer"
+on top of a more strict behavior.
diff --git a/spec/2023.12/benchmark_suite.md b/spec/2023.12/benchmark_suite.md
new file mode 100644
index 000000000..41066c6a4
--- /dev/null
+++ b/spec/2023.12/benchmark_suite.md
@@ -0,0 +1,3 @@
+# Benchmark suite
+
+Adding a benchmark suite is planned in the future.
diff --git a/spec/2023.12/changelog.rst b/spec/2023.12/changelog.rst
new file mode 100644
index 000000000..701a3dbcd
--- /dev/null
+++ b/spec/2023.12/changelog.rst
@@ -0,0 +1,5 @@
+Changelog per API standard version
+==================================
+
+.. include:: ../../CHANGELOG.md
+ :parser: myst_parser.sphinx_
diff --git a/spec/2023.12/conf.py b/spec/2023.12/conf.py
new file mode 100644
index 000000000..f1bee91d4
--- /dev/null
+++ b/spec/2023.12/conf.py
@@ -0,0 +1,13 @@
+import sys
+from pathlib import Path
+
+sys.path.insert(0, str(Path(__file__).parents[2] / "src"))
+
+from array_api_stubs import _2023_12 as stubs_mod
+from _array_api_conf import *
+
+release = "2023.12"
+
+nav_title = html_theme_options.get("nav_title") + " v{}".format(release)
+html_theme_options.update({"nav_title": nav_title})
+sys.modules["array_api"] = stubs_mod
diff --git a/spec/2023.12/design_topics/C_API.rst b/spec/2023.12/design_topics/C_API.rst
new file mode 100644
index 000000000..6a44596b0
--- /dev/null
+++ b/spec/2023.12/design_topics/C_API.rst
@@ -0,0 +1,94 @@
+.. _C-API:
+
+C API
+=====
+
+Use of a C API is out of scope for this array API, as mentioned in :ref:`Scope`.
+There are a lot of libraries that do use such an API - in particular via Cython code
+or via direct usage of the NumPy C API. When the maintainers of such libraries
+want to use this array API standard to support multiple types of arrays, they
+need a way to deal with that issue. This section aims to provide some guidance.
+
+The assumption in the rest of this section is that performance matters for the library,
+and hence the goal is to make other array types work without converting to a
+``numpy.ndarray`` or another particular array type. If that's not the case (e.g. for a
+visualization package), then other array types can simply be handled by converting
+to the supported array type.
+
+.. note::
+ Often a zero-copy conversion to ``numpy.ndarray`` is possible, at least for CPU arrays.
+ If that's the case, this may be a good way to support other array types.
+ The main difficulty in that case will be getting the return array type right - however,
+ this standard does provide a Python-level API for array construction that should allow
+ doing this. A relevant question is if it's possible to know with
+ certainty that a conversion will be zero-copy. This may indeed be
+ possible, see :ref:`data-interchange`.
+
+
+Example situations for C/Cython usage
+-------------------------------------
+
+Situation 1: a Python package that is mostly pure Python, with a limited number of Cython extensions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. note::
+ Projects in this situation include Statsmodels, scikit-bio and QuTiP
+
+Main strategy: documentation. The functionality using Cython code will not support other array types (or only with conversion to/from ``numpy.ndarray``), which can be documented per function.
+
+
+Situation 2: a Python package that contains a lot of Cython code
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. note::
+ Projects in this situation include scikit-learn and scikit-image
+
+Main strategy: add support for other array types *per submodule*. This keeps it manageable to explain to the user which functionality does and doesn't have support.
+
+Longer term: specific support for particular array types (e.g. ``cupy.ndarray`` can be supported with Python-only code via ``cupy.ElementwiseKernel``).
+
+
+Situation 3: a Python package that uses the NumPy or Python C API directly
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. note::
+ Projects in this situation include SciPy and Astropy
+
+Strategy: similar to *situation 2*, but the number of submodules that can support all array types may be limited.
+
+
+Device support
+--------------
+
+Supporting non-CPU array types in code using the C API or Cython seems problematic,
+this almost inevitably will require custom device-specific code (e.g., CUDA, ROCm) or
+something like JIT compilation with Numba.
+
+
+Other longer-term approaches
+----------------------------
+
+Further Python API standardization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There may be cases where it makes sense to standardize additional sets of
+functions, because they're important enough that array libraries tend to
+reimplement them. An example of this may be *special functions*, as provided
+by ``scipy.special``. Bessel and gamma functions for example are commonly
+reimplemented by array libraries. This may avoid having to drop into a
+particular implementation that does use a C API (e.g., one can then rely on
+``arraylib.special.gamma`` rather than having to use ``scipy.special.gamma``).
+
+HPy
+~~~
+
+`HPy <https://hpyproject.org/>`_ is a new project that will provide a higher-level
+C API and ABI than CPython offers. A Cython backend targeting HPy will be provided as well.
+
+- Better PyPy support
+- Universal ABI - single binary for all supported Python versions
+- Cython backend generating HPy rather than CPython code
+
+HPy isn't quite ready for mainstream usage today, but once it is it may
+help make supporting multiple array libraries or adding non-CPU device
+support to Cython more feasible.
diff --git a/spec/2023.12/design_topics/accuracy.rst b/spec/2023.12/design_topics/accuracy.rst
new file mode 100644
index 000000000..8c97db698
--- /dev/null
+++ b/spec/2023.12/design_topics/accuracy.rst
@@ -0,0 +1,77 @@
+.. _accuracy:
+
+Accuracy
+========
+
+ Array API specification for minimum accuracy requirements.
+
+Arithmetic Operations
+---------------------
+
+The results of element-wise arithmetic operations
+
+- ``+``
+- ``-``
+- ``*``
+- ``/``
+- ``%``
+
+including the corresponding element-wise array APIs defined in this standard
+
+- add
+- subtract
+- multiply
+- divide
+
+for floating-point operands must return the nearest representable value according to IEEE 754-2019 and a supported rounding mode. By default, the rounding mode should be ``roundTiesToEven`` (i.e., ties rounded toward the nearest value with an even least significant bit).
+
+Mathematical Functions
+----------------------
+
+This specification does **not** precisely define the behavior of the following functions
+
+- acos
+- acosh
+- asin
+- asinh
+- atan
+- atan2
+- atanh
+- cos
+- cosh
+- exp
+- expm1
+- log
+- log1p
+- log2
+- log10
+- pow
+- sin
+- sinh
+- tan
+- tanh
+
+except to require specific results for certain argument values that represent boundary cases of interest.
+
+.. note::
+ To help readers identify functions lacking precisely defined accuracy behavior, this specification uses the phrase "implementation-dependent approximation" in function descriptions.
+
+For other argument values, these functions should compute approximations to the results of respective mathematical functions; however, this specification recognizes that array libraries may be constrained by underlying hardware and/or seek to optimize performance over absolute accuracy and, thus, allows some latitude in the choice of approximation algorithms.
+
+Although the specification leaves the choice of algorithms to the implementation, this specification recommends (but does not specify) that implementations use the approximation algorithms for IEEE 754-2019 arithmetic contained in `FDLIBM <https://www.netlib.org/fdlibm/>`_, the freely distributable mathematical library from Sun Microsystems, or some other comparable IEEE 754-2019 compliant mathematical library.
+
+.. note::
+ With exception of a few mathematical functions, returning results which are indistinguishable from correctly rounded infinitely precise results is difficult, if not impossible, to achieve due to the algorithms involved, the limits of finite-precision, and error propagation. However, this specification recognizes that numerical accuracy alignment among array libraries is desirable in order to ensure portability and reproducibility. Accordingly, for each mathematical function, the specification test suite includes test values which span a function's domain and reports the average and maximum deviation from either a designated standard implementation (e.g., an arbitrary precision arithmetic implementation) or an average computed across a subset of known array library implementations. Such reporting aids users who need to know how accuracy varies among libraries and developers who need to check the validity of their implementations.
+
+Statistical Functions
+---------------------
+
+This specification does not specify accuracy requirements for statistical functions; however, this specification does expect that a conforming implementation of the array API standard will make a best-effort attempt to ensure that its implementations are theoretically sound and numerically robust.
+
+.. note::
+ In order for an array library to pass the specification test suite, an array library's statistical function implementations must satisfy certain bare-minimum accuracy requirements (e.g., accurate summation of a small set of positive integers). Unfortunately, imposing more rigorous accuracy requirements is not possible without severely curtailing possible implementation algorithms and unduly increasing implementation complexity.
+
+Linear Algebra
+--------------
+
+This specification does not specify accuracy requirements for linear algebra functions; however, this specification does expect that a conforming implementation of the array API standard will make a best-effort attempt to ensure that its implementations are theoretically sound and numerically robust.
diff --git a/spec/2023.12/design_topics/complex_numbers.rst b/spec/2023.12/design_topics/complex_numbers.rst
new file mode 100644
index 000000000..0eca79e91
--- /dev/null
+++ b/spec/2023.12/design_topics/complex_numbers.rst
@@ -0,0 +1,61 @@
+.. _complex-numbers:
+
+Complex Numbers
+===============
+
+The Complex Plane
+-----------------
+
+Mathematically, equality comparison between complex numbers depends on the choice of topology. For example, the complex plane has a continuum of infinities; however, when the complex plane is projected onto the surface of a sphere (a stereographic projection commonly referred to as the *Riemann sphere*), infinities coalesce into a single *point at infinity*, thus modeling the extended complex plane. For the former, the value :math:`\infty + 3j` is distinct from (i.e., does not equal) :math:`\infty + 4j`, while, for the latter, :math:`\infty + 3j` does equal :math:`\infty + 4j`.
+
+Modeling complex numbers as a Riemann sphere conveys certain mathematical niceties (e.g., well-behaved division by zero and preservation of the identity :math:`\frac{1}{\frac{1}{z}} = z`); however, translating the model to IEEE 754 floating-point operations can lead to some unexpected results. For example, according to IEEE 754, :math:`+\infty` and :math:`-\infty` are distinct values; hence, for equality comparison, if :math:`x = +\infty` and :math:`y = -\infty`, then :math:`x \neq y`. In contrast, if we convert :math:`x` and :math:`y` to their complex number equivalents :math:`x = +\infty + 0j` and :math:`y = -\infty + 0j` and then interpret within the context of the extended complex plane, we arrive at the opposite result; namely, :math:`x = y`.
+
+In short, given the constraints of floating-point arithmetic and the subtleties of signed zeros, infinities, NaNs, and their interaction, crafting a specification which always yields intuitive results and satisfies all use cases involving complex numbers is not possible. Instead, this specification attempts to follow precedent (e.g., C99, Python, Julia, NumPy, and elsewhere), while also minimizing surprise. The result is an imperfect balance in which certain APIs may appear to embrace the one-infinity model found in C/C++ for algebraic operations involving complex numbers (e.g., considering :math:`\infty + \operatorname{NaN}\ j` to be infinite, irrespective of the imaginary component's value, including NaN), while other APIs may rely on the complex plane with its multiplicity of infinities (e.g., in transcendental functions). Accordingly, consumers of this specification should expect that certain results involving complex numbers for one operation may not be wholly consistent with results involving complex numbers for another operation.
+
+
+.. _branch-cuts:
+
+Branch Cuts
+-----------
+
+In the mathematical field of complex analysis, a **branch cut** is a curve in the complex plane across which an analytic multi-valued function is discontinuous. Branch cuts are often taken as lines or line segments, and the choice of any particular branch cut is a matter of convention.
+
+For example, consider the function :math:`z^2` which maps a complex number :math:`z` to a well-defined number :math:`z^2`. The function's inverse function :math:`\sqrt{z}` does not, however, map to a single value. For example, for :math:`z = 1`, :math:`\sqrt{1} = \pm 1`. While one can choose a unique principal value for this and similar functions (e.g., in this case, the principal square root is :math:`+1`), choices cannot be made continuous over the whole complex plane, as lines of discontinuity must occur. To handle discontinuities, one commonly adopts branch cuts, which are not, in general, unique. Instead, one chooses a branch cut as a matter of convention in order to give simple analytic properties.
+
+Branch cuts do not arise for single-valued trigonometric, hyperbolic, integer power, or exponential functions; however, branch cuts do arise for their multi-valued inverses.
+
+In contrast to real-valued floating-point numbers which have well-defined behavior as specified in IEEE 754, complex-valued floating-point numbers have no equivalent specification. Accordingly, this specification chooses to follow C99 conventions for special cases and branch cuts for those functions supporting complex numbers. For those functions which do not have C99 equivalents (e.g., linear algebra APIs), the specification relies on dominant conventions among existing array libraries.
+
+.. warning::
+ All branch cuts documented in this specification are considered **provisional**. While conforming implementations of the array API standard should adopt the branch cuts described in this standard, consumers of array API standard implementations should **not** assume that branch cuts are consistent between implementations.
+
+ Provided no issues arise due to the choice of branch cut, the provisional status is likely to be removed in a future revision of this standard.
+
+
+.. _complex-number-ordering:
+
+Complex Number Ordering
+-----------------------
+
+Given a set :math:`\{a_1, \ldots, a_n\}`, an order relation must satisfy the following properties:
+
+1. **Reflexive**: for any :math:`a` in the set, :math:`a \leq a`.
+2. **Transitive**: for any :math:`a`, :math:`b`, and :math:`c` in the set, if :math:`a \leq b` and :math:`b \leq c`, then :math:`a \leq c`.
+3. **Antisymmetric**: for any :math:`a` and :math:`b` in the set, if :math:`a \leq b` and :math:`b \leq a`, then :math:`a = b`.
+4. **Total Order**: in addition to the *partial order* established by 1-3, for any :math:`a` and :math:`b` in the set, either :math:`a \leq b` or :math:`b \leq a` (or both).
+5. **Compatible with Addition**: for all :math:`a`, :math:`b`, and :math:`c` in the set, if :math:`a \leq b`, then :math:`a + c \leq b + c`.
+6. **Compatible with Multiplication**: for all :math:`a`, :math:`b`, and :math:`c` in the set, if :math:`a \leq b` and :math:`0 \leq c`, then :math:`ac \leq bc`.
+
+Defining an order relation for complex numbers which satisfies all six properties defined above is not possible. Accordingly, this specification does not require that a conforming implementation of the array API standard adopt any specific complex number order relation.
+
+In order to satisfy backward compatibility guarantees, conforming implementations of the array API standard may choose to define an ordering for complex numbers (e.g., lexicographic); however, consumers of the array API standard should **not** assume that complex number ordering is consistent between implementations or even supported.
+
+If a conforming implementation chooses to define an ordering for complex numbers, the ordering must be clearly documented.
+
+
+Value-based Promotion
+----------------------
+
+According to the type promotion rules described in this specification (see :ref:`type-promotion`), only the data types of the input arrays participating in an operation matter, not their values. The same principle applies to situations in which one or more results of operations on real-valued arrays are mathematically defined in the complex domain, but not in their real domain.
+
+By convention, the principal square root of :math:`-1` is :math:`j`, where :math:`j` is the imaginary unit. Despite this convention, for those operations supporting type promotion, conforming implementations must only consider input array data types when determining the data type of the output array. For example, if a real-valued input array is provided to :func:`~array_api.sqrt`, the output array must also be real-valued, even if the input array contains negative values. Accordingly, if a consumer of a conforming implementation of this specification desires for an operation's results to include the complex domain, the consumer should first cast the input array(s) to an appropriate complex floating-point data type before performing the operation.
diff --git a/spec/2023.12/design_topics/copies_views_and_mutation.rst b/spec/2023.12/design_topics/copies_views_and_mutation.rst
new file mode 100644
index 000000000..52be1c805
--- /dev/null
+++ b/spec/2023.12/design_topics/copies_views_and_mutation.rst
@@ -0,0 +1,77 @@
+.. _copyview-mutability:
+
+Copy-view behaviour and mutability
+==================================
+
+.. admonition:: Mutating views
+ :class: important
+
+ Array API consumers are *strongly* advised to avoid *any* mutating operations when an array object may be either a "view" (i.e., an array whose data refers to memory that belongs to another array) or own memory of which one or more other array objects may be views. This admonition may become more strict in the future (e.g., this specification may require that view mutation be prohibited and trigger an exception). Accordingly, only perform mutation operations (e.g., in-place assignment) when absolutely confident that array data belongs to one, and only one, array object.
+
+Strided array implementations (e.g. NumPy, PyTorch, CuPy, MXNet) typically
+have the concept of a "view", meaning an array containing data in memory that
+belongs to another array (i.e. a different "view" on the original data).
+Views are useful for performance reasons - not copying data to a new location
+saves memory and is faster than copying - but can also affect the semantics
+of code. This happens when views are combined with *mutating* operations.
+This simple example illustrates that:
+
+.. code-block:: python
+
+ x = ones(1)
+ y = x[:] # `y` *may* be a view on the data of `x`
+ y -= 1 # if `y` is a view, this modifies `x`
+
+Code as simple as the above example will not be portable between array
+libraries - for NumPy/PyTorch/CuPy/MXNet ``x`` will contain the value ``0``,
+while for TensorFlow/JAX/Dask it will contain the value ``1``. The combination
+of views and mutability is fundamentally problematic here if the goal is to
+be able to write code with unambiguous semantics.
+
+Views are necessary for getting good performance out of the current strided
+array libraries. It is not always clear however when a library will return a
+view, and when it will return a copy. This API standard does not attempt to
+specify this - libraries can do either.
+
+There are several types of operations that do in-place mutation of data
+contained in arrays. These include:
+
+1. Inplace operators (e.g. ``*=``)
+2. Item assignment (e.g. ``x[0] = 1``)
+3. Slice assignment (e.g., ``x[:2, :] = 3``)
+4. The `out=` keyword present in some strided array libraries (e.g. ``sin(x, out=y)``)
+
+Libraries like TensorFlow and JAX tend to support inplace operators, provide
+alternative syntax for item and slice assignment (e.g. an ``update_index``
+function or ``x.at[idx].set(y)``), and have no need for ``out=``.
+
+A potential solution could be to make views read-only, or use copy-on-write
+semantics. Both are hard to implement and would present significant issues
+for backwards compatibility for current strided array libraries. Read-only
+views would also not be a full solution, given that mutating the original
+(base) array will also result in ambiguous semantics. Hence this API standard
+does not attempt to go down this route.
+
+Both inplace operators and item/slice assignment can be mapped onto
+equivalent functional expressions (e.g. ``x[idx] = val`` maps to
+``x.at[idx].set(val)``), and given that both inplace operators and item/slice
+assignment are very widely used in both library and end user code, this
+standard chooses to include them.
+
+The situation with ``out=`` is slightly different - it's less heavily used, and
+easier to avoid. It's also not an optimal API, because it mixes an
+"efficiency of implementation" consideration ("you're allowed to do this
+inplace") with the semantics of a function ("the output _must_ be placed into
+this array). There are libraries that do some form of tracing or abstract
+interpretation over a language that does not support mutation (to make
+analysis easier); in those cases implementing ``out=`` with correct handling of
+views may even be impossible to do. There's alternatives, for example the
+donated arguments in JAX or working buffers in LAPACK, that allow the user to
+express "you _may_ overwrite this data, do whatever is fastest". Given that
+those alternatives aren't widely used in array libraries today, this API
+standard chooses to (a) leave out ``out=``, and (b) not specify another method
+of reusing arrays that are no longer needed as buffers.
+
+This leaves the problem of the initial example - with this API standard it
+remains possible to write code that will not work the same for all array
+libraries. This is something that the user must be careful about.
diff --git a/spec/2023.12/design_topics/data_dependent_output_shapes.rst b/spec/2023.12/design_topics/data_dependent_output_shapes.rst
new file mode 100644
index 000000000..43daa9765
--- /dev/null
+++ b/spec/2023.12/design_topics/data_dependent_output_shapes.rst
@@ -0,0 +1,15 @@
+.. _data-dependent-output-shapes:
+
+Data-dependent output shapes
+============================
+
+Array libraries which build computation graphs commonly employ static analysis that relies upon known shapes. For example, JAX requires known array sizes when compiling code, in order to perform static memory allocation. Functions and operations which are value-dependent present difficulties for such libraries, as array sizes cannot be inferred ahead of time without also knowing the contents of the respective arrays.
+
+While value-dependent functions and operations are not impossible to implement for array libraries which build computation graphs, this specification does not want to impose an undue burden on such libraries and permits omission of value-dependent operations. All other array libraries are expected, however, to implement the value-dependent operations included in this specification in order to be array specification compliant.
+
+Value-dependent operations are demarcated in this specification using an admonition similar to the following:
+
+.. admonition:: Data-dependent output shape
+ :class: important
+
+ The shape of the output array for this function/operation depends on the data values in the input array; hence, array libraries which build computation graphs (e.g., JAX, Dask, etc.) may find this function/operation difficult to implement without knowing array values. Accordingly, such libraries may choose to omit this function. See :ref:`data-dependent-output-shapes` section for more details.
diff --git a/spec/2023.12/design_topics/data_interchange.rst b/spec/2023.12/design_topics/data_interchange.rst
new file mode 100644
index 000000000..3b3040672
--- /dev/null
+++ b/spec/2023.12/design_topics/data_interchange.rst
@@ -0,0 +1,105 @@
+.. _data-interchange:
+
+Data interchange mechanisms
+===========================
+
+This section discusses the mechanism to convert one type of array into another.
+As discussed in the :ref:`assumptions-dependencies` section,
+*functions* provided by an array library are not expected to operate on
+*array types* implemented by another library. Instead, the array can be
+converted to a "native" array type.
+
+The interchange mechanism must offer the following:
+
+1. Data access via a protocol that describes the memory layout of the array
+ in an implementation-independent manner.
+
+ *Rationale: any number of libraries must be able to exchange data, and no
+ particular package must be needed to do so.*
+
+2. Support for all dtypes in this API standard (see :ref:`data-types`).
+
+3. Device support. It must be possible to determine on what device the array
+ that is to be converted lives.
+
+ *Rationale: there are CPU-only, GPU-only, and multi-device array types;
+ it's best to support these with a single protocol (with separate
+ per-device protocols it's hard to figure out unambiguous rules for which
+ protocol gets used, and the situation will get more complex over time
+ as TPUs and other accelerators become more widely available).*
+
+4. Zero-copy semantics where possible, making a copy only if needed (e.g.
+ when data is not contiguous in memory).
+
+ *Rationale: performance.*
+
+5. A Python-side and a C-side interface, the latter with a stable C ABI.
+
+ *Rationale: all prominent existing array libraries are implemented in
+ C/C++, and are released independently from each other. Hence a stable C
+ ABI is required for packages to work well together.*
+
+DLPack: An in-memory tensor structure
+-------------------------------------
+
+The best candidate for this protocol is
+`DLPack <https://github.com/dmlc/dlpack>`_, and hence that is what this
+standard has chosen as the primary/recommended protocol. Note that the
+``asarray`` function also supports the Python buffer protocol (CPU-only) to
+support libraries that already implement buffer protocol support.
+
+.. note::
+ The main alternatives to DLPack are device-specific methods:
+
+ - The `buffer protocol <https://docs.python.org/3/c-api/buffer.html>`_ on CPU
+ - ``__cuda_array_interface__`` for CUDA, specified in the Numba documentation
+ `here <https://numba.readthedocs.io/en/stable/cuda/cuda_array_interface.html>`_
+ (Python-side only at the moment)
+
+ An issue with device-specific protocols is: if two libraries both
+ support multiple device types, in which order should the protocols be
+ tried? A growth in the number of protocols to support each time a new
+ device gets supported by array libraries (e.g. TPUs, AMD GPUs, emerging
+ hardware accelerators) also seems undesirable.
+
+ In addition to the above argument, it is also clear from adoption
+ patterns that DLPack has the widest support. The buffer protocol, despite
+ being a lot older and standardized as part of Python itself via PEP 3118,
+ hardly has any support from array libraries. CPU interoperability is
+ mostly dealt with via the NumPy-specific ``__array__`` (which, when called,
+ means the object it is attached to must return a ``numpy.ndarray``
+ containing the data the object holds).
+
+ See the `RFC to adopt DLPack <https://github.com/data-apis/consortium-feedback/issues/1>`_
+ for discussion that preceded the adoption of DLPack.
+
+DLPack's documentation can be found at: https://dmlc.github.io/dlpack/latest/.
+
+The `Python specification of DLPack <https://dmlc.github.io/dlpack/latest/python_spec.html>`__
+page gives a high-level specification for data exchange in Python using DLPack.
+
+.. note::
+ DLPack is a standalone protocol/project and can therefore be used outside of
+ this standard. Python libraries that want to implement only DLPack support
+ are recommended to do so using the same syntax and semantics as outlined
+ below. They are not required to return an array object from ``from_dlpack``
+ which conforms to this standard.
+
+Non-supported use cases
+-----------------------
+
+Use of DLPack requires that the data can be represented by a strided, in-memory
+layout on a single device. This covers usage by a large range of, but not all,
+known and possible array libraries. Use cases that are not supported by DLPack
+include:
+
+- Distributed arrays, i.e., the data residing on multiple nodes or devices,
+- Sparse arrays, i.e., sparse representations where a data value (typically
+ zero) is implicit.
+
+There may be other reasons why it is not possible or desirable for an
+implementation to materialize the array as strided data in memory. In such
+cases, the implementation may raise a `BufferError` in the `__dlpack__` or
+`__dlpack_device__` method. In case an implementation is never able to export
+its array data via DLPack, it may omit `__dlpack__` and `__dlpack_device__`
+completely, and hence `from_dlpack` may raise an `AttributeError`.
diff --git a/spec/2023.12/design_topics/device_support.rst b/spec/2023.12/design_topics/device_support.rst
new file mode 100644
index 000000000..593b0b9fa
--- /dev/null
+++ b/spec/2023.12/design_topics/device_support.rst
@@ -0,0 +1,112 @@
+.. _device-support:
+
+Device support
+==============
+
+For libraries that support execution on more than a single hardware device - e.g. CPU and GPU, or multiple GPUs - it is important to be able to control on which device newly created arrays get placed and where execution happens. Attempting to be fully implicit doesn't always scale well to situations with multiple GPUs.
+
+Existing libraries employ one or more of these three methods to exert such control over data placement:
+
+1. A global default device, which may be fixed or user-switchable.
+2. A context manager to control device assignment within its scope.
+3. Local control for data allocation target device via explicit keywords, and a method to transfer arrays to another device.
+
+Libraries differ in how execution is controlled, via a context manager or with the convention that execution takes place on the same device where all argument arrays are allocated. And they may or may not allow mixing arrays on different devices via implicit data transfers.
+
+This standard chooses to add support for method 3 (local control), with the convention that execution takes place on the same device where all argument arrays are allocated. The rationale for choosing method 3 is because it's the most explicit and granular, with its only downside being verbosity. A context manager may be added in the future - see :ref:`device-out-of-scope` for details.
+
+Intended usage
+--------------
+
+The intended usage for the device support in the current version of the
+standard is *device handling in library code*. The assumed pattern is that
+users create arrays (for which they can use all the relevant device syntax
+that the library they use provides), and that they then pass those arrays
+into library code which may have to do the following:
+
+- Create new arrays on the same device as an array that's passed in.
+- Determine whether two input arrays are present on the same device or not.
+- Move an array from one device to another.
+- Create output arrays on the same device as the input arrays.
+- Pass on a specified device to other library code.
+
+.. note::
+ Given that there is not much that's currently common in terms of
+ device-related syntax between different array libraries, the syntax included
+ in the standard is kept as minimal as possible while enabling the
+ above-listed use cases.
+
+Syntax for device assignment
+----------------------------
+
+The array API provides the following syntax for device assignment and
+cross-device data transfer:
+
+1. A ``.device`` property on the array object, which returns a ``Device`` object
+ representing the device the data in the array is stored on, and supports
+ comparing devices for equality with ``==`` and ``!=`` within the same library
+   (e.g., by implementing ``__eq__``; comparing device objects from different
+   libraries is out of scope).
+2. A ``device=None`` keyword for array creation functions, which takes an
+ instance of a ``Device`` object.
+3. A ``.to_device`` method on the array object to copy an array to a different device.
+
+.. note::
+ The current API standard does **not** include a universal ``Device`` object
+ recognized by all compliant libraries. Accordingly, the standard does not
+ provide a means of instantiating a ``Device`` object to point to a specific
+ physical or logical device.
+
+ The choice to not include a standardized ``Device`` object may be revisited
+ in a future revision of this standard.
+
+ For array libraries which concern themselves with multi-device support,
+ including CPU and GPU, they are free to expose a library-specific device
+ object (e.g., for creating an array on a particular device). While a
+ library-specific device object can be used as input to ``to_device``, beware
+ that this will mean non-portability as code will be specific to that
+ library.
+
+Semantics
+---------
+
+Handling devices is complex, and some frameworks have elaborate policies for
+handling device placement. Therefore this section only gives recommendations,
+rather than hard requirements:
+
+- Respect explicit device assignment (i.e. if the input to the ``device=`` keyword is not ``None``, guarantee that the array is created on the given device, and raise an exception otherwise).
+- Preserve device assignment as much as possible (e.g. output arrays from a function are expected to be on the same device as input arrays to the function).
+- Raise an exception if an operation involves arrays on different devices (i.e. avoid implicit data transfer between devices).
+- Use a default for ``device=None`` which is consistent between functions within the same library.
+- If a library has multiple ways of controlling device placement, the most explicit method should have the highest priority. For example:
+
+ 1. If ``device=`` keyword is specified, that always takes precedence
+
+ 2. If ``device=None``, then use the setting from a context manager, if set.
+
+ 3. If no context manager was used, then use the global default device/strategy
+
+.. _device-out-of-scope:
+
+Out of scope for device support
+-------------------------------
+
+Individual libraries may offer APIs for one or more of the following topics;
+however, those are out of scope for this standard:
+
+- Identifying a specific physical or logical device across libraries
+- Setting a default device globally
+- Stream/queue control
+- Distributed allocation
+- Memory pinning
+- A context manager for device control
+
+.. note::
+ A context manager for controlling the default device is present in most existing array
+ libraries (NumPy being the exception). There are concerns with using a
+ context manager however. A context manager can be tricky to use at a high
+ level, since it may affect library code below function calls (non-local
+  effects). See, e.g., `this PyTorch issue <https://github.com/pytorch/pytorch/issues/27878>`_
+ for a discussion on a good context manager API.
+
+ Adding a context manager may be considered in a future version of this API standard.
diff --git a/spec/2023.12/design_topics/exceptions.rst b/spec/2023.12/design_topics/exceptions.rst
new file mode 100644
index 000000000..570fe56e3
--- /dev/null
+++ b/spec/2023.12/design_topics/exceptions.rst
@@ -0,0 +1,28 @@
+.. _exceptions:
+
+Exceptions
+==========
+
+This standard specifies expected syntax and semantics for a set of APIs. When
+inputs to an API do not match what is expected, libraries may emit warnings,
+raise exceptions, or misbehave in unexpected ways. In general, it is not
+possible to foresee or specify all the ways in which unexpected or invalid
+inputs are provided. Therefore, this standard does not attempt to specify
+exception or warning types to the extent needed in order to do exception
+handling in a portable manner. In general, it is expected that array library
+implementers follow `the guidance given by the documentation of the Python
+language `__, and either use
+builtin exception or warning types that are appropriate for the
+situation or use custom exceptions or warnings that derive from those builtin
+ones.
+
+In specific cases, it may be useful to provide guidance to array library
+authors regarding what an appropriate exception is. That guidance will be
+phrased as *should* rather than *must* (typically in a *Raises* section),
+because (a) there may be reasons for an implementer to deviate, and (b) more
+often than not, existing array library implementations already differ in their
+choices, and it may not be worth them breaking backward compatibility in order
+to comply with a "must" in this standard.
+
+In other cases, this standard will only specify that an exception should or
+must be raised, but not mention what type of exception that is.
diff --git a/spec/2023.12/design_topics/index.rst b/spec/2023.12/design_topics/index.rst
new file mode 100644
index 000000000..548eda90c
--- /dev/null
+++ b/spec/2023.12/design_topics/index.rst
@@ -0,0 +1,18 @@
+Design topics & constraints
+===========================
+
+.. toctree::
+ :caption: Design topics & constraints
+ :maxdepth: 1
+
+ copies_views_and_mutation
+ data_dependent_output_shapes
+ lazy_eager
+ data_interchange
+ device_support
+ static_typing
+ accuracy
+ exceptions
+ complex_numbers
+ C_API
+ parallelism
diff --git a/spec/2023.12/design_topics/lazy_eager.rst b/spec/2023.12/design_topics/lazy_eager.rst
new file mode 100644
index 000000000..63297ac73
--- /dev/null
+++ b/spec/2023.12/design_topics/lazy_eager.rst
@@ -0,0 +1,43 @@
+.. _lazy-eager:
+
+Lazy vs. eager execution
+========================
+
+While the execution model for implementations is out of scope of this standard,
+there are a few aspects of lazy (or graph-based) execution as contrasted to
+eager execution that may have an impact on the prescribed semantics of
+individual APIs, and will therefore show up in the API specification.
+
+One important difference is data-dependent or value-dependent behavior, as
+described in :ref:`data-dependent-output-shapes`. Because such behavior is hard
+to implement, implementers may choose to omit such APIs from their library.
+
+Another difference is when the Python language itself prescribes that a
+specific type *must* be returned. For those cases, it is not possible to return
+a lazy/delayed kind of object to avoid computing a value. This is the case for
+five dunder methods: `__bool__`, `__int__`, `__float__`, `__complex__` and
+`__index__`. Each implementation has only two choices when one of these methods
+is called:
+
+1. Compute a value of the required type (a Python scalar of type `bool`, `int`,
+ `float` or `complex`), or
+2. Raise an exception.
+
+When an implementation is 100% lazy, for example when it serializes a
+computation graph, computing the value is not possible and hence such an
+implementation has no choice but to raise an exception. For a "mostly lazy"
+implementation, it may make sense to trigger execution instead - but it is not
+required to, both choices are valid.
+
+A common code construct where this happens is conditional logic, e.g.::
+
+ vals = compute_something()
+ if all(vals):
+ # The if-statement will make Python call the __bool__ method
+ # on the result of `all(vals)`.
+ do_something_else()
+
+Note that the API does not contain control flow constructs, as of now, that
+would allow avoiding the implicit `__bool__` call in the example above. The
+only control flow-like function is `where`, but there's no function like `cond`
+to replace an `if`-statement.
diff --git a/spec/2023.12/design_topics/parallelism.rst b/spec/2023.12/design_topics/parallelism.rst
new file mode 100644
index 000000000..f013a9cf9
--- /dev/null
+++ b/spec/2023.12/design_topics/parallelism.rst
@@ -0,0 +1,24 @@
+Parallelism
+===========
+
+Parallelism is mostly, but not completely, an execution or runtime concern
+rather than an API concern. Execution semantics are out of scope for this API
+standard, and hence won't be discussed further here. The API related part
+involves how libraries allow users to exercise control over the parallelism
+they offer, such as:
+
+- Via environment variables. This is the method of choice for BLAS libraries and libraries using OpenMP.
+- Via a keyword to individual functions or methods. Examples include the ``n_jobs`` keyword used in scikit-learn and the ``workers`` keyword used in SciPy.
+- Build-time settings to enable a parallel or distributed backend.
+- Via letting the user set chunk sizes. Dask uses this approach.
+
+When combining multiple libraries, one has to deal with auto-parallelization
+semantics and nested parallelism. Two things that could help improve the
+coordination of parallelization behavior in a stack of Python libraries are:
+
+1. A common API pattern for enabling parallelism
+2. A common library providing a parallelization layer
+
+Option (1) may possibly fit in a future version of this array API standard.
+`array-api issue 4 <https://github.com/data-apis/array-api/issues/4>`_ contains
+more detailed discussion on the topic of parallelism.
diff --git a/spec/2023.12/design_topics/static_typing.rst b/spec/2023.12/design_topics/static_typing.rst
new file mode 100644
index 000000000..26a1fb901
--- /dev/null
+++ b/spec/2023.12/design_topics/static_typing.rst
@@ -0,0 +1,50 @@
+Static typing
+=============
+
+Good support for static typing both in array libraries and array-consuming
+code is desirable. Therefore the exact type or set of types for each
+parameter, keyword and return value is specified for functions and methods -
+see :ref:`function-and-method-signatures`. That section specifies arrays
+simply as ``array``; what that means is dealt with in this section.
+
+Introducing type annotations in libraries became more relevant only when
+Python 2.7 support was dropped at the start of 2020. As a consequence, using
+type annotations with array libraries is largely still a work in progress.
+This version of the API standard does not deal with trying to type *array
+properties* like shape, dimensionality or dtype, because that's not a solved
+problem in individual array libraries yet.
+
+An ``array`` type annotation can mean either the type of one specific array
+object, or some superclass or typing Protocol - as long as it is consistent
+with the array object specified in :ref:`array-object`. To illustrate by
+example:
+
+.. code-block:: python
+
+ # `Array` is a particular class in the library
+    def sin(x: Array, /, ...) -> Array:
+ ...
+
+and
+
+.. code-block:: python
+
+ # There's some base class `_BaseArray`, and there may be multiple
+ # array subclasses inside the library
+ A = TypeVar('A', bound=_BaseArray)
+    def sin(x: A, /, ...) -> A:
+ ...
+
+should both be fine. There may be other variations possible. Also note that
+this standard does not require that input and output array types are the same
+(they're expected to be defined in the same library though). Given that
+array libraries don't have to be aware of other types of arrays defined in
+other libraries (see :ref:`assumptions-dependencies`), this should be enough
+for a single array library.
+
+That said, an array-consuming library aiming to support multiple array types
+may need more - for example a protocol to enable structural subtyping. This
+API standard currently takes the position that it does not provide any
+reference implementation or package that can or should be relied on at
+runtime, hence no such protocol is defined here. This may be dealt with in a
+future version of this standard.
diff --git a/spec/2023.12/extensions/fourier_transform_functions.rst b/spec/2023.12/extensions/fourier_transform_functions.rst
new file mode 100644
index 000000000..170ae390b
--- /dev/null
+++ b/spec/2023.12/extensions/fourier_transform_functions.rst
@@ -0,0 +1,45 @@
+Fourier transform Functions
+===========================
+
+ Array API specification for Fourier transform functions.
+
+Extension name and usage
+------------------------
+
+The name of the namespace providing the extension must be: ``fft``.
+
+If implemented, this ``fft`` extension must be retrievable via::
+
+ >>> xp = x.__array_namespace__()
+ >>> if hasattr(xp, 'fft'):
+ >>> # Use `xp.fft`
+
+
+Objects in API
+--------------
+
+A conforming implementation of this ``fft`` extension must provide and support the following functions.
+
+.. currentmodule:: array_api.fft
+
+..
+ NOTE: please keep the functions and their inverse together
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ fft
+ ifft
+ fftn
+ ifftn
+ rfft
+ irfft
+ rfftn
+ irfftn
+ hfft
+ ihfft
+ fftfreq
+ rfftfreq
+ fftshift
+ ifftshift
diff --git a/spec/2023.12/extensions/index.rst b/spec/2023.12/extensions/index.rst
new file mode 100644
index 000000000..3b9409954
--- /dev/null
+++ b/spec/2023.12/extensions/index.rst
@@ -0,0 +1,34 @@
+.. _extensions:
+
+Extensions
+==========
+
+Extensions are coherent sets of functionality that are commonly implemented
+across array libraries. Each array library supporting this standard may, but is
+not required to, implement an extension. If an extension is supported, it
+must be accessible inside the main array API supporting namespace as a separate
+namespace.
+
+Extension module implementors must aim to provide all functions and other
+public objects in an extension. The rationale for this is that downstream usage
+can then check whether or not the extension is present (using ``hasattr(xp,
+'extension_name')`` should be enough), and can then assume that functions are
+implemented. This in turn makes it also easy for array-consuming libraries to
+document which array libraries they support - e.g., "all libraries implementing
+the array API standard and its linear algebra extension".
+
+The mechanism through which the extension namespace is made available is up to
+the implementer, e.g. via a regular submodule that is imported under the
+``linalg`` name, or via a module-level ``__getattr__``.
+
+The functions in an extension must adhere to the same conventions as those in
+the array API standard. See :ref:`api-specification`.
+
+------------------------------------------------------------------------------
+
+.. toctree::
+ :caption: Extension modules:
+ :maxdepth: 1
+
+ fourier_transform_functions
+ linear_algebra_functions
diff --git a/spec/2023.12/extensions/linear_algebra_functions.rst b/spec/2023.12/extensions/linear_algebra_functions.rst
new file mode 100644
index 000000000..938221c79
--- /dev/null
+++ b/spec/2023.12/extensions/linear_algebra_functions.rst
@@ -0,0 +1,116 @@
+.. _linear-algebra-extension:
+
+Linear Algebra Extension
+========================
+
+ Array API specification for linear algebra functions.
+
+Extension name and usage
+------------------------
+
+The name of the namespace providing the extension must be: ``linalg``.
+
+If implemented, this ``linalg`` extension must be retrievable via::
+
+ >>> xp = x.__array_namespace__()
+ >>> if hasattr(xp, 'linalg'):
+ >>> # Use `xp.linalg`
+
+
+Design Principles
+-----------------
+
+A principal goal of this specification is to standardize commonly implemented interfaces among array libraries. While this specification endeavors to avoid straying too far from common practice, this specification does, with due restraint, seek to address design decisions arising more from historical accident than first principles. This is especially true for linear algebra APIs, which have arisen and evolved organically over time and have often been tied to particular underlying implementations (e.g., to BLAS and LAPACK).
+
+Accordingly, the standardization process affords the opportunity to reduce interface complexity among linear algebra APIs by inferring and subsequently codifying common design themes, thus allowing more consistent APIs. What follows is the set of design principles governing the APIs which follow:
+
+1. **Batching**: if an operation is explicitly defined in terms of matrices (i.e., two-dimensional arrays), then the associated interface should support "batching" (i.e., the ability to perform the operation over a "stack" of matrices). Example operations include:
+
+ - ``inv``: computing the multiplicative inverse of a square matrix.
+ - ``cholesky``: performing Cholesky decomposition.
+ - ``matmul``: performing matrix multiplication.
+
+2. **Data types**: if an operation requires decimal operations and :ref:`type-promotion` semantics are undefined (e.g., as is the case for mixed-kind promotions), then the associated interface should be specified as being restricted to floating-point data types. While the specification uses the term "SHOULD" rather than "MUST", a conforming implementation of the array API standard should only ignore the restriction provided overly compelling reasons for doing so. Example operations which should be limited to floating-point data types include:
+
+ - ``inv``: computing the multiplicative inverse.
+ - ``slogdet``: computing the natural logarithm of the absolute value of the determinant.
+ - ``norm``: computing the matrix or vector norm.
+
+ Certain operations are solely comprised of multiplications and additions. Accordingly, associated interfaces need not be restricted to floating-point data types. However, careful consideration should be given to overflow, and use of floating-point data types may be more prudent in practice. Example operations include:
+
+ - ``matmul``: performing matrix multiplication.
+ - ``trace``: computing the sum along the diagonal.
+ - ``cross``: computing the vector cross product.
+
+ Lastly, certain operations may be performed independent of data type, and, thus, the associated interfaces should support all data types specified in this standard. Example operations include:
+
+ - ``matrix_transpose``: computing the transpose.
+ - ``diagonal``: returning the diagonal.
+
+3. **Return values**: if an interface has more than one return value, the interface should return a namedtuple consisting of each value.
+
+ In general, interfaces should avoid polymorphic return values (e.g., returning an array **or** a namedtuple, dependent on, e.g., an optional keyword argument). Dedicated interfaces for each return value type are preferred, as dedicated interfaces are easier to reason about at both the implementation level and user level. Example interfaces which could be combined into a single overloaded interface, but are not, include:
+
+ - ``eigh``: computing both eigenvalues and eigenvectors.
+ - ``eigvalsh``: computing only eigenvalues.
+
+4. **Implementation agnosticism**: a standardized interface should eschew parameterization (including keyword arguments) biased toward particular implementations.
+
+ Historically, at a time when all array computing happened on CPUs, BLAS and LAPACK underpinned most numerical computing libraries and environments. Naturally, language and library abstractions catered to the parameterization of those libraries, often exposing low-level implementation details verbatim in their higher-level interfaces, even if such choices would be considered poor or ill-advised by today's standards (e.g., NumPy's use of `UPLO` in `eigh`). However, the present day is considerably different. While still important, BLAS and LAPACK no longer hold a monopoly over linear algebra operations, especially given the proliferation of devices and hardware on which such operations must be performed. Accordingly, interfaces must be conservative in the parameterization they support in order to best ensure universality. Such conservatism applies even to performance optimization parameters afforded by certain hardware.
+
+5. **Orthogonality**: an interface should have clearly defined and delineated functionality which, ideally, has no overlap with the functionality of other interfaces in the specification. Providing multiple interfaces which can all perform the same operation creates unnecessary confusion regarding interface applicability (i.e., which interface is best at which time) and decreases readability of both library and user code. Where overlap is possible, the specification must be parsimonious in the number of interfaces, ensuring that each interface provides a unique and compelling abstraction. Examples of related interfaces which provide distinct levels of abstraction (and generality) include:
+
+ - ``vecdot``: computing the dot product of two vectors.
+ - ``matmul``: performing matrix multiplication (including between two vectors and thus the dot product).
+ - ``tensordot``: computing tensor contractions (generalized sum-products).
+ - ``einsum``: expressing operations in terms of Einstein summation convention, including dot products and tensor contractions.
+
+ The above can be contrasted with, e.g., NumPy, which provides the following interfaces for computing the dot product or related operations:
+
+ - ``dot``: dot product, matrix multiplication, and tensor contraction.
+ - ``inner``: dot product.
+ - ``vdot``: dot product with flattening and complex conjugation.
+ - ``multi_dot``: chained dot product.
+ - ``tensordot``: tensor contraction.
+ - ``matmul``: matrix multiplication (dot product for two vectors).
+ - ``einsum``: Einstein summation convention.
+
+ where ``dot`` is overloaded based on input array dimensionality and ``vdot`` and ``inner`` exhibit a high degree of overlap with other interfaces. By consolidating interfaces and more clearly delineating behavior, this specification aims to ensure that each interface has a unique purpose and defined use case.
+
+.. currentmodule:: array_api.linalg
+
+Objects in API
+--------------
+
+A conforming implementation of this ``linalg`` extension must provide and support the following functions.
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ cholesky
+ cross
+ det
+ diagonal
+ eigh
+ eigvalsh
+ inv
+ matmul
+ matrix_norm
+ matrix_power
+ matrix_rank
+ matrix_transpose
+ outer
+ pinv
+ qr
+ slogdet
+ solve
+ svd
+ svdvals
+ tensordot
+ trace
+ vecdot
+ vector_norm
diff --git a/spec/2023.12/future_API_evolution.md b/spec/2023.12/future_API_evolution.md
new file mode 100644
index 000000000..443f683d5
--- /dev/null
+++ b/spec/2023.12/future_API_evolution.md
@@ -0,0 +1,60 @@
+(future-API-evolution)=
+
+# Future API standard evolution
+
+## Scope extensions
+
+Proposals for scope extensions in a future version of the API standard will follow
+the process documented at https://github.com/data-apis/governance/blob/master/process_document.md
+
+In summary, proposed new APIs go through several maturity stages, and will only be
+accepted in a future version of this API standard once they have reached the "Final"
+maturity stage, which means multiple array libraries have compliant implementations
+and real-world experience from use of those implementations is available.
+
+
+## Backwards compatibility
+
+Functions, objects, keywords and specified behavior are added to this API standard
+only if those are already present in multiple existing array libraries, and if there is
+data that those APIs are used. Therefore it is highly unlikely that future versions
+of this standard will make backwards-incompatible changes.
+
+The aim is for future versions to be 100% backwards compatible with older versions.
+Any exceptions must have strong rationales and be clearly documented in the updated
+API specification.
+
+
+(api-versioning)=
+
+## Versioning
+
+This API standard uses the following versioning scheme:
+
+- The version is date-based, in the form `yyyy.mm` (e.g., `2020.12`).
+- The version shall not include a standard way to do `alpha`/`beta`/`rc` or
+ `.post`/`.dev` type versions.
+ _Rationale: that's for Python packages, not for a standard._
+- The version must be made available at runtime via an attribute
+ `__array_api_version__` by a compliant implementation, in `'yyyy.mm'` format
+ as a string, in the namespace that implements the API standard.
+ _Rationale: dunder version strings are the standard way of doing this._
+
+No utilities for dealing with version comparisons need to be provided; given
+the format simple string comparisons with Python operators (`==`, `<`, `>=`,
+etc.) will be enough.
+
+```{note}
+
+Rationale for the `yyyy.mm` versioning scheme choice:
+the API will be provided as part of a library, which already has a versioning
+scheme (typically PEP 440 compliant and in the form `major.minor.bugfix`),
+and a way to access it via `module.__version__`. The API standard version is
+completely independent from the package version. Given the standardization
+process, it resembles a C/C++ versioning scheme (e.g. `C99`, `C++14`) more
+than Python package versioning.
+```
+
+The frequency of releasing a new version of an API standard will likely be at
+regular intervals and on the order of one year, however no assumption on
+frequency of new versions appearing must be made.
diff --git a/spec/2023.12/index.rst b/spec/2023.12/index.rst
new file mode 100644
index 000000000..3e51cc68e
--- /dev/null
+++ b/spec/2023.12/index.rst
@@ -0,0 +1,37 @@
+Python array API standard
+=========================
+
+Contents
+--------
+
+.. toctree::
+ :caption: Context
+ :maxdepth: 1
+
+ purpose_and_scope
+ use_cases
+ assumptions
+
+.. toctree::
+ :caption: API
+ :maxdepth: 1
+
+ design_topics/index
+ future_API_evolution
+ API_specification/index
+ extensions/index
+
+.. toctree::
+ :caption: Methodology and Usage
+ :maxdepth: 1
+
+ usage_data
+ verification_test_suite
+ benchmark_suite
+
+.. toctree::
+ :caption: Other
+ :maxdepth: 1
+
+ changelog
+ license
diff --git a/spec/2023.12/license.rst b/spec/2023.12/license.rst
new file mode 100644
index 000000000..06ec75dfc
--- /dev/null
+++ b/spec/2023.12/license.rst
@@ -0,0 +1,9 @@
+License
+=======
+
+All content on this website and the corresponding
+`GitHub repository <https://github.com/data-apis/array-api>`__ is licensed
+under the following license:
+
+ .. include:: ../../LICENSE
+ :parser: myst_parser.sphinx_
diff --git a/spec/2023.12/purpose_and_scope.md b/spec/2023.12/purpose_and_scope.md
new file mode 100644
index 000000000..f375c9512
--- /dev/null
+++ b/spec/2023.12/purpose_and_scope.md
@@ -0,0 +1,470 @@
+# Purpose and scope
+
+## Introduction
+
+Python users have a wealth of choice for libraries and frameworks for
+numerical computing, data science, machine learning, and deep learning. New
+frameworks pushing forward the state of the art in these fields are appearing
+every year. One unintended consequence of all this activity and creativity
+has been fragmentation in multidimensional array (a.k.a. tensor) libraries -
+which are the fundamental data structure for these fields. Choices include
+NumPy, Tensorflow, PyTorch, Dask, JAX, CuPy, MXNet, Xarray, and others.
+
+The APIs of each of these libraries are largely similar, but with enough
+differences that it's quite difficult to write code that works with multiple
+(or all) of these libraries. This array API standard aims to address that
+issue, by specifying an API for the most common ways arrays are constructed
+and used.
+
+Why not simply pick an existing API and bless that as the standard? In short,
+because there are often good reasons for the current inconsistencies between
+libraries. The most obvious candidate for that existing API is NumPy. However
+NumPy was not designed with non-CPU devices, graph-based libraries, or JIT
+compilers in mind. Other libraries often deviate from NumPy for good
+(necessary) reasons. Choices made in this API standard are often the same
+ones NumPy makes, or close to it, but are different where necessary to make
+sure all existing array libraries can adopt this API.
+
+
+### This API standard
+
+This document aims to standardize functionality that exists in most/all array
+libraries and either is commonly used or is needed for
+consistency/completeness. Usage is determined via analysis of downstream
+libraries, see {ref}`usage-data`. An example of consistency is: there are
+functional equivalents for all Python operators (including the rarely used
+ones).
+
+Beyond usage and consistency, there's a set of use cases that inform the API
+design to ensure it's fit for a wide range of users and situations - see
+{ref}`use-cases`.
+
+A question that may arise when reading this document is: _"what about
+functionality that's not present in this document?"_ This:
+
+- means that there is no guarantee the functionality is present in libraries
+ adhering to the standard
+- does _not_ mean that that functionality is unimportant
+- may indicate that that functionality, if present in a particular array
+ library, is unlikely to be present in all other libraries
+
+### History
+
+The first library for numerical and scientific computing in Python was
+Numeric, developed in the mid-1990s. In the early 2000s a second, similar
+library, Numarray, was created. In 2005 NumPy was written, superseding both
+Numeric and Numarray and resolving the fragmentation at that time. For
+roughly a decade, NumPy was the only widely used array library. Over the past
+~5 years, mainly due to the emergence of new hardware and the rise of deep
+learning, many other libraries have appeared, leading to more severe
+fragmentation. Concepts and APIs in newer libraries were often inspired by
+(or copied from) those in older ones - and then changed or improved upon to
+fit new needs and use cases. Individual library authors discussed ideas,
+however there was never (before this array API standard) a serious attempt
+to coordinate between all libraries to avoid fragmentation and arrive at a
+common API standard.
+
+The idea for this array API standard grew gradually out of many conversations
+between maintainers during 2019-2020. It quickly became clear that any
+attempt to write a new "reference library" to fix the current fragmentation
+was infeasible - unlike in 2005, there are now too many different use cases
+and too many stakeholders, and the speed of innovation is too high. In May
+2020 an initial group of maintainers was assembled in the [Consortium for
+Python Data API Standards](https://data-apis.org/) to start drafting a
+specification for an array API that could be adopted by each of the existing
+array and tensor libraries. That resulted in this document, describing that
+API.
+
+
+(Scope)=
+
+## Scope (includes out-of-scope / non-goals)
+
+This section outlines what is in scope and out of scope for this API standard.
+
+### In scope
+
+The scope of the array API standard includes:
+
+- Functionality which needs to be included in an array library for it to adhere
+ to this standard.
+- Names of functions, methods, classes and other objects.
+- Function signatures, including type annotations.
+- Semantics of functions and methods. I.e. expected outputs including precision
+ for and dtypes of numerical results.
+- Semantics in the presence of `nan`'s, `inf`'s, empty arrays (i.e. arrays
+ including one or more dimensions of size `0`).
+- Casting rules, broadcasting, indexing
+- Data interchange. I.e. protocols to convert one type of array into another
+ type, potentially sharing memory.
+- Device support.
+
+Furthermore, meta-topics included in this standard include:
+
+- Use cases for the API standard and assumptions made in it
+- API standard adoption
+- API standard versioning
+- Future API standard evolution
+- Array library and API standard versioning
+- Verification of API standard conformance
+
+The concrete set of functionality that is in scope for this version of the
+standard is shown in this diagram:
+
+
+
+
+**Goals** for the API standard include:
+
+- Make it possible for array-consuming libraries to start using multiple types
+ of arrays as inputs.
+- Enable more sharing and reuse of code built on top of the core functionality
+ in the API standard.
+- For authors of new array libraries, provide a concrete API that can be
+ adopted as is, rather than each author having to decide what to borrow from
+ where and where to deviate.
+- Make the learning curve for users less steep when they switch from one array
+ library to another one.
+
+
+### Out of scope
+
+1. Implementations of the standard are out of scope.
+
+ _Rationale: the standard will consist of a document and an accompanying test
+ suite with which the conformance of an implementation can be verified. Actual
+ implementations will live in array libraries; no reference implementation is
+ planned._
+
+2. Execution semantics are out of scope. This includes single-threaded vs.
+ parallel execution, task scheduling and synchronization, eager vs. delayed
+ evaluation, performance characteristics of a particular implementation of the
+ standard, and other such topics.
+
+ _Rationale: execution is the domain of implementations. Attempting to specify
+ execution behavior in a standard is likely to require much more fine-grained
+ coordination between developers of implementations, and hence is likely to
+ become an obstacle to adoption._
+
+3. Non-Python API standardization (e.g., Cython or NumPy C APIs)
+
+ _Rationale: this is an important topic for some array-consuming libraries,
+ but there is no widely shared C/Cython API and hence it doesn't make sense at
+ this point in time to standardize anything. See
+ the [C API section](design_topics/C_API.rst) for more details._
+
+4. Standardization of these dtypes is out of scope: bfloat16, extended
+ precision floating point, datetime, string, object and void dtypes.
+
+ _Rationale: these dtypes aren't uniformly supported, and their inclusion at
+ this point in time could put a significant implementation burden on
+ libraries. It is expected that some of these dtypes - in particular
+ `bfloat16` - will be included in a future version of the standard._
+
+5. The following topics are out of scope: I/O, polynomials, error handling,
+ testing routines, building and packaging related functionality, methods of
+ binding compiled code (e.g., `cffi`, `ctypes`), subclassing of an array
+ class, masked arrays, and missing data.
+
+ _Rationale: these topics are not core functionality for an array library,
+ and/or are too tied to implementation details._
+
+6. NumPy (generalized) universal functions, i.e. ufuncs and gufuncs.
+
+ _Rationale: these are NumPy-specific concepts, and are mostly just a
+ particular way of building regular functions with a few extra
+ methods/properties._
+
+7. Behaviour for unexpected/invalid input to functions and methods.
+
+   _Rationale: there are a huge number of ways in which users can provide
+ invalid or unspecified input to functionality in the standard. Exception
+ types or other resulting behaviour cannot be completely covered and would
+ be hard to make consistent between libraries._
+
+
+**Non-goals** for the API standard include:
+
+- Making array libraries identical so they can be merged.
+
+ _Each library will keep having its own particular strength, whether it's
+ offering functionality beyond what's in the standard, performance advantages
+ for a given use case, specific hardware or software environment support, or
+ more._
+
+- Implement a backend or runtime switching system to be able to switch from one
+ array library to another with a single setting or line of code.
+
+ _This may be feasible, however it's assumed that when an array-consuming
+ library switches from one array type to another, some testing and possibly
+ code adjustment for performance or other reasons may be needed._
+
+- Making it possible to mix multiple array libraries in function calls.
+
+ _Most array libraries do not know about other libraries, and the functions
+ they implement may try to convert "foreign" input, or raise an exception.
+ This behaviour is hard to specify; ensuring only a single array type is
+ used is best left to the end user._
+
+
+### Implications of in/out of scope
+
+If something is out of scope and therefore will not be part of (the current
+version of) the API standard, that means that there are no guarantees that that
+functionality works the same way, or even exists at all, across the set of
+array libraries that conform to the standard. It does _not_ imply that this
+functionality is less important or should not be used.
+
+
+## Stakeholders
+
+Arrays are fundamental to scientific computing, data science, and machine
+learning and deep learning. Hence there are many stakeholders for an array API
+standard. The _direct_ stakeholders of this standard are **authors/maintainers of
+Python array libraries**. There are many more types of _indirect_ stakeholders
+though, including:
+
+- maintainers of libraries and other programs which depend on array libraries
+ (called "array-consuming libraries" in the rest of this document)
+- authors of non-Python array libraries
+- developers of compilers and runtimes with array-specific functionality
+- end users
+
+Libraries that are being actively considered - in terms of current behaviour and
+API surface - during the creation of the first version of this standard
+include:
+
+- [NumPy](https://numpy.org)
+- [TensorFlow](https://www.tensorflow.org/)
+- [PyTorch](https://pytorch.org/)
+- [MXNet](https://numpy.mxnet.io/)
+- [JAX](https://github.com/google/jax)
+- [Dask](https://dask.org/)
+- [CuPy](https://cupy.chainer.org/)
+
+Other Python array libraries that are currently under active development and
+could adopt this API standard include:
+
+- [xarray](https://xarray.pydata.org/)
+- [PyData/Sparse](https://sparse.pydata.org)
+- [Weld](https://github.com/weld-project/weld)
+- [Bohrium](https://bohrium.readthedocs.io/)
+- [Arkouda](https://github.com/mhmerrill/arkouda)
+- [Legate](https://research.nvidia.com/publication/2019-11_Legate-NumPy%3A-Accelerated)
+
+There is a huge number of array-consuming libraries; some of the most
+prominent ones that are being taken into account - in terms of current array
+API usage or impact of design decisions on them - include (this list is likely
+to grow over time):
+
+- [Pandas](https://pandas.pydata.org/)
+- [SciPy](https://github.com/scipy/scipy)
+- [scikit-learn](https://scikit-learn.org/)
+- [Matplotlib](https://matplotlib.org/)
+- [scikit-image](https://scikit-image.org/)
+- [NetworkX](https://networkx.github.io/)
+
+Array libraries in other languages, some of which may grow a Python API in the
+future or have taken inspiration from NumPy or other array libraries, include:
+
+- [Xtensor](https://xtensor.readthedocs.io) (C++, cross-language)
+- [XND](https://xnd.io/) (C, cross-language)
+- [stdlib](https://stdlib.io/) (JavaScript)
+- [rust-ndarray](https://github.com/rust-ndarray/ndarray) (Rust)
+- [rray](https://github.com/r-lib/rray) (R)
+- [ND4J](https://github.com/deeplearning4j/nd4j) (JVM)
+- [NumSharp](https://github.com/SciSharp/NumSharp) (C#)
+
+Compilers, runtimes, and dispatching layers for which this API standard may be
+relevant:
+
+- [Cython](https://cython.org/)
+- [Numba](http://numba.pydata.org/)
+- [Pythran](https://pythran.readthedocs.io/en/latest/)
+- [Transonic](https://transonic.readthedocs.io)
+- [ONNX](https://onnx.ai/)
+- [Apache TVM](https://tvm.apache.org/)
+- [MLIR](https://mlir.llvm.org/)
+- [TACO](https://github.com/tensor-compiler/taco)
+- [unumpy](https://github.com/Quansight-Labs/unumpy)
+- [einops](https://github.com/arogozhnikov/einops)
+- [Apache Arrow](https://arrow.apache.org/)
+
+
+
+## How to read this document
+
+For guidance on how to read and understand the type annotations included in this specification, consult the Python [documentation](https://docs.python.org/3/library/typing.html).
+
+
+(how-to-adopt-this-api)=
+
+## How to adopt this API
+
+Most (all) existing array libraries will find something in this API standard
+that is incompatible with a current implementation, and that they cannot
+change due to backwards compatibility concerns. Therefore we expect that each
+of those libraries will want to offer a standard-compliant API in a _new
+namespace_. The question then becomes: how does a user access this namespace?
+
+The simplest method is: document the import to use to directly access the
+namespace (e.g. `import package_name.array_api`). This has two issues though:
+
+1. Array-consuming libraries that want to support multiple array libraries
+ then have to explicitly import each library.
+2. It is difficult to _version_ the array API standard implementation (see
+ {ref}`api-versioning`).
+
+To address both issues, a uniform way must be provided by a conforming
+implementation to access the API namespace, namely a [method on the array
+object](array.__array_namespace__):
+
+```
+xp = x.__array_namespace__()
+```
+
+The method must take one keyword, `api_version=None`, to make it possible to
+request a specific API version:
+
+```
+xp = x.__array_namespace__(api_version='2020.10')
+```
+
+The `xp` namespace must contain all functionality specified in
+{ref}`api-specification`. The namespace may contain other functionality; however,
+including additional functionality is not recommended as doing so may hinder
+portability and inter-operation of array libraries within user code.
+
+### Checking an array object for compliance
+
+Array-consuming libraries are likely to want a mechanism for determining
+whether a provided array is specification compliant. The recommended approach
+to check for compliance is by checking whether an array object has an
+`__array_namespace__` attribute, as this is the one distinguishing feature of
+an array-compliant object.
+
+Checking for an `__array_namespace__` attribute can be implemented as a small
+utility function similar to the following.
+
+```python
+def is_array_api_obj(x):
+ return hasattr(x, '__array_namespace__')
+```
+
+```{note}
+Providing a "reference library" on which people depend is out-of-scope for
+the standard. Hence the standard cannot, e.g., provide an array ABC from
+which libraries can inherit to enable an `isinstance` check. However, note
+that the `numpy.array_api` implementation aims to provide a reference
+implementation with only the behavior specified in this standard - it may
+prove useful for verifying one is writing portable code.
+```
+
+### Discoverability of conforming implementations
+
+It may be useful to have a way to discover all packages in a Python
+environment which provide a conforming array API implementation, and the
+namespace that that implementation resides in.
+To assist array-consuming libraries which need to create arrays originating
+from multiple conforming array implementations, or developers who want to perform
+for example cross-library testing, libraries may provide an
+{pypa}`entry point <specifications/entry-points/>` in order to make an array API
+namespace discoverable.
+
+:::{admonition} Optional feature
+Given that entry points typically require build system & package installer
+specific implementation, this standard chooses to recommend rather than
+mandate providing an entry point.
+:::
+
+The following code is an example for how one can discover installed
+conforming libraries:
+
+```python
+from importlib.metadata import entry_points
+
+try:
+ eps = entry_points()['array_api']
+ ep = next(ep for ep in eps if ep.name == 'package_name')
+except TypeError:
+ # The dict interface for entry_points() is deprecated in py3.10,
+ # supplanted by a new select interface.
+ ep = entry_points(group='array_api', name='package_name')
+
+xp = ep.load()
+```
+
+An entry point must have the following properties:
+
+- **group**: equal to `array_api`.
+- **name**: equal to the package name.
+- **object reference**: equal to the array API namespace import path.
+
+
+* * *
+
+## Conformance
+
+A conforming implementation of the array API standard must provide and support
+all the functions, arguments, data types, syntax, and semantics described in
+this specification.
+
+A conforming implementation of the array API standard may provide additional
+values, objects, properties, data types, and functions beyond those described
+in this specification.
+
+Libraries which aim to provide a conforming implementation but haven't yet
+completed such an implementation may, and are encouraged to, provide details on
+the level of (non-)conformance. For details on how to do this, see
+[Verification - measuring conformance](verification_test_suite.md).
+
+
+* * *
+
+## Terms and Definitions
+
+For the purposes of this specification, the following terms and definitions apply.
+
+
+
+**array**:
+a (usually fixed-size) multidimensional container of items of the same type and size.
+
+**axis**:
+an array dimension.
+
+**branch cut**:
+a curve in the complex plane across which a given complex function fails to be continuous.
+
+**broadcast**:
+automatic (implicit) expansion of array dimensions to be of equal sizes without copying array data for the purpose of making arrays with different shapes have compatible shapes for element-wise operations.
+
+**compatible**:
+two arrays whose dimensions are compatible (i.e., where the size of each dimension in one array is either equal to one or to the size of the corresponding dimension in a second array).
+
+**element-wise**:
+an operation performed element-by-element, in which individual array elements are considered in isolation and independently of other elements within the same array.
+
+**matrix**:
+a two-dimensional array.
+
+**rank**:
+number of array dimensions (not to be confused with the number of linearly independent columns of a matrix).
+
+**shape**:
+a tuple of `N` non-negative integers that specify the sizes of each dimension and where `N` corresponds to the number of dimensions.
+
+**singleton dimension**:
+a dimension whose size is one.
+
+**vector**:
+a one-dimensional array.
+
+* * *
+
+## Normative References
+
+The following referenced documents are indispensable for the application of this specification.
+
+- __IEEE 754-2019: IEEE Standard for Floating-Point Arithmetic.__ Institute of Electrical and Electronic Engineers, New York (2019).
+- Scott Bradner. 1997. "Key words for use in RFCs to Indicate Requirement Levels". RFC 2119. doi:[10.17487/rfc2119](https://tools.ietf.org/html/rfc2119).
diff --git a/spec/2023.12/usage_data.md b/spec/2023.12/usage_data.md
new file mode 100644
index 000000000..c2dcd5d65
--- /dev/null
+++ b/spec/2023.12/usage_data.md
@@ -0,0 +1,86 @@
+(usage-data)=
+
+# Usage Data
+
+> Summary of existing array API design and usage.
+
+## Introduction
+
+With rare exception, technical standardization ("standardization") occurs neither in a vacuum nor from first principles. Instead, standardization finds its origins in two or more, sometimes competing, implementations differing in design and behavior. These differences introduce friction as those (e.g., downstream end-users and library authors) who operate at higher levels of abstraction must either focus on an implementation subset (e.g., only NumPy-like array libraries) or accommodate variation through increased complexity (e.g., if NumPy array, call method `.foo()`; else if Dask array, call method `.bar()`).
+
+Standardization aspires to reduce this friction and is a process which codifies that which is common, while still encouraging experimentation and innovation. Through the process of standardization, implementations can align around a subset of established practices and channel development resources toward that which is new and novel. In short, standardization aims to thwart reinventing the proverbial wheel.
+
+A foundational step in standardization is articulating a subset of established practices and defining those practices in unambiguous terms. To this end, the standardization process must approach the problem from two directions: **design** and **usage**. The former direction seeks to understand
+
+- current implementation design (APIs, names, signatures, classes, and objects)
+- current implementation semantics (calling conventions and behavior)
+
+while the latter direction seeks to quantify API
+
+- consumers (e.g., which downstream libraries utilize an API?)
+- usage frequency (e.g., how often is an API consumed?)
+- consumption patterns (e.g., which optional arguments are provided and in what context?)
+
+By analyzing both design and usage, the standardization process grounds specification decisions in empirical data and analysis.
+
+## Design
+
+To understand API design, standardization follows the following process.
+
+- Identify a representative sample of commonly used Python array libraries (e.g., NumPy, Dask Array, CuPy, MXNet, JAX, TensorFlow, and PyTorch).
+- Acquire public APIs (e.g., by analyzing module exports and scraping public documentation).
+- Unify and standardize public API data representation for subsequent analysis.
+- Extract commonalities and differences by analyzing the intersection and complement of available APIs.
+- Derive a common API subset suitable for standardization (based on prevalence and ease of implementation), where such a subset may include attribute names, method names, and positional and keyword arguments.
+- Leverage usage data to validate API need and to inform naming conventions, supported data types, and/or optional arguments.
+- Summarize findings and provide tooling for additional analysis and exploration.
+
+See the [`array-api-comparison`](https://github.com/data-apis/array-api-comparison)
+repository for design data and summary analysis.
+
+## Usage
+
+To understand usage patterns, standardization follows the following process.
+
+- Identify a representative sample of commonly used Python libraries ("downstream libraries") which consume the subset of array libraries identified during design analysis (e.g., pandas, Matplotlib, SciPy, Xarray, scikit-learn, and scikit-image).
+- Instrument downstream libraries in order to record Python array API calls.
+- Collect traces while running downstream library test suites.
+- Transform trace data into structured data (e.g., as JSON) for subsequent analysis.
+- Generate empirical APIs based on provided arguments and associated types, noting which downstream library called which empirical API and at what frequency.
+- Derive a single inferred API which unifies the individual empirical API calling semantics.
+- Organize API results in human-readable form as type definition files.
+- Compare the inferred API to the documented API.
+
+The following is an inferred API for `numpy.arange`. The docstring includes the number of lines of code that invoked this function for each downstream library when running downstream library test suites.
+
+```python
+def arange(
+ _0: object,
+ /,
+ *_args: object,
+ dtype: Union[type, str, numpy.dtype, None] = ...,
+ step: Union[int, float] = ...,
+ stop: int = ...,
+):
+ """
+ usage.dask: 347
+ usage.matplotlib: 359
+ usage.pandas: 894
+ usage.sample-usage: 4
+ usage.scipy: 1173
+ usage.skimage: 174
+ usage.sklearn: 373
+ usage.xarray: 666
+ """
+ ...
+```
+
+See the [`python-record-api`](https://github.com/data-apis/python-record-api) repository for source code, usage data, and analysis. To perform a similar analysis on additional downstream libraries, including those not publicly released, see the published PyPI [package](https://pypi.org/project/record_api/).
+
+## Use in Decision-Making
+
+Design and usage data support specification decision-making in the following ways.
+
+- Validate user stories to ensure that proposals satisfy existing needs.
+- Define scope to ensure that proposals address general array library design requirements (i.e., proposals must have broad applicability and be possible to implement with a reasonable amount of effort).
+- Inform technical design discussions to ensure that proposals are grounded in empirical data.
diff --git a/spec/2023.12/use_cases.md b/spec/2023.12/use_cases.md
new file mode 100644
index 000000000..e24aa50db
--- /dev/null
+++ b/spec/2023.12/use_cases.md
@@ -0,0 +1,235 @@
+(use-cases)=
+
+# Use cases
+
+Use cases inform the requirements for, and design choices made in, this array
+API standard. This section first discusses what types of use cases are
+considered, and then works out a few concrete use cases in more detail.
+
+## Types of use cases
+
+- Packages that depend on a specific array library currently, and would like
+ to support multiple of them (e.g. for GPU or distributed array support, for
+ improved performance, or for reaching a wider user base).
+- Writing new libraries/tools that wrap multiple array libraries.
+- Projects that implement new types of arrays with, e.g., hardware-specific
+ optimizations or auto-parallelization behavior, and need an API to put on
+ top that is familiar to end users.
+- End users that want to switch from one library to another without learning
+ about all the small differences between those libraries.
+
+
+## Concrete use cases
+
+- {ref}`use-case-scipy`
+- {ref}`use-case-einops`
+- {ref}`use-case-xtensor`
+- {ref}`use-case-numba`
+
+
+(use-case-scipy)=
+
+### Use case 1: add hardware accelerator and distributed support to SciPy
+
+When surveying a representative set of advanced users and research software
+engineers in 2019 (for [this NSF proposal](https://figshare.com/articles/Mid-Scale_Research_Infrastructure_-_The_Scientific_Python_Ecosystem/8009441)),
+the single most common pain point brought up about SciPy was performance.
+
+SciPy heavily relies on NumPy (its only non-optional runtime dependency).
+NumPy provides an array implementation that's in-memory, CPU-only and
+single-threaded. Common performance-related wishes users have are:
+
+- parallel algorithms (can be multi-threaded or multiprocessing based)
+- support for distributed arrays (with Dask in particular)
+- support for GPUs and other hardware accelerators (shortened to just "GPU"
+ in the rest of this use case)
+
+Some parallelism can be supported in SciPy, it has a `workers` keyword
+(similar to scikit-learn's `n_jobs` keyword) that allows specifying to use
+parallelism in some algorithms. However SciPy itself will not directly start
+depending on a GPU or distributed array implementation, or contain (e.g.)
+CUDA code - that's not maintainable given the resources for development.
+_However_, there is a way to provide distributed or GPU support. Part of the
+solution is provided by NumPy's "array protocols" (see [gh-1](https://github.com/data-apis/array-api/issues/1)), that allow
+dispatching to other array implementations. The main problem then becomes how
+to know whether this will work with a particular distributed or GPU array
+implementation - given that there are zero other array implementations that
+are even close to providing full NumPy compatibility - without adding that
+array implementation as a dependency.
+
+It's clear that SciPy functionality that relies on compiled extensions (C,
+C++, Cython, Fortran) directly can't easily be run on another array library
+than NumPy (see [C API](design_topics/C_API.rst) for more details about this topic). Pure Python
+code can work though. There's two main possibilities:
+
+1. Testing with another package, manually or in CI, and simply provide a list
+ of functionality that is found to work. Then make ad-hoc fixes to expand
+ the set that works.
+2. Start relying on a well-defined subset of the NumPy API (or a new
+ NumPy-like API), for which compatibility is guaranteed.
+
+Option (2) seems strongly preferable, and that "well-defined subset" is _what
+an API standard should provide_. Testing will still be needed, to ensure there
+are no critical corner cases or bugs between array implementations, however
+that's then a very tractable task.
+
+As a concrete example, consider the spectral analysis functions in `scipy.signal`.
+All of those functions (e.g., `periodogram`, `spectrogram`, `csd`, `welch`, `stft`,
+`istft`) are pure Python - with the exception of `lombscargle` which is ~40
+lines of Cython - and use NumPy function calls, array attributes and
+indexing. The beginning of each function could be changed to retrieve the
+module that implements the array API standard for the given input array type,
+and then functions from that module could be used instead of NumPy functions.
+
+If the user has another array type, say a CuPy or PyTorch array `x` on their
+GPU, doing:
+```
+from scipy import signal
+
+signal.welch(x)
+```
+will result in:
+```
+# For CuPy
+ValueError: object __array__ method not producing an array
+
+# For PyTorch
+TypeError: can't convert cuda:0 device type tensor to numpy.
+```
+and therefore the user will have to explicitly convert to and from a
+`numpy.ndarray` (which is quite inefficient):
+```
+# For CuPy
+x_np = cupy.asnumpy(x)
+freq, Pxx = (cupy.asarray(res) for res in signal.welch(x_np))
+
+# For PyTorch
+x_np = x.cpu().numpy()
+# Note: ends up with tensors on CPU, may still have to move them back
+freq, Pxx = (torch.tensor(res) for res in signal.welch(x_np))
+```
+This code will look a little different for each array library. The end goal
+here is to be able to write this instead as:
+```
+freq, Pxx = signal.welch(x)
+```
+and have `freq`, `Pxx` be arrays of the same type and on the same device as `x`.
+
+```{note}
+
+This type of use case applies to many other libraries, from scikit-learn
+and scikit-image to domain-specific libraries like AstroPy and
+scikit-bio, to code written for a single purpose or user.
+```
+
+(use-case-einops)=
+
+### Use case 2: simplify einops by removing the backend system
+
+[einops](https://github.com/arogozhnikov/einops) is a library that provides flexible tensor operations and supports many array libraries (NumPy, TensorFlow, PyTorch, CuPy, MXNet, JAX).
+Most of the code in `einops` is:
+
+- [einops.py](https://github.com/arogozhnikov/einops/blob/master/einops/einops.py)
+ contains the functions it offers as public API (`rearrange`, `reduce`, `repeat`).
+- [_backends.py](https://github.com/arogozhnikov/einops/blob/master/einops/_backends.py)
+ contains the glue code needed to support that many array libraries.
+
+The amount of code in each of those two files is almost the same (~550 LoC each).
+The typical pattern in `einops.py` is:
+```
+def some_func(x):
+ ...
+ backend = get_backend(x)
+ shape = backend.shape(x)
+ result = backend.reduce(x)
+ ...
+```
+With a standard array API, the `_backends.py` glue layer could almost completely disappear,
+because the purpose it serves (providing a unified interface to array operations from each
+of the supported backends) is already addressed by the array API standard.
+Hence the complete `einops` code base could be close to 50% smaller, and easier to maintain or add to.
+
+```{note}
+
+Other libraries that have a similar backend system to support many array libraries
+include [TensorLy](https://github.com/tensorly/tensorly), the (now discontinued)
+multi-backend version of [Keras](https://github.com/keras-team/keras),
+[Unumpy](https://github.com/Quansight-Labs/unumpy) and
+[EagerPy](https://github.com/jonasrauber/eagerpy). Many end users and
+organizations will also have such glue code - it tends to be needed whenever
+one tries to support multiple array types in a single API.
+```
+
+
+(use-case-xtensor)=
+
+### Use case 3: adding a Python API to xtensor
+
+[xtensor](https://github.com/xtensor-stack/xtensor) is a C++ array library
+that is NumPy-inspired and provides lazy arrays. It has Python (and Julia and R)
+bindings, however it does not have a Python array API.
+
+Xtensor aims to follow NumPy closely, however it only implements a subset of functionality
+and documents some API differences in
+[Notable differences with NumPy](https://xtensor.readthedocs.io/en/latest/numpy-differences.html).
+
+Note that other libraries document similar differences, see for example
+[this page for JAX](https://jax.readthedocs.io/en/latest/jax.numpy.html) and
+[this page for TensorFlow](https://www.tensorflow.org/guide/tf_numpy).
+
+Each time an array library author designs a new API, they have to choose (a)
+what subset of NumPy makes sense to implement, and (b) where to deviate
+because NumPy's API for a particular function is suboptimal or the semantics
+don't fit their execution model.
+
+This array API standard aims to provide an API that can be readily adopted,
+without having to make the above-mentioned choices.
+
+```{note}
+
+XND is another array library, written in C, that still needs a Python API.
+Array implementations in other languages are often in a similar situation,
+and could translate this array API standard 1:1 to their language.
+```
+
+
+(use-case-numba)=
+
+### Use case 4: make JIT compilation of array computations easier and more robust
+
+[Numba](https://github.com/numba/numba) is a Just-In-Time (JIT) compiler for
+numerical functions in Python; it is NumPy-aware. [PyPy](https://pypy.org)
+is an implementation of Python with a JIT at its core; its NumPy support relies
+on running NumPy itself through a compatibility layer (`cpyext`), while a
+previous attempt to implement NumPy support directly was unsuccessful.
+
+Other array libraries may have an internal JIT (e.g., TensorFlow, PyTorch,
+JAX, MXNet) or work with an external JIT like
+[XLA](https://www.tensorflow.org/xla) or [VTA](https://tvm.apache.org/docs/vta/index.html).
+
+Numba currently has to jump through some hoops to accommodate NumPy's casting rules
+and may not attain full compatibility with NumPy in some cases - see, e.g.,
+[this](https://github.com/numba/numba/issues/4749) or
+[this](https://github.com/numba/numba/issues/5907) example issue regarding (array) scalar
+return values.
+
+An [explicit suggestion from a Numba developer](https://twitter.com/esc___/status/1295389487485333505)
+for this array API standard was:
+
+> for JIT compilers (e.g. Numba) it will be important, that the type of the
+ returned value(s) depends only on the *types* of the input but not on the
+ *values*.
+
+A concrete goal for this use case is to have better matching between
+JIT-compiled and non-JIT execution. Here is an example from the Numba code
+base, the need for which should be avoided in the future:
+
+```
+def check(x, y):
+ got = cfunc(x, y)
+ np.testing.assert_array_almost_equal(got, pyfunc(x, y))
+ # Check the power operation conserved the input's dtype
+ # (this is different from Numpy, whose behaviour depends on
+ # the *values* of the arguments -- see PyArray_CanCastArrayTo).
+ self.assertEqual(got.dtype, x.dtype)
+```
diff --git a/spec/2023.12/verification_test_suite.md b/spec/2023.12/verification_test_suite.md
new file mode 100644
index 000000000..cbe770e48
--- /dev/null
+++ b/spec/2023.12/verification_test_suite.md
@@ -0,0 +1,62 @@
+# Verification - test suite
+
+## Measuring conformance
+
+In addition to the specification documents, a test suite is being developed to
+aid library developers check conformance to the spec. **NOTE: The test suite
+is still a work in progress.** It can be found at
+<https://github.com/data-apis/array-api-tests>.
+
+It is important to note that while the aim of the array API test suite is to
+cover as much of the spec as possible, there are necessarily some aspects of
+the spec that are not covered by the test suite, typically because they are
+impossible to effectively test. Furthermore, if the test suite appears to
+diverge in any way from what the spec documents say, this should be considered
+a bug in the test suite. The specification is the ground source of truth.
+
+## Running the tests
+
+To run the tests, first clone the [test suite
+repo](https://github.com/data-apis/array-api-tests), and install the testing
+dependencies,
+
+ pip install pytest hypothesis
+
+or
+
+ conda install pytest hypothesis
+
+as well as the array libraries that you want to test. To run the tests, you
+need to specify the array library that is to be tested. There are two ways to
+do this. One way is to set the `ARRAY_API_TESTS_MODULE` environment variable.
+For example
+
+ ARRAY_API_TESTS_MODULE=numpy pytest
+
+Alternatively, edit the `array_api_tests/_array_module.py` file and change the
+line
+
+```py
+array_module = None
+```
+
+to
+
+```py
+import numpy as array_module
+```
+
+(replacing `numpy` with the array module namespace to be tested).
+
+In either case, the tests should be run with the `pytest` command.
+
+Aside from the two testing dependencies (`pytest` and `hypothesis`), the test
+suite has no dependencies. In particular, it does not depend on any specific
+array libraries such as NumPy. All tests are run using only the array library
+that is being tested, comparing results against the behavior as defined in the
+spec. The test suite is designed to be standalone so that it can easily be vendored.
+
+See the
+[README](https://github.com/data-apis/array-api-tests/blob/master/README.md)
+in the test suite repo for more information about how to run and interpret the
+test suite results.
diff --git a/spec/2024.12/API_specification/array_object.rst b/spec/2024.12/API_specification/array_object.rst
new file mode 100644
index 000000000..e3c7e8ae6
--- /dev/null
+++ b/spec/2024.12/API_specification/array_object.rst
@@ -0,0 +1,322 @@
+.. _array-object:
+
+Array object
+============
+
+ Array API specification for array object attributes and methods.
+
+A conforming implementation of the array API standard must provide and support an array object having the following attributes and methods.
+
+Furthermore, a conforming implementation of the array API standard must support, at minimum, array objects of rank (i.e., number of dimensions) ``0``, ``1``, ``2``, ``3``, and ``4`` and must explicitly document their maximum supported rank ``N``.
+
+.. note::
+ Conforming implementations must support zero-dimensional arrays.
+
+ Apart from array object attributes, such as ``ndim``, ``device``, and ``dtype``, all operations in this standard return arrays (or tuples of arrays), including those operations, such as ``mean``, ``var``, and ``std``, from which some common array libraries (e.g., NumPy) return scalar values.
+
+ *Rationale: always returning arrays is necessary to (1) support accelerator libraries where non-array return values could force device synchronization and (2) support delayed execution models where an array represents a future value.*
+
+-------------------------------------------------
+
+.. _operators:
+
+Operators
+---------
+
+A conforming implementation of the array API standard must provide and support an array object supporting the following Python operators.
+
+Arithmetic Operators
+~~~~~~~~~~~~~~~~~~~~
+
+A conforming implementation of the array API standard must provide and support an array object supporting the following Python arithmetic operators.
+
+- ``+x``: :meth:`.array.__pos__`
+
+  - `operator.pos(x) <https://docs.python.org/3/library/operator.html#operator.pos>`_
+  - `operator.__pos__(x) <https://docs.python.org/3/library/operator.html#operator.__pos__>`_
+
+- ``-x``: :meth:`.array.__neg__`
+
+  - `operator.neg(x) <https://docs.python.org/3/library/operator.html#operator.neg>`_
+  - `operator.__neg__(x) <https://docs.python.org/3/library/operator.html#operator.__neg__>`_
+
+- ``x1 + x2``: :meth:`.array.__add__`
+
+  - `operator.add(x1, x2) <https://docs.python.org/3/library/operator.html#operator.add>`_
+  - `operator.__add__(x1, x2) <https://docs.python.org/3/library/operator.html#operator.__add__>`_
+
+- ``x1 - x2``: :meth:`.array.__sub__`
+
+  - `operator.sub(x1, x2) <https://docs.python.org/3/library/operator.html#operator.sub>`_
+  - `operator.__sub__(x1, x2) <https://docs.python.org/3/library/operator.html#operator.__sub__>`_
+
+- ``x1 * x2``: :meth:`.array.__mul__`
+
+  - `operator.mul(x1, x2) <https://docs.python.org/3/library/operator.html#operator.mul>`_
+  - `operator.__mul__(x1, x2) <https://docs.python.org/3/library/operator.html#operator.__mul__>`_
+
+- ``x1 / x2``: :meth:`.array.__truediv__`
+
+  - `operator.truediv(x1, x2) <https://docs.python.org/3/library/operator.html#operator.truediv>`_
+  - `operator.__truediv__(x1, x2) <https://docs.python.org/3/library/operator.html#operator.__truediv__>`_
+
+- ``x1 // x2``: :meth:`.array.__floordiv__`
+
+  - `operator.floordiv(x1, x2) <https://docs.python.org/3/library/operator.html#operator.floordiv>`_
+  - `operator.__floordiv__(x1, x2) <https://docs.python.org/3/library/operator.html#operator.__floordiv__>`_
+
+- ``x1 % x2``: :meth:`.array.__mod__`
+
+  - `operator.mod(x1, x2) <https://docs.python.org/3/library/operator.html#operator.mod>`_
+  - `operator.__mod__(x1, x2) <https://docs.python.org/3/library/operator.html#operator.__mod__>`_
+
+- ``x1 ** x2``: :meth:`.array.__pow__`
+
+  - `operator.pow(x1, x2) <https://docs.python.org/3/library/operator.html#operator.pow>`_
+  - `operator.__pow__(x1, x2) <https://docs.python.org/3/library/operator.html#operator.__pow__>`_
+
+Arithmetic operators should be defined for arrays having real-valued data types.
+
+Array Operators
+~~~~~~~~~~~~~~~
+
+A conforming implementation of the array API standard must provide and support an array object supporting the following Python array operators.
+
+- ``x1 @ x2``: :meth:`.array.__matmul__`
+
+  - `operator.matmul(x1, x2) <https://docs.python.org/3/library/operator.html#operator.matmul>`_
+  - `operator.__matmul__(x1, x2) <https://docs.python.org/3/library/operator.html#operator.__matmul__>`_
+
+The matmul ``@`` operator should be defined for arrays having numeric data types.
+
+Bitwise Operators
+~~~~~~~~~~~~~~~~~
+
+A conforming implementation of the array API standard must provide and support an array object supporting the following Python bitwise operators.
+
+- ``~x``: :meth:`.array.__invert__`
+
+  - `operator.inv(x) <https://docs.python.org/3/library/operator.html#operator.inv>`_
+  - `operator.invert(x) <https://docs.python.org/3/library/operator.html#operator.invert>`_
+  - `operator.__inv__(x) <https://docs.python.org/3/library/operator.html#operator.__inv__>`_
+  - `operator.__invert__(x) <https://docs.python.org/3/library/operator.html#operator.__invert__>`_
+
+- ``x1 & x2``: :meth:`.array.__and__`
+
+  - `operator.and_(x1, x2) <https://docs.python.org/3/library/operator.html#operator.and_>`_
+  - `operator.__and__(x1, x2) <https://docs.python.org/3/library/operator.html#operator.__and__>`_
+
+- ``x1 | x2``: :meth:`.array.__or__`
+
+  - `operator.or_(x1, x2) <https://docs.python.org/3/library/operator.html#operator.or_>`_
+  - `operator.__or__(x1, x2) <https://docs.python.org/3/library/operator.html#operator.__or__>`_
+
+- ``x1 ^ x2``: :meth:`.array.__xor__`
+
+  - `operator.xor(x1, x2) <https://docs.python.org/3/library/operator.html#operator.xor>`_
+  - `operator.__xor__(x1, x2) <https://docs.python.org/3/library/operator.html#operator.__xor__>`_
+
+- ``x1 << x2``: :meth:`.array.__lshift__`
+
+  - `operator.lshift(x1, x2) <https://docs.python.org/3/library/operator.html#operator.lshift>`_
+  - `operator.__lshift__(x1, x2) <https://docs.python.org/3/library/operator.html#operator.__lshift__>`_
+
+- ``x1 >> x2``: :meth:`.array.__rshift__`
+
+  - `operator.rshift(x1, x2) <https://docs.python.org/3/library/operator.html#operator.rshift>`_
+  - `operator.__rshift__(x1, x2) <https://docs.python.org/3/library/operator.html#operator.__rshift__>`_
+
+Bitwise operators should be defined for arrays having integer and boolean data types.
+
+Comparison Operators
+~~~~~~~~~~~~~~~~~~~~
+
+A conforming implementation of the array API standard must provide and support an array object supporting the following Python comparison operators.
+
+- ``x1 < x2``: :meth:`.array.__lt__`
+
+  - `operator.lt(x1, x2) <https://docs.python.org/3/library/operator.html#operator.lt>`_
+  - `operator.__lt__(x1, x2) <https://docs.python.org/3/library/operator.html#operator.__lt__>`_
+
+- ``x1 <= x2``: :meth:`.array.__le__`
+
+  - `operator.le(x1, x2) <https://docs.python.org/3/library/operator.html#operator.le>`_
+  - `operator.__le__(x1, x2) <https://docs.python.org/3/library/operator.html#operator.__le__>`_
+
+- ``x1 > x2``: :meth:`.array.__gt__`
+
+  - `operator.gt(x1, x2) <https://docs.python.org/3/library/operator.html#operator.gt>`_
+  - `operator.__gt__(x1, x2) <https://docs.python.org/3/library/operator.html#operator.__gt__>`_
+
+- ``x1 >= x2``: :meth:`.array.__ge__`
+
+  - `operator.ge(x1, x2) <https://docs.python.org/3/library/operator.html#operator.ge>`_
+  - `operator.__ge__(x1, x2) <https://docs.python.org/3/library/operator.html#operator.__ge__>`_
+
+- ``x1 == x2``: :meth:`.array.__eq__`
+
+  - `operator.eq(x1, x2) <https://docs.python.org/3/library/operator.html#operator.eq>`_
+  - `operator.__eq__(x1, x2) <https://docs.python.org/3/library/operator.html#operator.__eq__>`_
+
+- ``x1 != x2``: :meth:`.array.__ne__`
+
+  - `operator.ne(x1, x2) <https://docs.python.org/3/library/operator.html#operator.ne>`_
+  - `operator.__ne__(x1, x2) <https://docs.python.org/3/library/operator.html#operator.__ne__>`_
+
+:meth:`.array.__lt__`, :meth:`.array.__le__`, :meth:`.array.__gt__`, :meth:`.array.__ge__` are only defined for arrays having real-valued data types. Other comparison operators should be defined for arrays having any data type.
+For backward compatibility, conforming implementations may support complex numbers; however, inequality comparison of complex numbers is unspecified and thus implementation-dependent (see :ref:`complex-number-ordering`).
+
+In-place Operators
+~~~~~~~~~~~~~~~~~~
+
+.. note::
+ In-place operations must be supported as discussed in :ref:`copyview-mutability`.
+
+A conforming implementation of the array API standard must provide and support an array object supporting the following "in-place" Python operators.
+
+.. note::
+   This specification refers to the following operators as "in-place" as that is what these operators are called in `Python <https://docs.python.org/3/reference/datamodel.html#object.__iadd__>`_. However, conforming array libraries which do not support array mutation may choose to not explicitly implement in-place Python operators. When a library does not implement a method corresponding to an in-place Python operator, Python falls back to the equivalent method for the corresponding binary arithmetic operation.
+
+An in-place operation must not change the data type or shape of the in-place array as a result of :ref:`type-promotion` or :ref:`broadcasting`.
+
+Let ``x1 += x2`` be a representative in-place operation. If, after applying type promotion (see :ref:`type-promotion`) to in-place operands ``x1`` and ``x2``, the resulting data type is equal to the data type of the array on the left-hand side of the operation (i.e., ``x1``), then an in-place operation must have the same behavior (including special cases) as the respective binary (i.e., two operand, non-assignment) operation. In this case, for the in-place addition ``x1 += x2``, the modified array ``x1`` must always equal the result of the equivalent binary arithmetic operation ``x1[...] = x1 + x2``.
+
+If, however, after applying type promotion (see :ref:`type-promotion`) to in-place operands, the resulting data type is not equal to the data type of the array on the left-hand side of the operation, then a conforming implementation may return results which differ from the respective binary operation due to casting behavior and selection of the operation's intermediate precision. The choice of casting behavior and intermediate precision is unspecified and thus implementation-defined.
+
+.. note::
+ Let ``x1`` be the operand on the left-hand side and ``x2`` be the operand on the right-hand side of an in-place operation. Consumers of the array API standard are advised of the following considerations when using in-place operations:
+
+ 1. In-place operations do not guarantee in-place mutation. A conforming library may or may not support in-place mutation.
+ 2. If, after applying broadcasting (see :ref:`broadcasting`) to in-place operands, the resulting shape is not equal to the shape of ``x1``, in-place operators may raise an exception.
+ 3. If, after applying type promotion (see :ref:`type-promotion`) to in-place operands, the resulting data type is not equal to the data type of ``x1``, the resulting data type may not equal the data type of ``x1`` and the operation's intermediate precision may be that of ``x1``, even if the promoted data type between ``x1`` and ``x2`` would have higher precision.
+
+ In general, for in-place operations, consumers of the array API standard are advised to ensure operands have the same data type and broadcast to the shape of the operand on the left-hand side of the operation in order to maximize portability.
+
+Arithmetic Operators
+""""""""""""""""""""
+
+- ``+=``. May be implemented via ``__iadd__``.
+- ``-=``. May be implemented via ``__isub__``.
+- ``*=``. May be implemented via ``__imul__``.
+- ``/=``. May be implemented via ``__itruediv__``.
+- ``//=``. May be implemented via ``__ifloordiv__``.
+- ``**=``. May be implemented via ``__ipow__``.
+- ``%=``. May be implemented via ``__imod__``.
+
+Array Operators
+"""""""""""""""
+
+- ``@=``. May be implemented via ``__imatmul__``.
+
+Bitwise Operators
+"""""""""""""""""
+
+- ``&=``. May be implemented via ``__iand__``.
+- ``|=``. May be implemented via ``__ior__``.
+- ``^=``. May be implemented via ``__ixor__``.
+- ``<<=``. May be implemented via ``__ilshift__``.
+- ``>>=``. May be implemented via ``__irshift__``.
+
+Reflected Operators
+~~~~~~~~~~~~~~~~~~~
+
+A conforming implementation of the array API standard must provide and support an array object supporting the following reflected operators.
+
+The results of applying reflected operators must match their non-reflected equivalents.
+
+.. note::
+   All operators for which ``array <op> scalar`` is implemented must have an equivalent reflected operator implementation.
+
+Arithmetic Operators
+""""""""""""""""""""
+
+- ``__radd__``
+- ``__rsub__``
+- ``__rmul__``
+- ``__rtruediv__``
+- ``__rfloordiv__``
+- ``__rpow__``
+- ``__rmod__``
+
+Array Operators
+"""""""""""""""
+
+- ``__rmatmul__``
+
+Bitwise Operators
+"""""""""""""""""
+
+- ``__rand__``
+- ``__ror__``
+- ``__rxor__``
+- ``__rlshift__``
+- ``__rrshift__``
+
+-------------------------------------------------
+
+.. currentmodule:: array_api
+
+Attributes
+----------
+..
+ NOTE: please keep the attributes in alphabetical order
+
+
+.. autosummary::
+ :toctree: generated
+ :template: property.rst
+
+ array.dtype
+ array.device
+ array.mT
+ array.ndim
+ array.shape
+ array.size
+ array.T
+
+-------------------------------------------------
+
+Methods
+-------
+..
+ NOTE: please keep the methods in alphabetical order
+
+
+.. autosummary::
+ :toctree: generated
+ :template: property.rst
+
+ array.__abs__
+ array.__add__
+ array.__and__
+ array.__array_namespace__
+ array.__bool__
+ array.__complex__
+ array.__dlpack__
+ array.__dlpack_device__
+ array.__eq__
+ array.__float__
+ array.__floordiv__
+ array.__ge__
+ array.__getitem__
+ array.__gt__
+ array.__index__
+ array.__int__
+ array.__invert__
+ array.__le__
+ array.__lshift__
+ array.__lt__
+ array.__matmul__
+ array.__mod__
+ array.__mul__
+ array.__ne__
+ array.__neg__
+ array.__or__
+ array.__pos__
+ array.__pow__
+ array.__rshift__
+ array.__setitem__
+ array.__sub__
+ array.__truediv__
+ array.__xor__
+ array.to_device
diff --git a/spec/2024.12/API_specification/broadcasting.rst b/spec/2024.12/API_specification/broadcasting.rst
new file mode 100644
index 000000000..abb3ed222
--- /dev/null
+++ b/spec/2024.12/API_specification/broadcasting.rst
@@ -0,0 +1,128 @@
+.. _broadcasting:
+
+Broadcasting
+============
+
+ Array API specification for broadcasting semantics.
+
+Overview
+--------
+
+**Broadcasting** refers to the automatic (implicit) expansion of array dimensions to be of equal sizes without copying array data for the purpose of making arrays with different shapes have compatible shapes for element-wise operations.
+
+Broadcasting facilitates user ergonomics by encouraging users to avoid unnecessary copying of array data and can **potentially** enable more memory-efficient element-wise operations through vectorization, reduced memory consumption, and cache locality.
+
+Algorithm
+---------
+
+Given an element-wise operation involving two compatible arrays, an array having a singleton dimension (i.e., a dimension whose size is one) is broadcast (i.e., virtually repeated) across an array having a corresponding non-singleton dimension.
+
+If two arrays are of unequal rank, the array having a lower rank is promoted to a higher rank by (virtually) prepending singleton dimensions until the number of dimensions matches that of the array having a higher rank.
+
+The results of the element-wise operation must be stored in an array having a shape determined by the following algorithm.
+
+#. Let ``A`` and ``B`` both be arrays.
+
+#. Let ``shape1`` be a tuple describing the shape of array ``A``.
+
+#. Let ``shape2`` be a tuple describing the shape of array ``B``.
+
+#. Let ``N1`` be the number of dimensions of array ``A`` (i.e., the result of ``len(shape1)``).
+
+#. Let ``N2`` be the number of dimensions of array ``B`` (i.e., the result of ``len(shape2)``).
+
+#. Let ``N`` be the maximum value of ``N1`` and ``N2`` (i.e., the result of ``max(N1, N2)``).
+
+#. Let ``shape`` be a temporary list of length ``N`` for storing the shape of the result array.
+
+#. Let ``i`` be ``N-1``.
+
+#. Repeat, while ``i >= 0``
+
+ #. Let ``n1`` be ``N1 - N + i``.
+
+ #. If ``n1 >= 0``, let ``d1`` be the size of dimension ``n1`` for array ``A`` (i.e., the result of ``shape1[n1]``); else, let ``d1`` be ``1``.
+
+ #. Let ``n2`` be ``N2 - N + i``.
+
+ #. If ``n2 >= 0``, let ``d2`` be the size of dimension ``n2`` for array ``B`` (i.e., the result of ``shape2[n2]``); else, let ``d2`` be ``1``.
+
+ #. If ``d1 == 1``, then set the ``i``\th element of ``shape`` to ``d2``.
+
+ #. Else, if ``d2 == 1``, then
+
+ - set the ``i``\th element of ``shape`` to ``d1``.
+
+ #. Else, if ``d1 == d2``, then
+
+ - set the ``i``\th element of ``shape`` to ``d1``.
+
+ #. Else, throw an exception.
+
+ #. Set ``i`` to ``i-1``.
+
+#. Let ``tuple(shape)`` be the shape of the result array.
+
+Examples
+~~~~~~~~
+
+The following examples demonstrate the application of the broadcasting algorithm for two compatible arrays.
+
+::
+
+ A (4d array): 8 x 1 x 6 x 1
+ B (3d array): 7 x 1 x 5
+ ---------------------------------
+ Result (4d array): 8 x 7 x 6 x 5
+ A (2d array): 5 x 4
+ B (1d array): 1
+ -------------------------
+ Result (2d array): 5 x 4
+ A (2d array): 5 x 4
+ B (1d array): 4
+ -------------------------
+ Result (2d array): 5 x 4
+ A (3d array): 15 x 3 x 5
+ B (3d array): 15 x 1 x 5
+ ------------------------------
+ Result (3d array): 15 x 3 x 5
+ A (3d array): 15 x 3 x 5
+ B (2d array): 3 x 5
+ ------------------------------
+ Result (3d array): 15 x 3 x 5
+ A (3d array): 15 x 3 x 5
+ B (2d array): 3 x 1
+ ------------------------------
+ Result (3d array): 15 x 3 x 5
+
+
+The following examples demonstrate array shapes which do **not** broadcast.
+
+::
+
+ A (1d array): 3
+ B (1d array): 4 # dimension does not match
+
+ A (2d array): 2 x 1
+ B (3d array): 8 x 4 x 3 # second dimension does not match
+
+ A (3d array): 15 x 3 x 5
+ B (2d array): 15 x 3 # singleton dimensions can only be prepended, not appended
+
+In-place Semantics
+------------------
+
+As implied by the broadcasting algorithm, in-place element-wise operations (including ``__setitem__``) must not change the shape of the in-place array as a result of broadcasting. Such operations should only be supported in the case where the right-hand operand can broadcast to the shape of the left-hand operand, after any indexing operations are performed.
+
+For example:
+
+::
+
+ x = empty((2, 3, 4))
+ a = empty((1, 3, 4))
+
+ # This is OK. The shape of a, (1, 3, 4), can broadcast to the shape of x[...], (2, 3, 4)
+ x[...] = a
+
+ # This is not allowed. The shape of a, (1, 3, 4), can NOT broadcast to the shape of x[1, ...], (3, 4)
+ x[1, ...] = a
diff --git a/spec/2024.12/API_specification/constants.rst b/spec/2024.12/API_specification/constants.rst
new file mode 100644
index 000000000..71cb8688d
--- /dev/null
+++ b/spec/2024.12/API_specification/constants.rst
@@ -0,0 +1,26 @@
+Constants
+=========
+
+ Array API specification for constants.
+
+A conforming implementation of the array API standard must provide and support the following constants adhering to the following conventions.
+
+- Each constant must have a Python floating-point data type (i.e., ``float``) and be provided as a Python scalar value.
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api.constants
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: attribute.rst
+
+ e
+ inf
+ nan
+ newaxis
+ pi
diff --git a/spec/2024.12/API_specification/creation_functions.rst b/spec/2024.12/API_specification/creation_functions.rst
new file mode 100644
index 000000000..ff5c06368
--- /dev/null
+++ b/spec/2024.12/API_specification/creation_functions.rst
@@ -0,0 +1,36 @@
+Creation Functions
+==================
+
+ Array API specification for creating arrays.
+
+A conforming implementation of the array API standard must provide and support the following functions.
+
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ arange
+ asarray
+ empty
+ empty_like
+ eye
+ from_dlpack
+ full
+ full_like
+ linspace
+ meshgrid
+ ones
+ ones_like
+ tril
+ triu
+ zeros
+ zeros_like
diff --git a/spec/2024.12/API_specification/data_type_functions.rst b/spec/2024.12/API_specification/data_type_functions.rst
new file mode 100644
index 000000000..d42968c7b
--- /dev/null
+++ b/spec/2024.12/API_specification/data_type_functions.rst
@@ -0,0 +1,26 @@
+Data Type Functions
+===================
+
+ Array API specification for data type functions.
+
+A conforming implementation of the array API standard must provide and support the following data type functions.
+
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ astype
+ can_cast
+ finfo
+ iinfo
+ isdtype
+ result_type
diff --git a/spec/2024.12/API_specification/data_types.rst b/spec/2024.12/API_specification/data_types.rst
new file mode 100644
index 000000000..5987dd322
--- /dev/null
+++ b/spec/2024.12/API_specification/data_types.rst
@@ -0,0 +1,143 @@
+.. _data-types:
+
+Data Types
+==========
+
+ Array API specification for supported data types.
+
+A conforming implementation of the array API standard must provide and support
+the following data types ("dtypes") in its array object, and as data type
+objects in its main namespace under the specified names:
+
++--------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| dtype object | description |
++==============+============================================================================================================================================================================================+
+| bool | Boolean (``True`` or ``False``). |
++--------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| int8 | An 8-bit signed integer whose values exist on the interval ``[-128, +127]``. |
++--------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| int16 | A 16-bit signed integer whose values exist on the interval ``[-32,767, +32,767]``. |
++--------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| int32 | A 32-bit signed integer whose values exist on the interval ``[-2,147,483,647, +2,147,483,647]``. |
++--------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| int64 | A 64-bit signed integer whose values exist on the interval ``[-9,223,372,036,854,775,807, +9,223,372,036,854,775,807]``. |
++--------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| uint8 | An 8-bit unsigned integer whose values exist on the interval ``[0, +255]``. |
++--------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| uint16 | A 16-bit unsigned integer whose values exist on the interval ``[0, +65,535]``. |
++--------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| uint32 | A 32-bit unsigned integer whose values exist on the interval ``[0, +4,294,967,295]``. |
++--------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| uint64 | A 64-bit unsigned integer whose values exist on the interval ``[0, +18,446,744,073,709,551,615]``. |
++--------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| float32 | IEEE 754 single-precision (32-bit) binary floating-point number (see IEEE 754-2019). |
++--------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| float64 | IEEE 754 double-precision (64-bit) binary floating-point number (see IEEE 754-2019). |
++--------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| complex64 | Single-precision (64-bit) complex floating-point number whose real and imaginary components must be IEEE 754 single-precision (32-bit) binary floating-point numbers (see IEEE 754-2019). |
++--------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| complex128 | Double-precision (128-bit) complex floating-point number whose real and imaginary components must be IEEE 754 double-precision (64-bit) binary floating-point numbers (see IEEE 754-2019). |
++--------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+
+Data type objects must have the following methods (no attributes are required):
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. currentmodule:: array_api.data_types
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ __eq__
+
+
+.. note::
+ A conforming implementation of the array API standard may provide and
+ support additional data types beyond those described in this specification.
+ It may also support additional methods and attributes on dtype objects.
+
+.. note::
+ IEEE 754-2019 requires support for subnormal (a.k.a., denormal) numbers, which are useful for supporting gradual underflow. However, hardware support for subnormal numbers is not universal, and many platforms (e.g., accelerators) and compilers support toggling denormals-are-zero (DAZ) and/or flush-to-zero (FTZ) behavior to increase performance and to guard against timing attacks.
+
+ Accordingly, subnormal behavior is left unspecified and, thus, implementation-defined. Conforming implementations may vary in their support for subnormal numbers.
+
+
+Use of data type objects
+------------------------
+
+Data type objects are used as ``dtype`` specifiers in functions and methods
+(e.g., ``zeros((2, 3), dtype=float32)``), accessible as ``.dtype`` attribute on
+arrays, and used in various casting and introspection functions (e.g.,
+``isdtype(x.dtype, 'integral')``).
+
+``dtype`` keywords in functions specify the data type of arrays returned from
+functions or methods. ``dtype`` keywords are not required to affect the data
+type used for intermediate calculations or results (e.g., implementors are free
+to use a higher-precision data type when accumulating values for reductions, as
+long as the returned array has the specified data type).
+
+.. note::
+ Implementations may provide other ways to specify data types (e.g., ``zeros((2, 3), dtype='f4')``) which are not described in this specification; however, in order to ensure portability, array library consumers are recommended to use data type objects as provided by specification conforming array libraries.
+
+See :ref:`type-promotion` for specification guidance describing the rules governing the interaction of two or more data types or data type objects.
+
+
+.. _data-type-defaults:
+
+Default Data Types
+------------------
+
+A conforming implementation of the array API standard must define the following default data types.
+
+- a default real-valued floating-point data type (either ``float32`` or ``float64``).
+- a default complex floating-point data type (either ``complex64`` or ``complex128``).
+- a default integer data type (either ``int32`` or ``int64``).
+- a default array index data type (either ``int32`` or ``int64``).
+
+The default real-valued floating-point and complex floating-point data types must be the same across platforms.
+
+The default complex floating-point data type should match the default real-valued floating-point data type. For example, if the default real-valued floating-point data type is ``float32``, the default complex floating-point data type must be ``complex64``. If the default real-valued floating-point data type is ``float64``, the default complex floating-point data type must be ``complex128``.
+
+The default integer data type should be the same across platforms, but the default may vary depending on whether Python is 32-bit or 64-bit.
+
+The default array index data type may be ``int32`` on 32-bit platforms, but the default should be ``int64`` otherwise.
+
+Note that it is possible that a library supports multiple devices, with not all
+those device types supporting the same data types. In this case, the default
+integer or floating-point data types may vary with device. If that is the case,
+the library should clearly warn about this in its documentation.
+
+.. note::
+ The default data types should be clearly defined in a conforming library's documentation.
+
+
+.. _data-type-categories:
+
+Data Type Categories
+--------------------
+
+For the purpose of organizing functions within this specification, the following data type categories are defined.
+
++----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+
+| data type category | dtypes |
++============================+========================================================================================================================================================+
+| Numeric | ``int8``, ``int16``, ``int32``, ``int64``, ``uint8``, ``uint16``, ``uint32``, ``uint64``, ``float32``, ``float64``, ``complex64``, and ``complex128``. |
++----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Real-valued | ``int8``, ``int16``, ``int32``, ``int64``, ``uint8``, ``uint16``, ``uint32``, ``uint64``, ``float32``, and ``float64``. |
++----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Integer | ``int8``, ``int16``, ``int32``, ``int64``, ``uint8``, ``uint16``, ``uint32``, and ``uint64``. |
++----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Floating-point | ``float32``, ``float64``, ``complex64``, and ``complex128``. |
++----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Real-valued floating-point | ``float32`` and ``float64``. |
++----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Complex floating-point | ``complex64`` and ``complex128``. |
++----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Boolean | ``bool``. |
++----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+
+
+
+.. note::
+ Conforming libraries are not required to organize data types according to these categories. These categories are only intended for use within this specification.
diff --git a/spec/2024.12/API_specification/elementwise_functions.rst b/spec/2024.12/API_specification/elementwise_functions.rst
new file mode 100644
index 000000000..9758c68db
--- /dev/null
+++ b/spec/2024.12/API_specification/elementwise_functions.rst
@@ -0,0 +1,86 @@
+.. _element-wise-functions:
+
+Element-wise Functions
+======================
+
+ Array API specification for element-wise functions.
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ abs
+ acos
+ acosh
+ add
+ asin
+ asinh
+ atan
+ atan2
+ atanh
+ bitwise_and
+ bitwise_left_shift
+ bitwise_invert
+ bitwise_or
+ bitwise_right_shift
+ bitwise_xor
+ ceil
+ clip
+ conj
+ copysign
+ cos
+ cosh
+ divide
+ equal
+ exp
+ expm1
+ floor
+ floor_divide
+ greater
+ greater_equal
+ hypot
+ imag
+ isfinite
+ isinf
+ isnan
+ less
+ less_equal
+ log
+ log1p
+ log2
+ log10
+ logaddexp
+ logical_and
+ logical_not
+ logical_or
+ logical_xor
+ maximum
+ minimum
+ multiply
+ negative
+ nextafter
+ not_equal
+ positive
+ pow
+ real
+ reciprocal
+ remainder
+ round
+ sign
+ signbit
+ sin
+ sinh
+ square
+ sqrt
+ subtract
+ tan
+ tanh
+ trunc
diff --git a/spec/2024.12/API_specification/function_and_method_signatures.rst b/spec/2024.12/API_specification/function_and_method_signatures.rst
new file mode 100644
index 000000000..0eca2ac69
--- /dev/null
+++ b/spec/2024.12/API_specification/function_and_method_signatures.rst
@@ -0,0 +1,63 @@
+.. _function-and-method-signatures:
+
+Function and method signatures
+==============================
+
+Function signatures in this standard adhere to the following:
+
+1. Positional parameters should be `positional-only <https://www.python.org/dev/peps/pep-0570/>`_ parameters.
+ Positional-only parameters have no externally-usable name. When a function
+ accepting positional-only parameters is called, positional arguments are
+ mapped to these parameters based solely on their order.
+
+ *Rationale: existing libraries have incompatible conventions, and using names
+ of positional parameters is not normal/recommended practice.*
+
+ .. note::
+
+ Positional-only parameters are only available in Python >= 3.8. Libraries
+ still supporting 3.7 or 3.6 may consider making the API standard-compliant
+ namespace >= 3.8. Alternatively, they can add guidance to their users in the
+ documentation to use the functions as if they were positional-only.
+
+2. Optional parameters should be `keyword-only <https://www.python.org/dev/peps/pep-3102/>`_ arguments.
+
+ *Rationale: this leads to more readable code, and it makes it easier to
+ evolve an API over time by adding keywords without having to worry about
+ keyword order.*
+
+3. For functions that have a single positional array parameter, that parameter
+ is called ``x``. For functions that have multiple array parameters, those
+ parameters are called ``xi`` with ``i = 1, 2, ...`` (i.e., ``x1``, ``x2``).
+
+4. Signatures include type annotations. The type annotations are also added to
+ individual parameter and return value descriptions. For code which aims to
+ adhere to the standard, adding type annotations is strongly recommended.
+
+A function signature and description will look like:
+
+::
+
+ funcname(x1, x2, /, *, key1=-1, key2=None) -> out:
+ Parameters
+
+ x1 : array
+ description
+ x2 : array
+ description
+ key1 : int
+ description
+ key2 : Optional[str]
+ description
+
+ Returns
+
+ out : array
+ description
+
+
+Method signatures will follow the same conventions modulo the addition of ``self``.
+
+Note that there are a few exceptions to rules (1) and (2), in cases where
+it enhances readability or use of the non-default form of the parameter in
+question is commonly used in code written for existing array libraries.
diff --git a/spec/2024.12/API_specification/index.rst b/spec/2024.12/API_specification/index.rst
new file mode 100644
index 000000000..ffc3d3775
--- /dev/null
+++ b/spec/2024.12/API_specification/index.rst
@@ -0,0 +1,41 @@
+.. _api-specification:
+
+API specification
+=================
+
+A conforming implementation of the array API standard must provide and support the APIs and behavior detailed in this specification while adhering to the following conventions.
+
+- When a function signature includes a `/`, positional parameters must be `positional-only <https://www.python.org/dev/peps/pep-0570/>`_ parameters. See :ref:`function-and-method-signatures`.
+- When a function signature includes a `*`, optional parameters must be `keyword-only <https://www.python.org/dev/peps/pep-3102/>`_ arguments. See :ref:`function-and-method-signatures`.
+- Broadcasting semantics must follow the semantics defined in :ref:`broadcasting`.
+- Unless stated otherwise, functions must support the data types defined in :ref:`data-types`.
+- Functions may only be required for a subset of input data types. Libraries may choose to implement functions for additional data types, but that behavior is not required by the specification. See :ref:`data-type-categories`.
+- Unless stated otherwise, functions must adhere to the type promotion rules defined in :ref:`type-promotion`.
+- Unless stated otherwise, floating-point operations must adhere to IEEE 754-2019.
+- Unless stated otherwise, element-wise mathematical functions must satisfy the minimum accuracy requirements defined in :ref:`accuracy`.
+
+
+.. toctree::
+ :caption: API specification
+ :maxdepth: 3
+
+ array_object
+ broadcasting
+ constants
+ creation_functions
+ data_type_functions
+ data_types
+ elementwise_functions
+ function_and_method_signatures
+ indexing
+ indexing_functions
+ inspection
+ linear_algebra_functions
+ manipulation_functions
+ searching_functions
+ set_functions
+ sorting_functions
+ statistical_functions
+ type_promotion
+ utility_functions
+ version
diff --git a/spec/2024.12/API_specification/indexing.rst b/spec/2024.12/API_specification/indexing.rst
new file mode 100644
index 000000000..058003c51
--- /dev/null
+++ b/spec/2024.12/API_specification/indexing.rst
@@ -0,0 +1,253 @@
+.. _indexing:
+
+Indexing
+========
+
+ Array API specification for indexing arrays.
+
+A conforming implementation of the array API standard must adhere to the following conventions.
+
+
+.. _indexing-single-axis:
+
+Single-axis Indexing
+--------------------
+
+To index a single array axis, an array must support standard Python indexing rules. Let ``n`` be the axis (dimension) size.
+
+- An integer index must be an object satisfying `operator.index <https://docs.python.org/3/reference/datamodel.html#object.__index__>`_ (e.g., ``int``).
+
+- Nonnegative indices must start at ``0`` (i.e., zero-based indexing).
+
+- **Valid** nonnegative indices must reside on the half-open interval ``[0, n)``.
+
+ .. note::
+ This specification does not require bounds checking. The behavior for out-of-bounds integer indices is left unspecified.
+
+- Negative indices must count backward from the last array index, starting from ``-1`` (i.e., negative-one-based indexing, where ``-1`` refers to the last array index).
+
+ .. note::
+ A negative index ``j`` is equivalent to ``n-|j|``; the former is syntactic sugar for the latter, providing a shorthand for indexing elements that would otherwise need to be specified in terms of the axis (dimension) size.
+
+- **Valid** negative indices must reside on the closed interval ``[-n, -1]``.
+
+ .. note::
+ This specification does not require bounds checking. The behavior for out-of-bounds integer indices is left unspecified.
+
+- A negative index ``j`` is related to a zero-based nonnegative index ``i`` via ``i = n+j``.
+
+- Colons ``:`` must be used for `slices <https://docs.python.org/3/reference/expressions.html#slicings>`_: ``start:stop:step``, where ``start`` is inclusive and ``stop`` is exclusive.
+
+ .. note::
+ The specification does not support returning scalar (i.e., non-array) values from operations, including indexing. In contrast to standard Python indexing rules, for any index, or combination of indices, which select a single value, the result must be a zero-dimensional array containing the selected value.
+
+Slice Syntax
+~~~~~~~~~~~~
+
+The basic slice syntax is ``i:j:k`` where ``i`` is the starting index, ``j`` is the stopping index, and ``k`` is the step (``k != 0``). A slice may contain either one or two colons, with either an integer value or nothing on either side of each colon. The following are valid slices.
+
+::
+
+ A[:]
+ A[i:]
+ A[:j]
+ A[i:j]
+ A[::]
+ A[i::]
+ A[:j:]
+ A[::k]
+ A[i:j:]
+ A[i::k]
+ A[:j:k]
+ A[i:j:k]
+
+.. note::
+ Slice syntax can be equivalently achieved using the Python built-in `slice() <https://docs.python.org/3/library/functions.html#slice>`_ API. From the perspective of ``A``, the behavior of ``A[i:j:k]`` and ``A[slice(i, j, k)]`` is indistinguishable (i.e., both retrieve the same set of items from ``__getitem__``).
+
+Using a slice to index a single array axis must select ``m`` elements with index values
+
+::
+
+ i, i+k, i+2k, i+3k, ..., i+(m-1)k
+
+where
+
+::
+
+ m = q + (r != 0)
+
+and ``q`` and ``r`` (``r != 0``) are the quotient and remainder obtained by dividing ``j-i`` by ``k``
+
+::
+
+ j - i = qk + r
+
+such that
+
+::
+
+ j > i + (m-1)k
+
+.. note::
+ For ``i`` on the interval ``[0, n)`` (where ``n`` is the axis size), ``j`` on the interval ``(0, n]``, ``i`` less than ``j``, and positive step ``k``, a starting index ``i`` is **always** included, while the stopping index ``j`` is **always** excluded. This preserves ``x[:i]+x[i:]`` always being equal to ``x``.
+
+.. note::
+ Using a slice to index into a single array axis should select the same elements as using a slice to index a Python list of the same size.
+
+Slice syntax must have the following defaults. Let ``n`` be the axis (dimension) size.
+
+- If ``k`` is not provided (e.g., ``0:10``), ``k`` must equal ``1``.
+- If ``k`` is greater than ``0`` and ``i`` is not provided (e.g., ``:10:2``), ``i`` must equal ``0``.
+- If ``k`` is greater than ``0`` and ``j`` is not provided (e.g., ``0::2``), ``j`` must equal ``n``.
+- If ``k`` is less than ``0`` and ``i`` is not provided (e.g., ``:10:-2``), ``i`` must equal ``n-1``.
+- If ``k`` is less than ``0`` and ``j`` is not provided (e.g., ``0::-2``), ``j`` must equal ``-n-1``.
+
+Using a slice to index a single array axis must adhere to the following rules. Let ``n`` be the axis (dimension) size.
+
+- If ``i`` equals ``j``, a slice must return an empty array, whose axis (dimension) size along the indexed axis is ``0``.
+
+- Indexing via ``:`` and ``::`` must be equivalent and have defaults derived from the rules above. Both ``:`` and ``::`` indicate to select all elements along a single axis (dimension).
+
+ .. note::
+ This specification does not require "clipping" out-of-bounds slice indices. This is in contrast to Python slice semantics where ``0:100`` and ``0:10`` are equivalent on a list of length ``10``.
+
+The following ranges for the start and stop values of a slice must be supported. Let ``n`` be the axis (dimension) size being sliced. For a slice ``i:j:k``, the behavior specified above should be implemented for the following:
+
+- ``i`` or ``j`` omitted (``None``).
+- ``-n <= i <= n``.
+- For ``k > 0`` or ``k`` omitted (``None``), ``-n <= j <= n``.
+- For ``k < 0``, ``-n - 1 <= j <= max(0, n - 1)``.
+
+The behavior outside of these bounds is unspecified.
+
+.. note::
+ *Rationale: this is consistent with bounds checking for integer indexing; the behavior of out-of-bounds indices is left unspecified. Implementations may choose to clip (consistent with Python* ``list`` *slicing semantics), raise an exception, return junk values, or some other behavior depending on device requirements and performance considerations.*
+
+
+.. _indexing-multi-axis:
+
+Multi-axis Indexing
+-------------------
+
+Multi-dimensional arrays must extend the concept of single-axis indexing to multiple axes by applying single-axis indexing rules along each axis (dimension) and supporting the following additional rules. Let ``N`` be the number of dimensions ("rank") of a multi-dimensional array ``A``.
+
+- Each axis may be independently indexed via single-axis indexing by providing a comma-separated sequence ("selection tuple") of single-axis indexing expressions (e.g., ``A[:, 2:10, :, 5]``).
+
+ .. note::
+ In Python, ``A[(exp1, exp2, ..., expN)]`` is equivalent to ``A[exp1, exp2, ..., expN]``; the latter is syntactic sugar for the former.
+
+ Accordingly, if ``A`` has rank ``1``, then ``A[(2:10,)]`` must be equivalent to ``A[2:10]``. If ``A`` has rank ``2``, then ``A[(2:10, :)]`` must be equivalent to ``A[2:10, :]``. And so on and so forth.
+
+- Providing a single nonnegative integer ``i`` as a single-axis index must index the same elements as the slice ``i:i+1``.
+
+- Providing a single negative integer ``i`` as a single-axis index must index the same elements as the slice ``n+i:n+i+1``, where ``n`` is the axis (dimension) size.
+
+- Providing a single integer as a single-axis index must reduce the number of array dimensions by ``1`` (i.e., the array rank must decrease by one; if ``A`` has rank ``2``, ``rank(A)-1 == rank(A[0, :])``). In particular, a selection tuple with the ``m``\th element an integer (and all other entries ``:``) indexes a sub-array with rank ``N-1``.
+
+ .. note::
+ When providing a single integer as a single-axis index to an array of rank ``1``, the result should be an array of rank ``0``, not a NumPy scalar. Note that this behavior differs from NumPy.
+
+- Providing a slice must retain array dimensions (i.e., the array rank must remain the same; ``rank(A) == rank(A[:])``).
+
+- Providing `ellipsis <https://docs.python.org/3/library/constants.html#Ellipsis>`_ must apply ``:`` to each dimension necessary to index all dimensions (e.g., if ``A`` has rank ``4``, ``A[1:, ..., 2:5] == A[1:, :, :, 2:5]``). Only a single ellipsis must be allowed. An ``IndexError`` exception must be raised if more than one ellipsis is provided.
+
+- Providing an empty tuple or an ellipsis to an array of rank ``0`` must result in an array of the same rank (i.e., if ``A`` has rank ``0``, ``A == A[()]`` and ``A == A[...]``).
+
+ .. note::
+ This behavior differs from NumPy where providing an empty tuple to an array of rank ``0`` returns a NumPy scalar.
+
+- Each ``None`` in the selection tuple must expand the dimensions of the resulting selection by one dimension of size ``1``. The position of the added dimension must be the same as the position of ``None`` in the selection tuple.
+
+ .. note::
+ Expanding dimensions can be equivalently achieved via repeated invocation of :func:`~array_api.expand_dims`.
+
+ .. note::
+ The constant ``newaxis`` is an alias of ``None`` and can thus be used in a similar manner as ``None``.
+
+- Except in the case of providing a single ellipsis (e.g., ``A[2:10, ...]`` or ``A[1:, ..., 2:5]``), the number of provided single-axis indexing expressions (excluding ``None``) should equal ``N``. For example, if ``A`` has rank ``2``, a single-axis indexing expression should be explicitly provided for both axes (e.g., ``A[2:10, :]``). An ``IndexError`` exception should be raised if the number of provided single-axis indexing expressions (excluding ``None``) is less than ``N``.
+
+ .. note::
+ Some libraries, such as SymPy, support flat indexing (i.e., providing a single-axis indexing expression to a higher-dimensional array). That practice is not supported here.
+
+ To perform flat indexing, use ``reshape(x, (-1,))[integer]``.
+
+- An ``IndexError`` exception must be raised if the number of provided single-axis indexing expressions (excluding ``None``) is greater than ``N``.
+
+ .. note::
+ This specification leaves unspecified the behavior of providing a slice which attempts to select elements along a particular axis, but whose starting index is out-of-bounds.
+
+ *Rationale: this is consistent with bounds-checking for single-axis indexing. An implementation may choose to set the axis (dimension) size of the result array to* ``0`` *, raise an exception, return junk values, or some other behavior depending on device requirements and performance considerations.*
+
+Integer Array Indexing
+----------------------
+
+.. note::
+ Integer array indexing, as described in this specification, is a reduced subset of "vectorized indexing" semantics, as implemented in libraries such as NumPy. In vectorized indexing, integers and integer arrays are broadcasted to integer arrays having a common shape before being "zipped" together to form a list of index coordinates. This form of indexing diverges from the multi-axis indexing semantics described above (see :ref:`indexing-multi-axis`) where each element of an indexing tuple comprised of integers and slices independently indexes a particular axis. This latter form of indexing is commonly referred to as "orthogonal indexing" and is the default form of indexing outside of Python in languages such as Julia and MATLAB.
+
+An array must support indexing by an indexing tuple which contains only integers and integer arrays according to the following rules. Let ``A`` be an ``N``-dimensional array with shape ``S1``. Let ``T`` be a tuple ``(t1, t2, ..., tN)`` having length ``N``. Let ``tk`` be an individual element of ``T``.
+
+.. note::
+ This specification does not currently address indexing tuples which combine slices and integer arrays. Behavior for such indexing tuples is left unspecified and thus implementation-defined. This may be revisited in a future revision of this standard.
+
+.. note::
+ This specification does not currently address indexing tuples which include array-like elements, such as Python lists, tuples, and other sequences. Behavior when indexing an array using array-like elements is left unspecified and thus implementation-defined.
+
+- If ``tk`` is an integer array, ``tk`` should have the default array index data type (see :ref:`data-type-defaults`).
+
+.. note::
+ Conforming implementations of this standard may support integer arrays having other integer data types; however, consumers of this standard should be aware that integer arrays having uncommon array index data types such as ``int8`` and ``uint8`` may not be widely supported as index arrays across conforming array libraries. To dynamically resolve the default array index data type, including for that of the current device context, use the inspection API ``default_dtypes()``.
+
+- Providing a zero-dimensional integer array ``tk`` containing an integer index must be equivalent to providing an integer index having the value ``int(tk)``. Conversely, each integer index ``tk`` must be equivalent to a zero-dimensional integer array containing the same value and be treated as such, including shape inference and broadcasting. Accordingly, if ``T`` consists of only integers and zero-dimensional integer arrays, the result must be equivalent to indexing multiple axes using integer indices. For example, if ``A`` is a two-dimensional array, ``T`` is the tuple ``(i, J)``, ``i`` is a valid integer index, and ``J`` is a zero-dimensional array containing a valid integer index ``j``, the result of ``A[T]`` must be equivalent to ``A[(i,j)]`` (see :ref:`indexing-multi-axis`).
+
+- If ``tk`` is an integer array, each element in ``tk`` must independently satisfy the rules stated above for indexing a single-axis with an integer index (see :ref:`indexing-single-axis`).
+
+ .. note::
+ This specification does not require bounds checking. The behavior for out-of-bounds integer indices is left unspecified.
+
+- If ``tk`` is an integer array containing duplicate valid integer indices, the result must include the corresponding elements of ``A`` with the same duplication.
+
+ ..
+ TODO: once setitem semantics are determined, insert the following note: Given the assignment operation ``x[T] = y[...]``, if ``T`` contains an integer array having duplicate indices, the order in which elements in ``y`` are assigned to the corresponding element(s) in ``x`` is unspecified and thus implementation-defined.
+
+- If ``T`` contains at least one non-zero-dimensional integer array, all elements of ``T`` must be broadcast against each other to determine a common shape ``S2 = (s1, s2, ..., sN)`` according to standard broadcasting rules (see :ref:`broadcasting`). If one or more elements in ``T`` are not broadcast-compatible with the others, an exception must be raised.
+
+- After broadcasting elements of ``T`` to a common shape ``S2``, the resulting tuple ``U = (u1, u2, ..., uN)`` must only contain integer arrays having shape ``S2`` (i.e., ``u1 = broadcast_to(t1, S2)``, ``u2 = broadcast_to(t2, S2)``, et cetera).
+
+- Each element in ``U`` must specify a multi-dimensional index ``v_i = (u1[i], u2[i], ..., uN[i])``, where ``i`` ranges over ``S2``. The result of ``A[U]`` must be constructed by gathering elements from ``A`` at each coordinate tuple ``v_i``. For example, let ``A`` have shape ``(4,4)`` and ``U`` contain integer arrays equivalent to ``([0,1], [2,3])``, with ``u1 = [0,1]`` and ``u2 = [2,3]``. The resulting coordinate tuples must be ``(0,2)`` and ``(1,3)``, respectively, and the resulting array must have shape ``(2,)`` and contain elements ``A[(0,2)]`` and ``A[(1,3)]``.
+
+- The result of ``A[U]`` must be an array having the broadcasted shape ``S2``.
+
+Boolean Array Indexing
+----------------------
+
+.. admonition:: Data-dependent output shape
+ :class: admonition important
+
+ For common boolean array use cases (e.g., using a dynamically-sized boolean array mask to filter the values of another array), the shape of the output array is data-dependent; hence, array libraries which build computation graphs (e.g., JAX, Dask, etc.) may find boolean array indexing difficult to implement. Accordingly, such libraries may choose to omit boolean array indexing. See :ref:`data-dependent-output-shapes` section for more details.
+
+An array must support indexing where the **sole index** is an ``M``-dimensional boolean array ``B`` with shape ``S1 = (s1, ..., sM)`` according to the following rules. Let ``A`` be an ``N``-dimensional array with shape ``S2 = (s1, ..., sM, ..., sN)``.
+
+ .. note::
+ The prohibition against combining boolean array indices with other single-axis indexing expressions includes the use of ``None``. To expand dimensions of the returned array, use repeated invocation of :func:`~array_api.expand_dims`.
+
+- If ``N >= M``, then ``A[B]`` must replace the first ``M`` dimensions of ``A`` with a single dimension having a size equal to the number of ``True`` elements in ``B``. The values in the resulting array must be in row-major (C-style) order; this is equivalent to ``A[nonzero(B)]``.
+
+ .. note::
+ For example, if ``N == M == 2``, indexing ``A`` via a boolean array ``B`` will return a one-dimensional array whose size is equal to the number of ``True`` elements in ``B``.
+
+- If ``N < M``, then an ``IndexError`` exception must be raised.
+
+- The size of each dimension in ``B`` must equal the size of the corresponding dimension in ``A`` or be ``0``, beginning with the first dimension in ``A``. If a dimension size does not equal the size of the corresponding dimension in ``A`` and is not ``0``, then an ``IndexError`` exception must be raised.
+
+- The elements of a boolean index array must be iterated in row-major, C-style order, with the exception of zero-dimensional boolean arrays.
+
+- A zero-dimensional boolean index array (equivalent to ``True`` or ``False``) must follow the same axis replacement rules stated above. Namely, a zero-dimensional boolean index array removes zero dimensions and adds a single dimension of length ``1`` if the index array's value is ``True`` and of length ``0`` if the index array's value is ``False``. Accordingly, for a zero-dimensional boolean index array ``B``, the result of ``A[B]`` has shape ``S = (1, s1, ..., sN)`` if the index array's value is ``True`` and has shape ``S = (0, s1, ..., sN)`` if the index array's value is ``False``.
+
+Return Values
+-------------
+
+The result of an indexing operation (e.g., multi-axis indexing, boolean array indexing, etc) must be an array of the same data type as the indexed array.
+
+.. note::
+ The specified return value behavior includes indexing operations which return a single value (e.g., accessing a single element within a one-dimensional array).
diff --git a/spec/2024.12/API_specification/indexing_functions.rst b/spec/2024.12/API_specification/indexing_functions.rst
new file mode 100644
index 000000000..c13e55ecf
--- /dev/null
+++ b/spec/2024.12/API_specification/indexing_functions.rst
@@ -0,0 +1,24 @@
+.. _indexing-functions:
+
+Indexing Functions
+===================
+
+ Array API specification for functions for indexing arrays.
+
+A conforming implementation of the array API standard must provide and support the following functions.
+
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ take
+ take_along_axis
diff --git a/spec/2024.12/API_specification/inspection.rst b/spec/2024.12/API_specification/inspection.rst
new file mode 100644
index 000000000..89d9c602a
--- /dev/null
+++ b/spec/2024.12/API_specification/inspection.rst
@@ -0,0 +1,42 @@
+.. _inspection:
+
+Inspection
+==========
+
+ Array API specification for namespace inspection utilities.
+
+A conforming implementation of the array API standard must provide and support the following functions and associated inspection APIs.
+
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api.info
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ __array_namespace_info__
+
+
+Inspection APIs
+---------------
+
+In the namespace (or class) returned by ``__array_namespace_info__``, a conforming implementation of the array API standard must provide and support the following functions (or methods) for programmatically querying data type and device support, capabilities, and other specification-defined implementation-specific behavior, as documented in the functions described below.
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ capabilities
+ default_device
+ default_dtypes
+ devices
+ dtypes
diff --git a/spec/2024.12/API_specification/linear_algebra_functions.rst b/spec/2024.12/API_specification/linear_algebra_functions.rst
new file mode 100644
index 000000000..04d36f50a
--- /dev/null
+++ b/spec/2024.12/API_specification/linear_algebra_functions.rst
@@ -0,0 +1,23 @@
+Linear Algebra Functions
+========================
+
+ Array API specification for linear algebra functions.
+
+A conforming implementation of the array API standard must provide and support the following functions.
+
+
+.. currentmodule:: array_api
+
+Objects in API
+--------------
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ matmul
+ matrix_transpose
+ tensordot
+ vecdot
diff --git a/spec/2024.12/API_specification/manipulation_functions.rst b/spec/2024.12/API_specification/manipulation_functions.rst
new file mode 100644
index 000000000..395c1c3e2
--- /dev/null
+++ b/spec/2024.12/API_specification/manipulation_functions.rst
@@ -0,0 +1,34 @@
+Manipulation Functions
+======================
+
+ Array API specification for manipulating arrays.
+
+A conforming implementation of the array API standard must provide and support the following functions.
+
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ broadcast_arrays
+ broadcast_to
+ concat
+ expand_dims
+ flip
+ moveaxis
+ permute_dims
+ repeat
+ reshape
+ roll
+ squeeze
+ stack
+ tile
+ unstack
diff --git a/spec/2024.12/API_specification/searching_functions.rst b/spec/2024.12/API_specification/searching_functions.rst
new file mode 100644
index 000000000..1a584f158
--- /dev/null
+++ b/spec/2024.12/API_specification/searching_functions.rst
@@ -0,0 +1,28 @@
+.. _searching-functions:
+
+Searching Functions
+===================
+
+ Array API specification for functions for searching arrays.
+
+A conforming implementation of the array API standard must provide and support the following functions.
+
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ argmax
+ argmin
+ count_nonzero
+ nonzero
+ searchsorted
+ where
diff --git a/spec/2024.12/API_specification/set_functions.rst b/spec/2024.12/API_specification/set_functions.rst
new file mode 100644
index 000000000..addf31e1f
--- /dev/null
+++ b/spec/2024.12/API_specification/set_functions.rst
@@ -0,0 +1,24 @@
+Set Functions
+=============
+
+ Array API specification for creating and operating on sets.
+
+A conforming implementation of the array API standard must provide and support the following functions.
+
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ unique_all
+ unique_counts
+ unique_inverse
+ unique_values
diff --git a/spec/2024.12/API_specification/sorting_functions.rst b/spec/2024.12/API_specification/sorting_functions.rst
new file mode 100644
index 000000000..ad3af8857
--- /dev/null
+++ b/spec/2024.12/API_specification/sorting_functions.rst
@@ -0,0 +1,31 @@
+Sorting Functions
+=================
+
+ Array API specification for sorting functions.
+
+A conforming implementation of the array API standard must provide and support the following functions.
+
+
+.. note::
+
+ For floating-point input arrays, the sort order of NaNs and signed zeros is unspecified and thus implementation-dependent.
+
+ Implementations may choose to sort signed zeros (``-0 < +0``) or may choose to rely solely on value equality (``==``).
+
+ Implementations may choose to sort NaNs (e.g., to the end or to the beginning of a returned array) or leave them in-place. Should an implementation sort NaNs, the sorting convention should be clearly documented in the conforming implementation's documentation.
+
+ While defining a sort order for IEEE 754 floating-point numbers is recommended in order to facilitate reproducible and consistent sort results, doing so is not currently required by this specification.
+
+.. currentmodule:: array_api
+
+Objects in API
+--------------
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ argsort
+ sort
diff --git a/spec/2024.12/API_specification/statistical_functions.rst b/spec/2024.12/API_specification/statistical_functions.rst
new file mode 100644
index 000000000..eb5e1a5d6
--- /dev/null
+++ b/spec/2024.12/API_specification/statistical_functions.rst
@@ -0,0 +1,29 @@
+Statistical Functions
+=====================
+
+ Array API specification for statistical functions.
+
+A conforming implementation of the array API standard must provide and support the following functions.
+
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ cumulative_prod
+ cumulative_sum
+ max
+ mean
+ min
+ prod
+ std
+ sum
+ var
diff --git a/spec/2024.12/API_specification/type_promotion.rst b/spec/2024.12/API_specification/type_promotion.rst
new file mode 100644
index 000000000..7a82c763b
--- /dev/null
+++ b/spec/2024.12/API_specification/type_promotion.rst
@@ -0,0 +1,163 @@
+.. _type-promotion:
+
+Type Promotion Rules
+====================
+
+ Array API specification for type promotion rules.
+
+Type promotion rules can be understood at a high level from the following diagram:
+
+.. image:: ../../_static/images/dtype_promotion_lattice.png
+ :alt: Type promotion diagram
+
+*Type promotion diagram. Promotion between any two types is given by their join on this lattice. Only the types of participating arrays matter, not their values. Dashed lines indicate that behavior for Python scalars is undefined on overflow. Boolean, integer and floating-point dtypes are not connected, indicating mixed-kind promotion is undefined.*
+
+Rules
+-----
+
+A conforming implementation of the array API standard must implement the following type promotion rules governing the common result type for two **array** operands during an arithmetic operation.
+
+A conforming implementation of the array API standard may support additional type promotion rules beyond those described in this specification.
+
+.. note::
+ Type codes are used here to keep tables readable; they are not part of the standard. In code, use the data type objects specified in :ref:`data-types` (e.g., ``int16`` rather than ``'i2'``).
+
+..
+ Note: please keep table columns aligned
+
+The following type promotion tables specify the casting behavior for operations involving two array operands. When more than two array operands participate, application of the promotion tables is associative (i.e., the result does not depend on operand order).
+
+Signed integer type promotion table
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
++--------+----+----+----+----+
+| | i1 | i2 | i4 | i8 |
++========+====+====+====+====+
+| **i1** | i1 | i2 | i4 | i8 |
++--------+----+----+----+----+
+| **i2** | i2 | i2 | i4 | i8 |
++--------+----+----+----+----+
+| **i4** | i4 | i4 | i4 | i8 |
++--------+----+----+----+----+
+| **i8** | i8 | i8 | i8 | i8 |
++--------+----+----+----+----+
+
+where
+
+- **i1**: 8-bit signed integer (i.e., ``int8``)
+- **i2**: 16-bit signed integer (i.e., ``int16``)
+- **i4**: 32-bit signed integer (i.e., ``int32``)
+- **i8**: 64-bit signed integer (i.e., ``int64``)
+
+Unsigned integer type promotion table
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
++--------+----+----+----+----+
+| | u1 | u2 | u4 | u8 |
++========+====+====+====+====+
+| **u1** | u1 | u2 | u4 | u8 |
++--------+----+----+----+----+
+| **u2** | u2 | u2 | u4 | u8 |
++--------+----+----+----+----+
+| **u4** | u4 | u4 | u4 | u8 |
++--------+----+----+----+----+
+| **u8** | u8 | u8 | u8 | u8 |
++--------+----+----+----+----+
+
+where
+
+- **u1**: 8-bit unsigned integer (i.e., ``uint8``)
+- **u2**: 16-bit unsigned integer (i.e., ``uint16``)
+- **u4**: 32-bit unsigned integer (i.e., ``uint32``)
+- **u8**: 64-bit unsigned integer (i.e., ``uint64``)
+
+Mixed unsigned and signed integer type promotion table
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
++--------+----+----+----+
+| | u1 | u2 | u4 |
++========+====+====+====+
+| **i1** | i2 | i4 | i8 |
++--------+----+----+----+
+| **i2** | i2 | i4 | i8 |
++--------+----+----+----+
+| **i4** | i4 | i4 | i8 |
++--------+----+----+----+
+| **i8** | i8 | i8 | i8 |
++--------+----+----+----+
+
+Floating-point type promotion table
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
++---------+-----+-----+-----+-----+
+| | f4 | f8 | c8 | c16 |
++=========+=====+=====+=====+=====+
+| **f4** | f4 | f8 | c8 | c16 |
++---------+-----+-----+-----+-----+
+| **f8** | f8 | f8 | c16 | c16 |
++---------+-----+-----+-----+-----+
+| **c8** | c8 | c16 | c8 | c16 |
++---------+-----+-----+-----+-----+
+| **c16** | c16 | c16 | c16 | c16 |
++---------+-----+-----+-----+-----+
+
+where
+
+- **f4**: single-precision (32-bit) floating-point number (i.e., ``float32``)
+- **f8**: double-precision (64-bit) floating-point number (i.e., ``float64``)
+- **c8**: single-precision complex floating-point number (i.e., ``complex64``)
+ composed of two single-precision (32-bit) floating-point numbers
+- **c16**: double-precision complex floating-point number (i.e., ``complex128``)
+ composed of two double-precision (64-bit) floating-point numbers
+
+Notes
+~~~~~
+
+- Type promotion rules must apply when determining the common result type for two **array** operands during an arithmetic operation, regardless of array dimension. Accordingly, zero-dimensional arrays must be subject to the same type promotion rules as dimensional arrays.
+- Type promotion of non-numerical data types to numerical data types is unspecified (e.g., ``bool`` to ``intxx`` or ``floatxx``).
+
+.. note::
+ Mixed integer and floating-point type promotion rules are not specified because behavior varies between implementations.
+
+
+.. _mixing-scalars-and-arrays:
+
+Mixing arrays with Python scalars
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Using Python scalars (i.e., instances of ``bool``, ``int``, ``float``, ``complex``) together with arrays must be supported for:
+
+- ``array <op> scalar``
+- ``scalar <op> array``
+
+where ``<op>`` is a built-in operator (including in-place operators, but excluding the matmul ``@`` operator; see :ref:`operators` for operators supported by the array object) and ``scalar`` has a type and value compatible with the array data type:
+
+- a Python ``bool`` for a ``bool`` array data type.
+- a Python ``int`` within the bounds of the given data type for integer array :ref:`data-types`.
+- a Python ``int`` or ``float`` for real-valued floating-point array data types.
+- a Python ``int``, ``float``, or ``complex`` for complex floating-point array data types.
+
+Provided the above requirements are met, the expected behavior is equivalent to:
+
+1. Convert the scalar to a zero-dimensional array with the same data type as that of the array used in the expression.
+2. Execute the operation for ``array <op> 0-D array`` (or ``0-D array <op> array`` if ``scalar`` was the left-hand argument).
+
+Additionally, using Python ``complex`` scalars together with arrays must be supported for:
+
+- ``array <op> scalar``
+- ``scalar <op> array``
+
+where ``<op>`` is a built-in operator (including in-place operators, but excluding the matmul ``@`` operator; see :ref:`operators` for operators supported by the array object) and ``scalar`` has a type and value compatible with a promoted array data type:
+
+- a Python ``complex`` for real-valued floating-point array data types.
+
+Provided the above requirements are met, the expected behavior is equivalent to:
+
+1. Convert the scalar to a zero-dimensional array with a complex floating-point array data type having the same precision as that of the array operand used in the expression (e.g., if an array has a ``float32`` data type, the scalar must be converted to a zero-dimensional array having a ``complex64`` data type; if an array has a ``float64`` data type, the scalar must be converted to a zero-dimensional array having a ``complex128`` data type).
+2. Execute the operation for ``array <op> 0-D array`` (or ``0-D array <op> array`` if ``scalar`` was the left-hand argument).
+
+Behavior is not specified for integers outside of the bounds of a given integer data type. Integers outside of bounds may result in overflow or an error.
+
+Behavior is not specified when mixing a Python ``float`` and an array with an integer data type; this may give ``float32``, ``float64``, or raise an exception. Behavior is implementation-specific.
+
+Behavior is not specified when mixing a Python ``complex`` and an array with an integer data type; this may give ``complex64``, ``complex128``, or raise an exception. Behavior is implementation-specific.
diff --git a/spec/2024.12/API_specification/utility_functions.rst b/spec/2024.12/API_specification/utility_functions.rst
new file mode 100644
index 000000000..a09c99f79
--- /dev/null
+++ b/spec/2024.12/API_specification/utility_functions.rst
@@ -0,0 +1,23 @@
+Utility Functions
+=================
+
+ Array API specification for utility functions.
+
+A conforming implementation of the array API standard must provide and support the following functions.
+
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ all
+ any
+ diff
diff --git a/spec/2024.12/API_specification/version.rst b/spec/2024.12/API_specification/version.rst
new file mode 100644
index 000000000..346395d9a
--- /dev/null
+++ b/spec/2024.12/API_specification/version.rst
@@ -0,0 +1,22 @@
+Version
+=======
+
+ Array API specification for versioning.
+
+A conforming implementation of the array API standard must provide a ``__array_api_version__`` attribute - see :ref:`api-versioning` for details.
+
+
+Objects in API
+--------------
+
+.. currentmodule:: array_api
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: attribute.rst
+ :nosignatures:
+
+ __array_api_version__
diff --git a/spec/2024.12/assumptions.md b/spec/2024.12/assumptions.md
new file mode 100644
index 000000000..b11482c5a
--- /dev/null
+++ b/spec/2024.12/assumptions.md
@@ -0,0 +1,77 @@
+(Assumptions)=
+
+# Assumptions
+
+## Hardware and software environments
+
+No assumptions on a specific hardware environment are made. It must be possible
+to create an array library adhering to this standard that runs (efficiently) on
+a variety of different hardware: CPUs with different architectures, GPUs,
+distributed systems and TPUs and other emerging accelerators.
+
+The same applies to software environments: it must be possible to create an
+array library adhering to this standard that runs efficiently independent of
+what compilers, build-time or run-time execution environment, or distribution
+and install method is employed. Parallel execution, JIT compilation, and
+delayed (lazy) evaluation must all be possible.
+
+The variety of hardware and software environments puts _constraints_ on choices
+made in the API standard. For example, JIT compilers may require output dtypes
+of functions to be predictable from input dtypes only rather than input values.
+
+
+(assumptions-dependencies)=
+
+## Dependencies
+
+The only dependency that's assumed in this standard is that on Python itself.
+Python >= 3.8 is assumed, motivated by the use of positional-only parameters
+(see [function and method signatures](API_specification/function_and_method_signatures.rst)).
+
+Importantly, array libraries are not assumed to be aware of each other, or of
+a common array-specific layer. The [use cases](use_cases.md) do not require
+such a dependency, and building and evolving an array library is easier without
+such a coupling. Facilitating support of multiple array types in downstream
+libraries is an important use case however; the assumed dependency structure
+for that is:
+
+
+
+Array libraries may know how to interoperate with each other, for example by
+constructing their own array type from that of another library or by shared
+memory use of an array (see [Data interchange mechanisms](design_topics/data_interchange.rst)).
+This can be done without a dependency though - only adherence to a protocol is
+enough.
+
+Array-consuming libraries will have to depend on one or more array libraries.
+That could be a "soft dependency" though, meaning retrieving an array library
+namespace from array instances that are passed in, but not explicitly doing
+`import arraylib_name`.
+
+
+## Backwards compatibility
+
+The assumption made during creation of this standard is that libraries are
+constrained by backwards compatibility guarantees to their users, and are
+likely unwilling to make significant backwards-incompatible changes for the
+purpose of conforming to this standard. Therefore it is assumed that the
+standard will be made available in a new namespace within each library, or the
+library will provide a way to retrieve a module or module-like object that
+adheres to this standard. See {ref}`how-to-adopt-this-api` for more details.
+
+
+## Production code & interactive use
+
+It is assumed that the primary use case is writing production code, for example
+in array-consuming libraries. As a consequence, making it easy to ensure that
+code is written as intended and has unambiguous semantics is preferred - and
+clear exceptions must be raised otherwise.
+
+It is also assumed that this does not significantly detract from the
+interactive user experience. However, in case existing libraries differ in
+behavior, the more strict version of that behavior is typically preferred. A
+good example is array inputs to functions - while NumPy accepts lists, tuples,
+generators, and anything else that could be turned into an array, most other
+libraries only accept their own array types. This standard follows the latter choice.
+It is likely always possible to put a thin "interactive use convenience layer"
+on top of a more strict behavior.
diff --git a/spec/2024.12/benchmark_suite.md b/spec/2024.12/benchmark_suite.md
new file mode 100644
index 000000000..41066c6a4
--- /dev/null
+++ b/spec/2024.12/benchmark_suite.md
@@ -0,0 +1,3 @@
+# Benchmark suite
+
+Adding a benchmark suite is planned in the future.
diff --git a/spec/2024.12/changelog.rst b/spec/2024.12/changelog.rst
new file mode 100644
index 000000000..701a3dbcd
--- /dev/null
+++ b/spec/2024.12/changelog.rst
@@ -0,0 +1,5 @@
+Changelog per API standard version
+==================================
+
+.. include:: ../../CHANGELOG.md
+ :parser: myst_parser.sphinx_
diff --git a/spec/2024.12/conf.py b/spec/2024.12/conf.py
new file mode 100644
index 000000000..dfe216bb5
--- /dev/null
+++ b/spec/2024.12/conf.py
@@ -0,0 +1,13 @@
+import sys
+from pathlib import Path
+
+sys.path.insert(0, str(Path(__file__).parents[2] / "src"))
+
+from array_api_stubs import _2024_12 as stubs_mod
+from _array_api_conf import *
+
+release = "2024.12"
+
+nav_title = html_theme_options.get("nav_title") + " {}".format(release)
+html_theme_options.update({"nav_title": nav_title})
+sys.modules["array_api"] = stubs_mod
diff --git a/spec/2024.12/design_topics/C_API.rst b/spec/2024.12/design_topics/C_API.rst
new file mode 100644
index 000000000..6a44596b0
--- /dev/null
+++ b/spec/2024.12/design_topics/C_API.rst
@@ -0,0 +1,94 @@
+.. _C-API:
+
+C API
+=====
+
+Use of a C API is out of scope for this array API, as mentioned in :ref:`Scope`.
+There are a lot of libraries that do use such an API - in particular via Cython code
+or via direct usage of the NumPy C API. When the maintainers of such libraries
+want to use this array API standard to support multiple types of arrays, they
+need a way to deal with that issue. This section aims to provide some guidance.
+
+The assumption in the rest of this section is that performance matters for the library,
+and hence the goal is to make other array types work without converting to a
+``numpy.ndarray`` or another particular array type. If that's not the case (e.g. for a
+visualization package), then other array types can simply be handled by converting
+to the supported array type.
+
+.. note::
+ Often a zero-copy conversion to ``numpy.ndarray`` is possible, at least for CPU arrays.
+ If that's the case, this may be a good way to support other array types.
+ The main difficulty in that case will be getting the return array type right - however,
+ this standard does provide a Python-level API for array construction that should allow
+ doing this. A relevant question is if it's possible to know with
+ certainty that a conversion will be zero-copy. This may indeed be
+ possible, see :ref:`data-interchange`.
+
+
+Example situations for C/Cython usage
+-------------------------------------
+
+Situation 1: a Python package that is mostly pure Python, with a limited number of Cython extensions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. note::
+ Projects in this situation include Statsmodels, scikit-bio and QuTiP
+
+Main strategy: documentation. The functionality using Cython code will not support other array types (or only with conversion to/from ``numpy.ndarray``), which can be documented per function.
+
+
+Situation 2: a Python package that contains a lot of Cython code
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. note::
+ Projects in this situation include scikit-learn and scikit-image
+
+Main strategy: add support for other array types *per submodule*. This keeps it manageable to explain to the user which functionality does and doesn't have support.
+
+Longer term: specific support for particular array types (e.g. ``cupy.ndarray`` can be supported with Python-only code via ``cupy.ElementwiseKernel``).
+
+
+Situation 3: a Python package that uses the NumPy or Python C API directly
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. note::
+ Projects in this situation include SciPy and Astropy
+
+Strategy: similar to *situation 2*, but the number of submodules that can support all array types may be limited.
+
+
+Device support
+--------------
+
+Supporting non-CPU array types in code using the C API or Cython seems problematic;
+this almost inevitably will require custom device-specific code (e.g., CUDA, ROCm) or
+something like JIT compilation with Numba.
+
+
+Other longer-term approaches
+----------------------------
+
+Further Python API standardization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There may be cases where it makes sense to standardize additional sets of
+functions, because they're important enough that array libraries tend to
+reimplement them. An example of this may be *special functions*, as provided
+by ``scipy.special``. Bessel and gamma functions for example are commonly
+reimplemented by array libraries. This may avoid having to drop into a
+particular implementation that does use a C API (e.g., one can then rely on
+``arraylib.special.gamma`` rather than having to use ``scipy.special.gamma``).
+
+HPy
+~~~
+
+`HPy <https://github.com/hpyproject/hpy>`_ is a new project that will provide a higher-level
+C API and ABI than CPython offers. A Cython backend targeting HPy will be provided as well.
+
+- Better PyPy support
+- Universal ABI - single binary for all supported Python versions
+- Cython backend generating HPy rather than CPython code
+
+HPy isn't quite ready for mainstream usage today, but once it is it may
+help make supporting multiple array libraries or adding non-CPU device
+support to Cython more feasible.
diff --git a/spec/2024.12/design_topics/accuracy.rst b/spec/2024.12/design_topics/accuracy.rst
new file mode 100644
index 000000000..9d82dbb1f
--- /dev/null
+++ b/spec/2024.12/design_topics/accuracy.rst
@@ -0,0 +1,93 @@
+.. _accuracy:
+
+Accuracy
+========
+
+ Array API specification for minimum accuracy requirements.
+
+Arithmetic Operations
+---------------------
+
+The results of element-wise arithmetic operations
+
+- ``+``
+- ``-``
+- ``*``
+- ``/``
+- ``%``
+
+including the corresponding element-wise array APIs defined in this standard
+
+- add
+- subtract
+- multiply
+- divide
+
+for real-valued floating-point operands must return a correctly rounded value according to IEEE 754-2019 and a supported rounding mode. By default, the rounding mode should be ``roundTiesToEven`` (i.e., round to nearest with ties rounded toward the nearest value with an even least significant bit).
+
+IEEE 754-2019 requires support for subnormal (a.k.a., denormal) numbers, which are useful for supporting gradual underflow. However, hardware support for subnormal numbers is not universal, and many platforms (e.g., accelerators) and compilers support toggling denormals-are-zero (DAZ) and/or flush-to-zero (FTZ) behavior to increase performance and to guard against timing attacks. Accordingly, conforming implementations may vary in their support for subnormal numbers.
+
+Mathematical Functions
+----------------------
+
+The results of the following functions
+
+- reciprocal
+- sqrt
+
+for real-valued floating-point operands must return a correctly rounded value according to IEEE 754-2019 and a supported rounding mode.
+
+This specification does **not** precisely define the behavior of the following functions
+
+- acos
+- acosh
+- asin
+- asinh
+- atan
+- atan2
+- atanh
+- cos
+- cosh
+- exp
+- expm1
+- hypot
+- log
+- log1p
+- log2
+- log10
+- logaddexp
+- pow
+- sin
+- sinh
+- tan
+- tanh
+
+except to require specific results for certain argument values that represent boundary cases of interest.
+
+.. note::
+ To help readers identify functions lacking precisely defined accuracy behavior, this specification uses the phrase "implementation-dependent approximation" in function descriptions.
+
+For other argument values, these functions should compute approximations to the results of respective mathematical functions; however, this specification recognizes that array libraries may be constrained by underlying hardware and/or seek to optimize performance over absolute accuracy and, thus, allows some latitude in the choice of approximation algorithms.
+
+Although the specification leaves the choice of algorithms to the implementation, this specification recommends (but does not specify) that implementations use the approximation algorithms for IEEE 754-2019 arithmetic contained in `FDLIBM <https://www.netlib.org/fdlibm>`_, the freely distributable mathematical library from Sun Microsystems, or some other comparable IEEE 754-2019 compliant mathematical library.
+
+.. note::
+ With exception of a few mathematical functions, returning results which are indistinguishable from correctly rounded infinitely precise results is difficult, if not impossible, to achieve due to the algorithms involved, the limits of finite-precision, and error propagation. However, this specification recognizes that numerical accuracy alignment among array libraries is desirable in order to ensure portability and reproducibility. Accordingly, for each mathematical function, the specification test suite includes test values which span a function's domain and reports the average and maximum deviation from either a designated standard implementation (e.g., an arbitrary precision arithmetic implementation) or an average computed across a subset of known array library implementations. Such reporting aids users who need to know how accuracy varies among libraries and developers who need to check the validity of their implementations.
+
+Statistical Functions
+---------------------
+
+This specification does not specify accuracy requirements for statistical functions; however, this specification does expect that a conforming implementation of the array API standard will make a best-effort attempt to ensure that its implementations are theoretically sound and numerically robust.
+
+.. note::
+ In order for an array library to pass the specification test suite, an array library's statistical function implementations must satisfy certain bare-minimum accuracy requirements (e.g., accurate summation of a small set of positive integers). Unfortunately, imposing more rigorous accuracy requirements is not possible without severely curtailing possible implementation algorithms and unduly increasing implementation complexity.
+
+Linear Algebra
+--------------
+
+This specification does not specify accuracy requirements for linear algebra functions; however, this specification does expect that a conforming implementation of the array API standard will make a best-effort attempt to ensure that its implementations are theoretically sound and numerically robust.
+
+Operations Involving Complex Numbers
+------------------------------------
+
+This specification does not specify accuracy requirements for arithmetic or functional operations involving complex-valued floating-point operands; however, this specification does expect that a conforming implementation of the array API standard will make a best-effort attempt to ensure that its implementations are theoretically sound and numerically robust.
diff --git a/spec/2024.12/design_topics/complex_numbers.rst b/spec/2024.12/design_topics/complex_numbers.rst
new file mode 100644
index 000000000..0eca79e91
--- /dev/null
+++ b/spec/2024.12/design_topics/complex_numbers.rst
@@ -0,0 +1,61 @@
+.. _complex-numbers:
+
+Complex Numbers
+===============
+
+The Complex Plane
+-----------------
+
+Mathematically, equality comparison between complex numbers depends on the choice of topology. For example, the complex plane has a continuum of infinities; however, when the complex plane is projected onto the surface of a sphere (a stereographic projection commonly referred to as the *Riemann sphere*), infinities coalesce into a single *point at infinity*, thus modeling the extended complex plane. For the former, the value :math:`\infty + 3j` is distinct from (i.e., does not equal) :math:`\infty + 4j`, while, for the latter, :math:`\infty + 3j` does equal :math:`\infty + 4j`.
+
+Modeling complex numbers as a Riemann sphere conveys certain mathematical niceties (e.g., well-behaved division by zero and preservation of the identity :math:`\frac{1}{\frac{1}{z}} = z`); however, translating the model to IEEE 754 floating-point operations can lead to some unexpected results. For example, according to IEEE 754, :math:`+\infty` and :math:`-\infty` are distinct values; hence, for equality comparison, if :math:`x = +\infty` and :math:`y = -\infty`, then :math:`x \neq y`. In contrast, if we convert :math:`x` and :math:`y` to their complex number equivalents :math:`x = +\infty + 0j` and :math:`y = -\infty + 0j` and then interpret within the context of the extended complex plane, we arrive at the opposite result; namely, :math:`x = y`.
+
+In short, given the constraints of floating-point arithmetic and the subtleties of signed zeros, infinities, NaNs, and their interaction, crafting a specification which always yields intuitive results and satisfies all use cases involving complex numbers is not possible. Instead, this specification attempts to follow precedent (e.g., C99, Python, Julia, NumPy, and elsewhere), while also minimizing surprise. The result is an imperfect balance in which certain APIs may appear to embrace the one-infinity model found in C/C++ for algebraic operations involving complex numbers (e.g., considering :math:`\infty + \operatorname{NaN}\ j` to be infinite, irrespective of the imaginary component's value, including NaN), while other APIs may rely on the complex plane with its multiplicity of infinities (e.g., in transcendental functions). Accordingly, consumers of this specification should expect that certain results involving complex numbers for one operation may not be wholly consistent with results involving complex numbers for another operation.
+
+
+.. _branch-cuts:
+
+Branch Cuts
+-----------
+
+In the mathematical field of complex analysis, a **branch cut** is a curve in the complex plane across which an analytic multi-valued function is discontinuous. Branch cuts are often taken as lines or line segments, and the choice of any particular branch cut is a matter of convention.
+
+For example, consider the function :math:`z^2` which maps a complex number :math:`z` to a well-defined number :math:`z^2`. The function's inverse function :math:`\sqrt{z}` does not, however, map to a single value. For example, for :math:`z = 1`, :math:`\sqrt{1} = \pm 1`. While one can choose a unique principal value for this and similar functions (e.g., in this case, the principal square root is :math:`+1`), choices cannot be made continuous over the whole complex plane, as lines of discontinuity must occur. To handle discontinuities, one commonly adopts branch cuts, which are not, in general, unique. Instead, one chooses a branch cut as a matter of convention in order to give simple analytic properties.
+
+Branch cuts do not arise for single-valued trigonometric, hyperbolic, integer power, or exponential functions; however, branch cuts do arise for their multi-valued inverses.
+
+In contrast to real-valued floating-point numbers which have well-defined behavior as specified in IEEE 754, complex-valued floating-point numbers have no equivalent specification. Accordingly, this specification chooses to follow C99 conventions for special cases and branch cuts for those functions supporting complex numbers. For those functions which do not have C99 equivalents (e.g., linear algebra APIs), the specification relies on dominant conventions among existing array libraries.
+
+.. warning::
+ All branch cuts documented in this specification are considered **provisional**. While conforming implementations of the array API standard should adopt the branch cuts described in this standard, consumers of array API standard implementations should **not** assume that branch cuts are consistent between implementations.
+
+ Provided no issues arise due to the choice of branch cut, the provisional status is likely to be removed in a future revision of this standard.
+
+
+.. _complex-number-ordering:
+
+Complex Number Ordering
+-----------------------
+
+Given a set :math:`\{a_1, \ldots, a_n\}`, an order relation must satisfy the following properties:
+
+1. **Reflexive**: for any :math:`a` in the set, :math:`a \leq a`.
+2. **Transitive**: for any :math:`a`, :math:`b`, and :math:`c` in the set, if :math:`a \leq b` and :math:`b \leq c`, then :math:`a \leq c`.
+3. **Antisymmetric**: for any :math:`a` and :math:`b` in the set, if :math:`a \leq b` and :math:`b \leq a`, then :math:`a = b`.
+4. **Total Order**: in addition to the *partial order* established by 1-3, for any :math:`a` and :math:`b` in the set, either :math:`a \leq b` or :math:`b \leq a` (or both).
+5. **Compatible with Addition**: for all :math:`a`, :math:`b`, and :math:`c` in the set, if :math:`a \leq b`, then :math:`a + c \leq b + c`.
+6. **Compatible with Multiplication**: for all :math:`a`, :math:`b`, and :math:`c` in the set, if :math:`a \leq b` and :math:`0 \leq c`, then :math:`ac \leq bc`.
+
+Defining an order relation for complex numbers which satisfies all six properties defined above is not possible. Accordingly, this specification does not require that a conforming implementation of the array API standard adopt any specific complex number order relation.
+
+In order to satisfy backward compatibility guarantees, conforming implementations of the array API standard may choose to define an ordering for complex numbers (e.g., lexicographic); however, consumers of the array API standard should **not** assume that complex number ordering is consistent between implementations or even supported.
+
+If a conforming implementation chooses to define an ordering for complex numbers, the ordering must be clearly documented.
+
+
+Value-based Promotion
+---------------------
+
+According to the type promotion rules described in this specification (see :ref:`type-promotion`), only the data types of the input arrays participating in an operation matter, not their values. The same principle applies to situations in which one or more results of operations on real-valued arrays are mathematically defined in the complex domain, but not in their real domain.
+
+By convention, the principal square root of :math:`-1` is :math:`j`, where :math:`j` is the imaginary unit. Despite this convention, for those operations supporting type promotion, conforming implementations must only consider input array data types when determining the data type of the output array. For example, if a real-valued input array is provided to :func:`~array_api.sqrt`, the output array must also be real-valued, even if the input array contains negative values. Accordingly, if a consumer of a conforming implementation of this specification desires for an operation's results to include the complex domain, the consumer should first cast the input array(s) to an appropriate complex floating-point data type before performing the operation.
diff --git a/spec/2024.12/design_topics/copies_views_and_mutation.rst b/spec/2024.12/design_topics/copies_views_and_mutation.rst
new file mode 100644
index 000000000..f302d8c8e
--- /dev/null
+++ b/spec/2024.12/design_topics/copies_views_and_mutation.rst
@@ -0,0 +1,102 @@
+.. _copyview-mutability:
+
+Copy-view behavior and mutability
+==================================
+
+.. admonition:: Mutating views
+ :class: important
+
+ Array API consumers are *strongly* advised to avoid *any* mutating operations when an array object may either be a "view" (i.e., an array whose data refers to memory that belongs to another array) or own memory of which one or more other array objects may be views. This admonition may become more strict in the future (e.g., this specification may require that view mutation be prohibited and trigger an exception). Accordingly, only perform mutation operations (e.g., in-place assignment) when absolutely confident that array data belongs to one, and only one, array object.
+
+Strided array implementations (e.g. NumPy, PyTorch, CuPy, MXNet) typically
+have the concept of a "view", meaning an array containing data in memory that
+belongs to another array (i.e., a different "view" on the original data).
+Views are useful for performance reasons—not copying data to a new location
+saves memory and is faster than copying—but can also affect the semantics
+of code. This happens when views are combined with *mutating* operations.
+The following example is illustrative:
+
+.. code-block:: python
+
+ x = ones(1)
+ y = x[:] # `y` *may* be a view on the data of `x`
+ y -= 1 # if `y` is a view, this modifies `x`
+
+Code similar to the above example will not be portable between array
+libraries. For example, for NumPy, PyTorch, and CuPy, ``x`` will contain the value ``0``,
+while, for TensorFlow, JAX, and Dask, ``x`` will contain the value ``1``. In
+this case, the combination of views and mutability is fundamentally problematic
+if the goal is to be able to write code with unambiguous semantics.
+
+Views are necessary for getting good performance out of the current strided
+array libraries. It is not always clear, however, when a library will return a
+view and when it will return a copy. This standard does not attempt to
+specify this—libraries may do either.
+
+There are several types of operations that may perform in-place mutation of
+array data. These include:
+
+1. In-place operators (e.g. ``*=``)
+2. Item assignment (e.g. ``x[0] = 1``)
+3. Slice assignment (e.g., ``x[:2, :] = 3``)
+4. The `out=` keyword present in some strided array libraries (e.g. ``sin(x, out=y)``)
+
+Libraries such as TensorFlow and JAX tend to support in-place operators by providing
+alternative syntax for item and slice assignment (e.g. an ``update_index``
+function or ``x.at[idx].set(y)``) and have no need for ``out=``.
+
+A potential solution could be to make views read-only or implement copy-on-write
+semantics. Both are hard to implement and would present significant backward
+compatibility issues for current strided array libraries. Read-only
+views would also not be a full solution due to the fact that mutating the original
+(base) array will also result in ambiguous semantics. Accordingly, this standard
+does not attempt to pursue this solution.
+
+Both in-place operators and item/slice assignment can be mapped onto
+equivalent functional expressions (e.g. ``x[idx] = val`` maps to
+``x.at[idx].set(val)``), and, given that both in-place operators and item/slice
+assignment are very widely used in both library and end user code, this
+standard chooses to include them.
+
+The situation with ``out=`` is slightly different—it's less heavily used, and
+easier to avoid. It's also not an optimal API because it mixes an
+"efficiency of implementation" consideration ("you're allowed to do this
+in-place") with the semantics of a function ("the output _must_ be placed into
+this array"). There are libraries that do some form of tracing or abstract
+interpretation over a vocabulary that does not support mutation (to make
+analysis easier). In those cases implementing ``out=`` with correct handling of
+views may even be impossible to do.
+
+There are alternatives. For example, the concept of donated arguments in JAX or
+working buffers in LAPACK which allow the user to express "you _may_ overwrite
+this data; do whatever is fastest". Given that those alternatives aren't widely
+used in array libraries today, this standard chooses to (a) leave out ``out=``,
+and (b) not specify another method of reusing arrays that are no longer needed
+as buffers.
+
+This leaves the problem of the initial example—despite the best efforts of this
+standard, it remains possible to write code that will not work the same for all
+array libraries. Users are advised to keep this in mind and to reason carefully
+about the potential ambiguity of such code.
+
+
+.. _copy-keyword-argument:
+
+Copy keyword argument behavior
+------------------------------
+
+Several APIs in this standard support a ``copy`` keyword argument (e.g.,
+``asarray``, ``astype``, ``reshape``, and ``__dlpack__``). Typically, when a
+user sets ``copy=True``, the user does so in order to ensure that they are free
+to mutate the returned array without side-effects—namely, without mutating other
+views on the original (base) array. Accordingly, when ``copy=True``, unless an
+array library can guarantee that an array can be mutated without side-effects,
+conforming libraries are recommended to always perform a physical copy of the
+underlying array data.
+
+.. note::
+ Typically, in order to provide such a guarantee, libraries must perform
+ whole-program analysis.
+
+Conversely, consumers of this standard should expect that, if they set
+``copy=True``, they are free to use in-place operations on a returned array.
diff --git a/spec/2024.12/design_topics/data_dependent_output_shapes.rst b/spec/2024.12/design_topics/data_dependent_output_shapes.rst
new file mode 100644
index 000000000..43daa9765
--- /dev/null
+++ b/spec/2024.12/design_topics/data_dependent_output_shapes.rst
@@ -0,0 +1,15 @@
+.. _data-dependent-output-shapes:
+
+Data-dependent output shapes
+============================
+
+Array libraries which build computation graphs commonly employ static analysis that relies upon known shapes. For example, JAX requires known array sizes when compiling code, in order to perform static memory allocation. Functions and operations which are value-dependent present difficulties for such libraries, as array sizes cannot be inferred ahead of time without also knowing the contents of the respective arrays.
+
+While value-dependent functions and operations are not impossible to implement for array libraries which build computation graphs, this specification does not want to impose an undue burden on such libraries and permits omission of value-dependent operations. All other array libraries are expected, however, to implement the value-dependent operations included in this specification in order to be array specification compliant.
+
+Value-dependent operations are demarcated in this specification using an admonition similar to the following:
+
+.. admonition:: Data-dependent output shape
+ :class: important
+
+ The shape of the output array for this function/operation depends on the data values in the input array; hence, array libraries which build computation graphs (e.g., JAX, Dask, etc.) may find this function/operation difficult to implement without knowing array values. Accordingly, such libraries may choose to omit this function. See :ref:`data-dependent-output-shapes` section for more details.
diff --git a/spec/2024.12/design_topics/data_interchange.rst b/spec/2024.12/design_topics/data_interchange.rst
new file mode 100644
index 000000000..3b3040672
--- /dev/null
+++ b/spec/2024.12/design_topics/data_interchange.rst
@@ -0,0 +1,105 @@
+.. _data-interchange:
+
+Data interchange mechanisms
+===========================
+
+This section discusses the mechanism to convert one type of array into another.
+As discussed in the :ref:`assumptions-dependencies` section,
+*functions* provided by an array library are not expected to operate on
+*array types* implemented by another library. Instead, the array can be
+converted to a "native" array type.
+
+The interchange mechanism must offer the following:
+
+1. Data access via a protocol that describes the memory layout of the array
+ in an implementation-independent manner.
+
+ *Rationale: any number of libraries must be able to exchange data, and no
+ particular package must be needed to do so.*
+
+2. Support for all dtypes in this API standard (see :ref:`data-types`).
+
+3. Device support. It must be possible to determine on what device the array
+ that is to be converted lives.
+
+ *Rationale: there are CPU-only, GPU-only, and multi-device array types;
+ it's best to support these with a single protocol (with separate
+ per-device protocols it's hard to figure out unambiguous rules for which
+ protocol gets used, and the situation will get more complex over time
+ as TPU's and other accelerators become more widely available).*
+
+4. Zero-copy semantics where possible, making a copy only if needed (e.g.
+ when data is not contiguous in memory).
+
+ *Rationale: performance.*
+
+5. A Python-side and a C-side interface, the latter with a stable C ABI.
+
+ *Rationale: all prominent existing array libraries are implemented in
+ C/C++, and are released independently from each other. Hence a stable C
+ ABI is required for packages to work well together.*
+
+DLPack: An in-memory tensor structure
+-------------------------------------
+
+The best candidate for this protocol is
+`DLPack <https://github.com/dmlc/dlpack>`_, and hence that is what this
+standard has chosen as the primary/recommended protocol. Note that the
+``asarray`` function also supports the Python buffer protocol (CPU-only) to
+support libraries that already implement buffer protocol support.
+
+.. note::
+ The main alternatives to DLPack are device-specific methods:
+
+ - The `buffer protocol <https://docs.python.org/3/c-api/buffer.html>`_ on CPU
+ - ``__cuda_array_interface__`` for CUDA, specified in the Numba documentation
+ `here <https://numba.readthedocs.io/en/stable/cuda/cuda_array_interface.html>`_
+ (Python-side only at the moment)
+
+ An issue with device-specific protocols is: if two libraries both
+ support multiple device types, in which order should the protocols be
+ tried? A growth in the number of protocols to support each time a new
+ device gets supported by array libraries (e.g. TPUs, AMD GPUs, emerging
+ hardware accelerators) also seems undesirable.
+
+ In addition to the above argument, it is also clear from adoption
+ patterns that DLPack has the widest support. The buffer protocol, despite
+ being a lot older and standardized as part of Python itself via PEP 3118,
+ hardly has any support from array libraries. CPU interoperability is
+ mostly dealt with via the NumPy-specific ``__array__`` (which, when called,
+ means the object it is attached to must return a ``numpy.ndarray``
+ containing the data the object holds).
+
+ See the `RFC to adopt DLPack <https://github.com/data-apis/consortium-feedback/issues/1>`_
+ for discussion that preceded the adoption of DLPack.
+
+DLPack's documentation can be found at: https://dmlc.github.io/dlpack/latest/.
+
+The `Python specification of DLPack <https://dmlc.github.io/dlpack/latest/python_spec.html>`__
+page gives a high-level specification for data exchange in Python using DLPack.
+
+.. note::
+ DLPack is a standalone protocol/project and can therefore be used outside of
+ this standard. Python libraries that want to implement only DLPack support
+ are recommended to do so using the same syntax and semantics as outlined
+ below. They are not required to return an array object from ``from_dlpack``
+ which conforms to this standard.
+
+Non-supported use cases
+-----------------------
+
+Use of DLPack requires that the data can be represented by a strided, in-memory
+layout on a single device. This covers usage by a large range of, but not all,
+known and possible array libraries. Use cases that are not supported by DLPack
+include:
+
+- Distributed arrays, i.e., the data residing on multiple nodes or devices,
+- Sparse arrays, i.e., sparse representations where a data value (typically
+ zero) is implicit.
+
+There may be other reasons why it is not possible or desirable for an
+implementation to materialize the array as strided data in memory. In such
+cases, the implementation may raise a `BufferError` in the `__dlpack__` or
+`__dlpack_device__` method. In case an implementation is never able to export
+its array data via DLPack, it may omit `__dlpack__` and `__dlpack_device__`
+completely, and hence `from_dlpack` may raise an `AttributeError`.
diff --git a/spec/2024.12/design_topics/device_support.rst b/spec/2024.12/design_topics/device_support.rst
new file mode 100644
index 000000000..593b0b9fa
--- /dev/null
+++ b/spec/2024.12/design_topics/device_support.rst
@@ -0,0 +1,112 @@
+.. _device-support:
+
+Device support
+==============
+
+For libraries that support execution on more than a single hardware device - e.g. CPU and GPU, or multiple GPUs - it is important to be able to control on which device newly created arrays get placed and where execution happens. Attempting to be fully implicit doesn't always scale well to situations with multiple GPUs.
+
+Existing libraries employ one or more of these three methods to exert such control over data placement:
+
+1. A global default device, which may be fixed or user-switchable.
+2. A context manager to control device assignment within its scope.
+3. Local control for data allocation target device via explicit keywords, and a method to transfer arrays to another device.
+
+Libraries differ in how execution is controlled, via a context manager or with the convention that execution takes place on the same device where all argument arrays are allocated. And they may or may not allow mixing arrays on different devices via implicit data transfers.
+
+This standard chooses to add support for method 3 (local control), with the convention that execution takes place on the same device where all argument arrays are allocated. The rationale for choosing method 3 is because it's the most explicit and granular, with its only downside being verbosity. A context manager may be added in the future - see :ref:`device-out-of-scope` for details.
+
+Intended usage
+--------------
+
+The intended usage for the device support in the current version of the
+standard is *device handling in library code*. The assumed pattern is that
+users create arrays (for which they can use all the relevant device syntax
+that the library they use provides), and that they then pass those arrays
+into library code which may have to do the following:
+
+- Create new arrays on the same device as an array that's passed in.
+- Determine whether two input arrays are present on the same device or not.
+- Move an array from one device to another.
+- Create output arrays on the same device as the input arrays.
+- Pass on a specified device to other library code.
+
+.. note::
+ Given that there is not much that's currently common in terms of
+ device-related syntax between different array libraries, the syntax included
+ in the standard is kept as minimal as possible while enabling the
+ above-listed use cases.
+
+Syntax for device assignment
+----------------------------
+
+The array API provides the following syntax for device assignment and
+cross-device data transfer:
+
+1. A ``.device`` property on the array object, which returns a ``Device`` object
+ representing the device the data in the array is stored on, and supports
+ comparing devices for equality with ``==`` and ``!=`` within the same library
+ (e.g., by implementing ``__eq__``; comparing device objects from different
+ libraries is out of scope).
+2. A ``device=None`` keyword for array creation functions, which takes an
+ instance of a ``Device`` object.
+3. A ``.to_device`` method on the array object to copy an array to a different device.
+
+.. note::
+ The current API standard does **not** include a universal ``Device`` object
+ recognized by all compliant libraries. Accordingly, the standard does not
+ provide a means of instantiating a ``Device`` object to point to a specific
+ physical or logical device.
+
+ The choice to not include a standardized ``Device`` object may be revisited
+ in a future revision of this standard.
+
+ For array libraries which concern themselves with multi-device support,
+ including CPU and GPU, they are free to expose a library-specific device
+ object (e.g., for creating an array on a particular device). While a
+ library-specific device object can be used as input to ``to_device``, beware
+ that this will mean non-portability as code will be specific to that
+ library.
+
+Semantics
+---------
+
+Handling devices is complex, and some frameworks have elaborate policies for
+handling device placement. Therefore this section only gives recommendations,
+rather than hard requirements:
+
+- Respect explicit device assignment (i.e. if the input to the ``device=`` keyword is not ``None``, guarantee that the array is created on the given device, and raise an exception otherwise).
+- Preserve device assignment as much as possible (e.g. output arrays from a function are expected to be on the same device as input arrays to the function).
+- Raise an exception if an operation involves arrays on different devices (i.e. avoid implicit data transfer between devices).
+- Use a default for ``device=None`` which is consistent between functions within the same library.
+- If a library has multiple ways of controlling device placement, the most explicit method should have the highest priority. For example:
+
+ 1. If ``device=`` keyword is specified, that always takes precedence
+
+ 2. If ``device=None``, then use the setting from a context manager, if set.
+
+ 3. If no context manager was used, then use the global default device/strategy
+
+.. _device-out-of-scope:
+
+Out of scope for device support
+-------------------------------
+
+Individual libraries may offer APIs for one or more of the following topics;
+however, those are out of scope for this standard:
+
+- Identifying a specific physical or logical device across libraries
+- Setting a default device globally
+- Stream/queue control
+- Distributed allocation
+- Memory pinning
+- A context manager for device control
+
+.. note::
+ A context manager for controlling the default device is present in most existing array
+ libraries (NumPy being the exception). There are concerns with using a
+ context manager however. A context manager can be tricky to use at a high
+ level, since it may affect library code below function calls (non-local
+ effects). See, e.g., `this PyTorch issue <https://github.com/pytorch/pytorch/issues/27878>`_
+ for a discussion on a good context manager API.
+
+ Adding a context manager may be considered in a future version of this API standard.
diff --git a/spec/2024.12/design_topics/exceptions.rst b/spec/2024.12/design_topics/exceptions.rst
new file mode 100644
index 000000000..570fe56e3
--- /dev/null
+++ b/spec/2024.12/design_topics/exceptions.rst
@@ -0,0 +1,28 @@
+.. _exceptions:
+
+Exceptions
+==========
+
+This standard specifies expected syntax and semantics for a set of APIs. When
+inputs to an API do not match what is expected, libraries may emit warnings,
+raise exceptions, or misbehave in unexpected ways. In general, it is not
+possible to foresee or specify all the ways in which unexpected or invalid
+inputs are provided. Therefore, this standard does not attempt to specify
+exception or warning types to the extent needed in order to do exception
+handling in a portable manner. In general, it is expected that array library
+implementers follow `the guidance given by the documentation of the Python
+language `__, and either use
+builtin exception or warning types that are appropriate for the
+situation or use custom exceptions or warnings that derive from those builtin
+ones.
+
+In specific cases, it may be useful to provide guidance to array library
+authors regarding what an appropriate exception is. That guidance will be
+phrased as *should* rather than *must* (typically in a *Raises* section),
+because (a) there may be reasons for an implementer to deviate, and (b) more
+often than not, existing array library implementation already differ in their
+choices, and it may not be worth them breaking backward compatibility in order
+to comply with a "must" in this standard.
+
+In other cases, this standard will only specify that an exception should or
+must be raised, but not mention what type of exception that is.
diff --git a/spec/2024.12/design_topics/index.rst b/spec/2024.12/design_topics/index.rst
new file mode 100644
index 000000000..548eda90c
--- /dev/null
+++ b/spec/2024.12/design_topics/index.rst
@@ -0,0 +1,18 @@
+Design topics & constraints
+===========================
+
+.. toctree::
+ :caption: Design topics & constraints
+ :maxdepth: 1
+
+ copies_views_and_mutation
+ data_dependent_output_shapes
+ lazy_eager
+ data_interchange
+ device_support
+ static_typing
+ accuracy
+ exceptions
+ complex_numbers
+ C_API
+ parallelism
diff --git a/spec/2024.12/design_topics/lazy_eager.rst b/spec/2024.12/design_topics/lazy_eager.rst
new file mode 100644
index 000000000..63297ac73
--- /dev/null
+++ b/spec/2024.12/design_topics/lazy_eager.rst
@@ -0,0 +1,43 @@
+.. _lazy-eager:
+
+Lazy vs. eager execution
+========================
+
+While the execution model for implementations is out of scope of this standard,
+there are a few aspects of lazy (or graph-based) execution as contrasted to
+eager execution that may have an impact on the prescribed semantics of
+individual APIs, and will therefore show up in the API specification.
+
+One important difference is data-dependent or value-dependent behavior, as
+described in :ref:`data-dependent-output-shapes`. Because such behavior is hard
+to implement, implementers may choose to omit such APIs from their library.
+
+Another difference is when the Python language itself prescribes that a
+specific type *must* be returned. For those cases, it is not possible to return
+a lazy/delayed kind of object to avoid computing a value. This is the case for
+five dunder methods: `__bool__`, `__int__`, `__float__`, `__complex__` and
+`__index__`. Each implementation has only two choices when one of these methods
+is called:
+
+1. Compute a value of the required type (a Python scalar of type `bool`, `int`,
+ `float` or `complex`), or
+2. Raise an exception.
+
+When an implementation is 100% lazy, for example when it serializes a
+computation graph, computing the value is not possible and hence such an
+implementation has no choice but to raise an exception. For a "mostly lazy"
+implementation, it may make sense to trigger execution instead - but it is not
+required to, both choices are valid.
+
+A common code construct where this happens is conditional logic, e.g.::
+
+ vals = compute_something()
+ if all(vals):
+ # The if-statement will make Python call the __bool__ method
+ # on the result of `all(vals)`.
+ do_something_else()
+
+Note that the API does not contain control flow constructs, as of now, that
+would allow avoiding the implicit `__bool__` call in the example above. The
+only control flow-like function is `where`, but there's no function like `cond`
+to replace an `if`-statement.
diff --git a/spec/2024.12/design_topics/parallelism.rst b/spec/2024.12/design_topics/parallelism.rst
new file mode 100644
index 000000000..f013a9cf9
--- /dev/null
+++ b/spec/2024.12/design_topics/parallelism.rst
@@ -0,0 +1,24 @@
+Parallelism
+===========
+
+Parallelism is mostly, but not completely, an execution or runtime concern
+rather than an API concern. Execution semantics are out of scope for this API
+standard, and hence won't be discussed further here. The API related part
+involves how libraries allow users to exercise control over the parallelism
+they offer, such as:
+
+- Via environment variables. This is the method of choice for BLAS libraries and libraries using OpenMP.
+- Via a keyword to individual functions or methods. Examples include the ``n_jobs`` keyword used in scikit-learn and the ``workers`` keyword used in SciPy.
+- Build-time settings to enable a parallel or distributed backend.
+- Via letting the user set chunk sizes. Dask uses this approach.
+
+When combining multiple libraries, one has to deal with auto-parallelization
+semantics and nested parallelism. Two things that could help improve the
+coordination of parallelization behavior in a stack of Python libraries are:
+
+1. A common API pattern for enabling parallelism
+2. A common library providing a parallelization layer
+
+Option (1) may possibly fit in a future version of this array API standard.
+`array-api issue 4 <https://github.com/data-apis/array-api/issues/4>`_ contains
+more detailed discussion on the topic of parallelism.
diff --git a/spec/2024.12/design_topics/static_typing.rst b/spec/2024.12/design_topics/static_typing.rst
new file mode 100644
index 000000000..26a1fb901
--- /dev/null
+++ b/spec/2024.12/design_topics/static_typing.rst
@@ -0,0 +1,50 @@
+Static typing
+=============
+
+Good support for static typing both in array libraries and array-consuming
+code is desirable. Therefore the exact type or set of types for each
+parameter, keyword and return value is specified for functions and methods -
+see :ref:`function-and-method-signatures`. That section specifies arrays
+simply as ``array``; what that means is dealt with in this section.
+
+Introducing type annotations in libraries became more relevant only when
+Python 2.7 support was dropped at the start of 2020. As a consequence, using
+type annotations with array libraries is largely still a work in progress.
+This version of the API standard does not deal with trying to type *array
+properties* like shape, dimensionality or dtype, because that's not a solved
+problem in individual array libraries yet.
+
+An ``array`` type annotation can mean either the type of one specific array
+object, or some superclass or typing Protocol - as long as it is consistent
+with the array object specified in :ref:`array-object`. To illustrate by
+example:
+
+.. code-block:: python
+
+ # `Array` is a particular class in the library
+ def sin(x: Array, /, ...) -> Array:
+ ...
+
+and
+
+.. code-block:: python
+
+ # There's some base class `_BaseArray`, and there may be multiple
+ # array subclasses inside the library
+ A = TypeVar('A', bound=_BaseArray)
+ def sin(x: A, /, ...) -> A:
+ ...
+
+should both be fine. There may be other variations possible. Also note that
+this standard does not require that input and output array types are the same
+(they're expected to be defined in the same library though). Given that
+array libraries don't have to be aware of other types of arrays defined in
+other libraries (see :ref:`assumptions-dependencies`), this should be enough
+for a single array library.
+
+That said, an array-consuming library aiming to support multiple array types
+may need more - for example a protocol to enable structural subtyping. This
+API standard currently takes the position that it does not provide any
+reference implementation or package that can or should be relied on at
+runtime, hence no such protocol is defined here. This may be dealt with in a
+future version of this standard.
diff --git a/spec/2024.12/extensions/fourier_transform_functions.rst b/spec/2024.12/extensions/fourier_transform_functions.rst
new file mode 100644
index 000000000..170ae390b
--- /dev/null
+++ b/spec/2024.12/extensions/fourier_transform_functions.rst
@@ -0,0 +1,45 @@
+Fourier Transform Functions
+===========================
+
+ Array API specification for Fourier transform functions.
+
+Extension name and usage
+------------------------
+
+The name of the namespace providing the extension must be: ``fft``.
+
+If implemented, this ``fft`` extension must be retrievable via::
+
+ >>> xp = x.__array_namespace__()
+ >>> if hasattr(xp, 'fft'):
+ >>> # Use `xp.fft`
+
+
+Objects in API
+--------------
+
+A conforming implementation of this ``fft`` extension must provide and support the following functions.
+
+.. currentmodule:: array_api.fft
+
+..
+ NOTE: please keep the functions and their inverse together
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ fft
+ ifft
+ fftn
+ ifftn
+ rfft
+ irfft
+ rfftn
+ irfftn
+ hfft
+ ihfft
+ fftfreq
+ rfftfreq
+ fftshift
+ ifftshift
diff --git a/spec/2024.12/extensions/index.rst b/spec/2024.12/extensions/index.rst
new file mode 100644
index 000000000..3b9409954
--- /dev/null
+++ b/spec/2024.12/extensions/index.rst
@@ -0,0 +1,34 @@
+.. _extensions:
+
+Extensions
+==========
+
+Extensions are coherent sets of functionality that are commonly implemented
+across array libraries. Each array library supporting this standard may, but is
+not required to, implement an extension. If an extension is supported, it
+must be accessible inside the main array API supporting namespace as a separate
+namespace.
+
+Extension module implementors must aim to provide all functions and other
+public objects in an extension. The rationale for this is that downstream usage
+can then check whether or not the extension is present (using ``hasattr(xp,
+'extension_name')`` should be enough), and can then assume that functions are
+implemented. This in turn makes it also easy for array-consuming libraries to
+document which array libraries they support - e.g., "all libraries implementing
+the array API standard and its linear algebra extension".
+
+The mechanism through which the extension namespace is made available is up to
+the implementer, e.g. via a regular submodule that is imported under the
+``linalg`` name, or via a module-level ``__getattr__``.
+
+The functions in an extension must adhere to the same conventions as those in
+the array API standard. See :ref:`api-specification`.
+
+------------------------------------------------------------------------------
+
+.. toctree::
+ :caption: Extension modules:
+ :maxdepth: 1
+
+ fourier_transform_functions
+ linear_algebra_functions
diff --git a/spec/2024.12/extensions/linear_algebra_functions.rst b/spec/2024.12/extensions/linear_algebra_functions.rst
new file mode 100644
index 000000000..938221c79
--- /dev/null
+++ b/spec/2024.12/extensions/linear_algebra_functions.rst
@@ -0,0 +1,116 @@
+.. _linear-algebra-extension:
+
+Linear Algebra Extension
+========================
+
+ Array API specification for linear algebra functions.
+
+Extension name and usage
+------------------------
+
+The name of the namespace providing the extension must be: ``linalg``.
+
+If implemented, this ``linalg`` extension must be retrievable via::
+
+ >>> xp = x.__array_namespace__()
+ >>> if hasattr(xp, 'linalg'):
+ >>> # Use `xp.linalg`
+
+
+Design Principles
+-----------------
+
+A principal goal of this specification is to standardize commonly implemented interfaces among array libraries. While this specification endeavors to avoid straying too far from common practice, this specification does, with due restraint, seek to address design decisions arising more from historical accident than first principles. This is especially true for linear algebra APIs, which have arisen and evolved organically over time and have often been tied to particular underlying implementations (e.g., to BLAS and LAPACK).
+
+Accordingly, the standardization process affords the opportunity to reduce interface complexity among linear algebra APIs by inferring and subsequently codifying common design themes, thus allowing more consistent APIs. What follows is the set of design principles governing the APIs which follow:
+
+1. **Batching**: if an operation is explicitly defined in terms of matrices (i.e., two-dimensional arrays), then the associated interface should support "batching" (i.e., the ability to perform the operation over a "stack" of matrices). Example operations include:
+
+ - ``inv``: computing the multiplicative inverse of a square matrix.
+ - ``cholesky``: performing Cholesky decomposition.
+ - ``matmul``: performing matrix multiplication.
+
+2. **Data types**: if an operation requires decimal operations and :ref:`type-promotion` semantics are undefined (e.g., as is the case for mixed-kind promotions), then the associated interface should be specified as being restricted to floating-point data types. While the specification uses the term "SHOULD" rather than "MUST", a conforming implementation of the array API standard should only ignore the restriction provided overly compelling reasons for doing so. Example operations which should be limited to floating-point data types include:
+
+ - ``inv``: computing the multiplicative inverse.
+ - ``slogdet``: computing the natural logarithm of the absolute value of the determinant.
+ - ``norm``: computing the matrix or vector norm.
+
+ Certain operations are solely comprised of multiplications and additions. Accordingly, associated interfaces need not be restricted to floating-point data types. However, careful consideration should be given to overflow, and use of floating-point data types may be more prudent in practice. Example operations include:
+
+ - ``matmul``: performing matrix multiplication.
+ - ``trace``: computing the sum along the diagonal.
+ - ``cross``: computing the vector cross product.
+
+ Lastly, certain operations may be performed independent of data type, and, thus, the associated interfaces should support all data types specified in this standard. Example operations include:
+
+ - ``matrix_transpose``: computing the transpose.
+ - ``diagonal``: returning the diagonal.
+
+3. **Return values**: if an interface has more than one return value, the interface should return a namedtuple consisting of each value.
+
+ In general, interfaces should avoid polymorphic return values (e.g., returning an array **or** a namedtuple, dependent on, e.g., an optional keyword argument). Dedicated interfaces for each return value type are preferred, as dedicated interfaces are easier to reason about at both the implementation level and user level. Example interfaces which could be combined into a single overloaded interface, but are not, include:
+
+ - ``eigh``: computing both eigenvalues and eigenvectors.
+ - ``eigvalsh``: computing only eigenvalues.
+
+4. **Implementation agnosticism**: a standardized interface should eschew parameterization (including keyword arguments) biased toward particular implementations.
+
+ Historically, at a time when all array computing happened on CPUs, BLAS and LAPACK underpinned most numerical computing libraries and environments. Naturally, language and library abstractions catered to the parameterization of those libraries, often exposing low-level implementation details verbatim in their higher-level interfaces, even if such choices would be considered poor or ill-advised by today's standards (e.g., NumPy's use of `UPLO` in `eigh`). However, the present day is considerably different. While still important, BLAS and LAPACK no longer hold a monopoly over linear algebra operations, especially given the proliferation of devices and hardware on which such operations must be performed. Accordingly, interfaces must be conservative in the parameterization they support in order to best ensure universality. Such conservatism applies even to performance optimization parameters afforded by certain hardware.
+
+5. **Orthogonality**: an interface should have clearly defined and delineated functionality which, ideally, has no overlap with the functionality of other interfaces in the specification. Providing multiple interfaces which can all perform the same operation creates unnecessary confusion regarding interface applicability (i.e., which interface is best at which time) and decreases readability of both library and user code. Where overlap is possible, the specification must be parsimonious in the number of interfaces, ensuring that each interface provides a unique and compelling abstraction. Examples of related interfaces which provide distinct levels of abstraction (and generality) include:
+
+ - ``vecdot``: computing the dot product of two vectors.
+ - ``matmul``: performing matrix multiplication (including between two vectors and thus the dot product).
+ - ``tensordot``: computing tensor contractions (generalized sum-products).
+ - ``einsum``: expressing operations in terms of Einstein summation convention, including dot products and tensor contractions.
+
+ The above can be contrasted with, e.g., NumPy, which provides the following interfaces for computing the dot product or related operations:
+
+ - ``dot``: dot product, matrix multiplication, and tensor contraction.
+ - ``inner``: dot product.
+ - ``vdot``: dot product with flattening and complex conjugation.
+ - ``multi_dot``: chained dot product.
+ - ``tensordot``: tensor contraction.
+ - ``matmul``: matrix multiplication (dot product for two vectors).
+ - ``einsum``: Einstein summation convention.
+
+ where ``dot`` is overloaded based on input array dimensionality and ``vdot`` and ``inner`` exhibit a high degree of overlap with other interfaces. By consolidating interfaces and more clearly delineating behavior, this specification aims to ensure that each interface has a unique purpose and defined use case.
+
+.. currentmodule:: array_api.linalg
+
+Objects in API
+--------------
+
+A conforming implementation of this ``linalg`` extension must provide and support the following functions.
+
+..
+ NOTE: please keep the functions in alphabetical order
+
+.. autosummary::
+ :toctree: generated
+ :template: method.rst
+
+ cholesky
+ cross
+ det
+ diagonal
+ eigh
+ eigvalsh
+ inv
+ matmul
+ matrix_norm
+ matrix_power
+ matrix_rank
+ matrix_transpose
+ outer
+ pinv
+ qr
+ slogdet
+ solve
+ svd
+ svdvals
+ tensordot
+ trace
+ vecdot
+ vector_norm
diff --git a/spec/2024.12/future_API_evolution.md b/spec/2024.12/future_API_evolution.md
new file mode 100644
index 000000000..443f683d5
--- /dev/null
+++ b/spec/2024.12/future_API_evolution.md
@@ -0,0 +1,60 @@
+(future-API-evolution)=
+
+# Future API standard evolution
+
+## Scope extensions
+
+Proposals for scope extensions in a future version of the API standard will follow
+the process documented at https://github.com/data-apis/governance/blob/master/process_document.md
+
+In summary, proposed new APIs go through several maturity stages, and will only be
+accepted in a future version of this API standard once they have reached the "Final"
+maturity stage, which means multiple array libraries have compliant implementations
+and real-world experience from use of those implementations is available.
+
+
+## Backwards compatibility
+
+Functions, objects, keywords and specified behavior are added to this API standard
+only if those are already present in multiple existing array libraries, and if there is
+data that those APIs are used. Therefore it is highly unlikely that future versions
+of this standard will make backwards-incompatible changes.
+
+The aim is for future versions to be 100% backwards compatible with older versions.
+Any exceptions must have strong rationales and be clearly documented in the updated
+API specification.
+
+
+(api-versioning)=
+
+## Versioning
+
+This API standard uses the following versioning scheme:
+
+- The version is date-based, in the form `yyyy.mm` (e.g., `2020.12`).
+- The version shall not include a standard way to do `alpha`/`beta`/`rc` or
+ `.post`/`.dev` type versions.
+ _Rationale: that's for Python packages, not for a standard._
+- The version must be made available at runtime via an attribute
+ `__array_api_version__` by a compliant implementation, in `'yyyy.mm'` format
+ as a string, in the namespace that implements the API standard.
+ _Rationale: dunder version strings are the standard way of doing this._
+
+No utilities for dealing with version comparisons need to be provided; given
+the format simple string comparisons with Python operators (`==`, `<`, `>=`,
+etc.) will be enough.
+
+```{note}
+
+Rationale for the `yyyy.mm` versioning scheme choice:
+the API will be provided as part of a library, which already has a versioning
+scheme (typically PEP 440 compliant and in the form `major.minor.bugfix`),
+and a way to access it via `module.__version__`. The API standard version is
+completely independent from the package version. Given the standardization
+process, it resembles a C/C++ versioning scheme (e.g. `C99`, `C++14`) more
+than Python package versioning.
+```
+
+The frequency of releasing a new version of an API standard will likely be at
+regular intervals and on the order of one year, however no assumption on
+frequency of new versions appearing must be made.
diff --git a/spec/2024.12/index.rst b/spec/2024.12/index.rst
new file mode 100644
index 000000000..3e51cc68e
--- /dev/null
+++ b/spec/2024.12/index.rst
@@ -0,0 +1,37 @@
+Python array API standard
+=========================
+
+Contents
+--------
+
+.. toctree::
+ :caption: Context
+ :maxdepth: 1
+
+ purpose_and_scope
+ use_cases
+ assumptions
+
+.. toctree::
+ :caption: API
+ :maxdepth: 1
+
+ design_topics/index
+ future_API_evolution
+ API_specification/index
+ extensions/index
+
+.. toctree::
+ :caption: Methodology and Usage
+ :maxdepth: 1
+
+ usage_data
+ verification_test_suite
+ benchmark_suite
+
+.. toctree::
+ :caption: Other
+ :maxdepth: 1
+
+ changelog
+ license
diff --git a/spec/2024.12/license.rst b/spec/2024.12/license.rst
new file mode 100644
index 000000000..06ec75dfc
--- /dev/null
+++ b/spec/2024.12/license.rst
@@ -0,0 +1,9 @@
+License
+=======
+
+All content on this website and the corresponding
+`GitHub repository <https://github.com/data-apis/array-api>`__ is licensed
+under the following license:
+
+ .. include:: ../../LICENSE
+ :parser: myst_parser.sphinx_
diff --git a/spec/2024.12/purpose_and_scope.md b/spec/2024.12/purpose_and_scope.md
new file mode 100644
index 000000000..b2019b7dd
--- /dev/null
+++ b/spec/2024.12/purpose_and_scope.md
@@ -0,0 +1,470 @@
+# Purpose and scope
+
+## Introduction
+
+Python users have a wealth of choice for libraries and frameworks for
+numerical computing, data science, machine learning, and deep learning. New
+frameworks pushing forward the state of the art in these fields are appearing
+every year. One unintended consequence of all this activity and creativity
+has been fragmentation in multidimensional array (a.k.a. tensor) libraries -
+which are the fundamental data structure for these fields. Choices include
+NumPy, Tensorflow, PyTorch, Dask, JAX, CuPy, MXNet, Xarray, and others.
+
+The APIs of each of these libraries are largely similar, but with enough
+differences that it's quite difficult to write code that works with multiple
+(or all) of these libraries. This array API standard aims to address that
+issue, by specifying an API for the most common ways arrays are constructed
+and used.
+
+Why not simply pick an existing API and bless that as the standard? In short,
+because there are often good reasons for the current inconsistencies between
+libraries. The most obvious candidate for that existing API is NumPy. However
+NumPy was not designed with non-CPU devices, graph-based libraries, or JIT
+compilers in mind. Other libraries often deviate from NumPy for good
+(necessary) reasons. Choices made in this API standard are often the same
+ones NumPy makes, or close to it, but are different where necessary to make
+sure all existing array libraries can adopt this API.
+
+
+### This API standard
+
+This document aims to standardize functionality that exists in most/all array
+libraries and either is commonly used or is needed for
+consistency/completeness. Usage is determined via analysis of downstream
+libraries, see {ref}`usage-data`. An example of consistency is: there are
+functional equivalents for all Python operators (including the rarely used
+ones).
+
+Beyond usage and consistency, there's a set of use cases that inform the API
+design to ensure it's fit for a wide range of users and situations - see
+{ref}`use-cases`.
+
+A question that may arise when reading this document is: _"what about
+functionality that's not present in this document?_ This:
+
+- means that there is no guarantee the functionality is present in libraries
+ adhering to the standard
+- does _not_ mean that that functionality is unimportant
+- may indicate that that functionality, if present in a particular array
+ library, is unlikely to be present in all other libraries
+
+### History
+
+The first library for numerical and scientific computing in Python was
+Numeric, developed in the mid-1990s. In the early 2000s a second, similar
+library, Numarray, was created. In 2005 NumPy was written, superseding both
+Numeric and Numarray and resolving the fragmentation at that time. For
+roughly a decade, NumPy was the only widely used array library. Over the past
+~5 years, mainly due to the emergence of new hardware and the rise of deep
+learning, many other libraries have appeared, leading to more severe
+fragmentation. Concepts and APIs in newer libraries were often inspired by
+(or copied from) those in older ones - and then changed or improved upon to
+fit new needs and use cases. Individual library authors discussed ideas,
+however there was never (before this array API standard) a serious attempt
+to coordinate between all libraries to avoid fragmentation and arrive at a
+common API standard.
+
+The idea for this array API standard grew gradually out of many conversations
+between maintainers during 2019-2020. It quickly became clear that any
+attempt to write a new "reference library" to fix the current fragmentation
+was infeasible - unlike in 2005, there are now too many different use cases
+and too many stakeholders, and the speed of innovation is too high. In May
+2020 an initial group of maintainers was assembled in the [Consortium for
+Python Data API Standards](https://data-apis.org/) to start drafting a
+specification for an array API that could be adopted by each of the existing
+array and tensor libraries. That resulted in this document, describing that
+API.
+
+
+(Scope)=
+
+## Scope (includes out-of-scope / non-goals)
+
+This section outlines what is in scope and out of scope for this API standard.
+
+### In scope
+
+The scope of the array API standard includes:
+
+- Functionality which needs to be included in an array library for it to adhere
+ to this standard.
+- Names of functions, methods, classes and other objects.
+- Function signatures, including type annotations.
+- Semantics of functions and methods. I.e. expected outputs including precision
+ for and dtypes of numerical results.
+- Semantics in the presence of `nan`'s, `inf`'s, empty arrays (i.e. arrays
+ including one or more dimensions of size `0`).
+- Casting rules, broadcasting, indexing
+- Data interchange. I.e. protocols to convert one type of array into another
+ type, potentially sharing memory.
+- Device support.
+
+Furthermore, meta-topics included in this standard include:
+
+- Use cases for the API standard and assumptions made in it
+- API standard adoption
+- API standard versioning
+- Future API standard evolution
+- Array library and API standard versioning
+- Verification of API standard conformance
+
+The concrete set of functionality that is in scope for this version of the
+standard is shown in this diagram:
+
+
+
+
+**Goals** for the API standard include:
+
+- Make it possible for array-consuming libraries to start using multiple types
+ of arrays as inputs.
+- Enable more sharing and reuse of code built on top of the core functionality
+ in the API standard.
+- For authors of new array libraries, provide a concrete API that can be
+ adopted as is, rather than each author having to decide what to borrow from
+ where and where to deviate.
+- Make the learning curve for users less steep when they switch from one array
+ library to another one.
+
+
+### Out of scope
+
+1. Implementations of the standard are out of scope.
+
+ _Rationale: the standard will consist of a document and an accompanying test
+ suite with which the conformance of an implementation can be verified. Actual
+ implementations will live in array libraries; no reference implementation is
+ planned._
+
+2. Execution semantics are out of scope. This includes single-threaded vs.
+ parallel execution, task scheduling and synchronization, eager vs. delayed
+ evaluation, performance characteristics of a particular implementation of the
+ standard, and other such topics.
+
+ _Rationale: execution is the domain of implementations. Attempting to specify
+ execution behavior in a standard is likely to require much more fine-grained
+ coordination between developers of implementations, and hence is likely to
+ become an obstacle to adoption._
+
+3. Non-Python API standardization (e.g., Cython or NumPy C APIs)
+
+ _Rationale: this is an important topic for some array-consuming libraries,
+ but there is no widely shared C/Cython API and hence it doesn't make sense at
+ this point in time to standardize anything. See
+ the [C API section](design_topics/C_API.rst) for more details._
+
+4. Standardization of these dtypes is out of scope: bfloat16, extended
+ precision floating point, datetime, string, object and void dtypes.
+
+ _Rationale: these dtypes aren't uniformly supported, and their inclusion at
+ this point in time could put a significant implementation burden on
+ libraries. It is expected that some of these dtypes - in particular
+ `bfloat16` - will be included in a future version of the standard._
+
+5. The following topics are out of scope: I/O, polynomials, error handling,
+ testing routines, building and packaging related functionality, methods of
+ binding compiled code (e.g., `cffi`, `ctypes`), subclassing of an array
+ class, masked arrays, and missing data.
+
+ _Rationale: these topics are not core functionality for an array library,
+ and/or are too tied to implementation details._
+
+6. NumPy (generalized) universal functions, i.e. ufuncs and gufuncs.
+
+ _Rationale: these are NumPy-specific concepts, and are mostly just a
+ particular way of building regular functions with a few extra
+ methods/properties._
+
+7. Behaviour for unexpected/invalid input to functions and methods.
+
+ _Rationale: there are a huge number of ways in which users can provide
+ invalid or unspecified input to functionality in the standard. Exception
+ types or other resulting behaviour cannot be completely covered and would
+ be hard to make consistent between libraries._
+
+
+**Non-goals** for the API standard include:
+
+- Making array libraries identical so they can be merged.
+
+ _Each library will keep having its own particular strength, whether it's
+ offering functionality beyond what's in the standard, performance advantages
+ for a given use case, specific hardware or software environment support, or
+ more._
+
+- Implement a backend or runtime switching system to be able to switch from one
+ array library to another with a single setting or line of code.
+
+ _This may be feasible, however it's assumed that when an array-consuming
+ library switches from one array type to another, some testing and possibly
+ code adjustment for performance or other reasons may be needed._
+
+- Making it possible to mix multiple array libraries in function calls.
+
+ _Most array libraries do not know about other libraries, and the functions
+ they implement may try to convert "foreign" input, or raise an exception.
+ This behaviour is hard to specify; ensuring only a single array type is
+ used is best left to the end user._
+
+
+### Implications of in/out of scope
+
+If something is out of scope and therefore will not be part of (the current
+version of) the API standard, that means that there are no guarantees that that
+functionality works the same way, or even exists at all, across the set of
+array libraries that conform to the standard. It does _not_ imply that this
+functionality is less important or should not be used.
+
+
+## Stakeholders
+
+Arrays are fundamental to scientific computing, data science, and machine
+learning and deep learning. Hence there are many stakeholders for an array API
+standard. The _direct_ stakeholders of this standard are **authors/maintainers of
+Python array libraries**. There are many more types of _indirect_ stakeholders
+though, including:
+
+- maintainers of libraries and other programs which depend on array libraries
+ (called "array-consuming libraries" in the rest of this document)
+- authors of non-Python array libraries
+- developers of compilers and runtimes with array-specific functionality
+- end users
+
+Libraries that are being actively considered - in terms of current behaviour and
+API surface - during the creation of the first version of this standard
+include:
+
+- [NumPy](https://numpy.org)
+- [TensorFlow](https://www.tensorflow.org/)
+- [PyTorch](https://pytorch.org/)
+- [MXNet](https://numpy.mxnet.io/)
+- [JAX](https://github.com/google/jax)
+- [Dask](https://dask.org/)
+- [CuPy](https://cupy.chainer.org/)
+
+Other Python array libraries that are currently under active development and
+could adopt this API standard include:
+
+- [xarray](https://xarray.pydata.org/)
+- [PyData/Sparse](https://sparse.pydata.org)
+- [Weld](https://github.com/weld-project/weld)
+- [Bohrium](https://bohrium.readthedocs.io/)
+- [Arkouda](https://github.com/mhmerrill/arkouda)
+- [Legate](https://research.nvidia.com/publication/2019-11_Legate-NumPy%3A-Accelerated)
+
+There are a huge number of array-consuming libraries; some of the most
+prominent ones that are being taken into account - in terms of current array
+API usage or impact of design decisions on them - include (this list is likely
+to grow over time):
+
+- [Pandas](https://pandas.pydata.org/)
+- [SciPy](https://github.com/scipy/scipy)
+- [scikit-learn](https://scikit-learn.org/)
+- [Matplotlib](https://matplotlib.org/)
+- [scikit-image](https://scikit-image.org/)
+- [NetworkX](https://networkx.github.io/)
+
+Array libraries in other languages, some of which may grow a Python API in the
+future or have taken inspiration from NumPy or other array libraries, include:
+
+- [Xtensor](https://xtensor.readthedocs.io) (C++, cross-language)
+- [XND](https://xnd.io/) (C, cross-language)
+- [stdlib](https://stdlib.io/) (JavaScript)
+- [rust-ndarray](https://github.com/rust-ndarray/ndarray) (Rust)
+- [rray](https://github.com/r-lib/rray) (R)
+- [ND4J](https://github.com/deeplearning4j/nd4j) (JVM)
+- [NumSharp](https://github.com/SciSharp/NumSharp) (C#)
+
+Compilers, runtimes, and dispatching layers for which this API standard may be
+relevant:
+
+- [Cython](https://cython.org/)
+- [Numba](http://numba.pydata.org/)
+- [Pythran](https://pythran.readthedocs.io/en/latest/)
+- [Transonic](https://transonic.readthedocs.io)
+- [ONNX](https://onnx.ai/)
+- [Apache TVM](https://tvm.apache.org/)
+- [MLIR](https://mlir.llvm.org/)
+- [TACO](https://github.com/tensor-compiler/taco)
+- [unumpy](https://github.com/Quansight-Labs/unumpy)
+- [einops](https://github.com/arogozhnikov/einops)
+- [Apache Arrow](https://arrow.apache.org/)
+
+
+
+## How to read this document
+
+For guidance on how to read and understand the type annotations included in this specification, consult the Python [documentation](https://docs.python.org/3/library/typing.html).
+
+
+(how-to-adopt-this-api)=
+
+## How to adopt this API
+
+Most (all) existing array libraries will find something in this API standard
+that is incompatible with a current implementation, and that they cannot
+change due to backwards compatibility concerns. Therefore we expect that each
+of those libraries will want to offer a standard-compliant API in a _new
+namespace_. The question then becomes: how does a user access this namespace?
+
+The simplest method is: document the import to use to directly access the
+namespace (e.g. `import package_name.array_api`). This has two issues though:
+
+1. Array-consuming libraries that want to support multiple array libraries
+ then have to explicitly import each library.
+2. It is difficult to _version_ the array API standard implementation (see
+ {ref}`api-versioning`).
+
+To address both issues, a uniform way must be provided by a conforming
+implementation to access the API namespace, namely a [method on the array
+object](array.__array_namespace__):
+
+```
+xp = x.__array_namespace__()
+```
+
+The method must take one keyword, `api_version=None`, to make it possible to
+request a specific API version:
+
+```
+xp = x.__array_namespace__(api_version='2020.10')
+```
+
+The `xp` namespace must contain all functionality specified in
+{ref}`api-specification`. The namespace may contain other functionality; however,
+including additional functionality is not recommended as doing so may hinder
+portability and inter-operation of array libraries within user code.
+
+### Checking an array object for compliance
+
+Array-consuming libraries are likely to want a mechanism for determining
+whether a provided array is specification compliant. The recommended approach
+to check for compliance is by checking whether an array object has an
+`__array_namespace__` attribute, as this is the one distinguishing feature of
+an array-compliant object.
+
+Checking for an `__array_namespace__` attribute can be implemented as a small
+utility function similar to the following.
+
+```python
+def is_array_api_obj(x):
+ return hasattr(x, '__array_namespace__')
+```
+
+```{note}
+Providing a "reference library" on which people depend is out-of-scope for
+the standard. Hence the standard cannot, e.g., provide an array ABC from
+which libraries can inherit to enable an `isinstance` check. However, note
+that the `numpy.array_api` implementation aims to provide a reference
+implementation with only the behavior specified in this standard - it may
+prove useful for verifying one is writing portable code.
+```
+
+### Discoverability of conforming implementations
+
+It may be useful to have a way to discover all packages in a Python
+environment which provide a conforming array API implementation, and the
+namespace that that implementation resides in.
+To assist array-consuming libraries which need to create arrays originating
+from multiple conforming array implementations, or developers who want to perform
+for example cross-library testing, libraries may provide an
+{pypa}`entry point <specifications/entry-points/>` in order to make an array API
+namespace discoverable.
+
+:::{admonition} Optional feature
+Given that entry points typically require build system & package installer
+specific implementation, this standard chooses to recommend rather than
+mandate providing an entry point.
+:::
+
+The following code is an example for how one can discover installed
+conforming libraries:
+
+```python
+from importlib.metadata import entry_points
+
+try:
+ eps = entry_points()['array_api']
+ ep = next(ep for ep in eps if ep.name == 'package_name')
+except TypeError:
+ # The dict interface for entry_points() is deprecated in py3.10,
+ # supplanted by a new select interface.
+ ep = entry_points(group='array_api', name='package_name')
+
+xp = ep.load()
+```
+
+An entry point must have the following properties:
+
+- **group**: equal to `array_api`.
+- **name**: equal to the package name.
+- **object reference**: equal to the array API namespace import path.
+
+
+* * *
+
+## Conformance
+
+A conforming implementation of the array API standard must provide and support
+all the functions, arguments, data types, syntax, and semantics described in
+this specification.
+
+A conforming implementation of the array API standard may provide additional
+features (e.g., values, objects, properties, data types, functions, and function
+arguments) beyond those described in this specification.
+
+Libraries which aim to provide a conforming implementation but haven't yet
+completed such an implementation may, and are encouraged to, provide details on
+the level of (non-)conformance. For details on how to do this, see
+[Verification - measuring conformance](verification_test_suite.md).
+
+
+* * *
+
+## Terms and Definitions
+
+For the purposes of this specification, the following terms and definitions apply.
+
+
+
+**array**:
+a (usually fixed-size) multidimensional container of items of the same type and size.
+
+**axis**:
+an array dimension.
+
+**branch cut**:
+a curve in the complex plane across which a given complex function fails to be continuous.
+
+**broadcast**:
+automatic (implicit) expansion of array dimensions to be of equal sizes without copying array data for the purpose of making arrays with different shapes have compatible shapes for element-wise operations.
+
+**compatible**:
+two arrays whose dimensions are compatible (i.e., where the size of each dimension in one array is either equal to one or to the size of the corresponding dimension in a second array).
+
+**element-wise**:
+an operation performed element-by-element, in which individual array elements are considered in isolation and independently of other elements within the same array.
+
+**matrix**:
+a two-dimensional array.
+
+**rank**:
+number of array dimensions (not to be confused with the number of linearly independent columns of a matrix).
+
+**shape**:
+a tuple of `N` non-negative integers that specify the sizes of each dimension and where `N` corresponds to the number of dimensions.
+
+**singleton dimension**:
+a dimension whose size is one.
+
+**vector**:
+a one-dimensional array.
+
+* * *
+
+## Normative References
+
+The following referenced documents are indispensable for the application of this specification.
+
+- __IEEE 754-2019: IEEE Standard for Floating-Point Arithmetic.__ Institute of Electrical and Electronic Engineers, New York (2019).
+- Scott Bradner. 1997. "Key words for use in RFCs to Indicate Requirement Levels". RFC 2119. doi:[10.17487/rfc2119](https://tools.ietf.org/html/rfc2119).
diff --git a/spec/2024.12/usage_data.md b/spec/2024.12/usage_data.md
new file mode 100644
index 000000000..c2dcd5d65
--- /dev/null
+++ b/spec/2024.12/usage_data.md
@@ -0,0 +1,86 @@
+(usage-data)=
+
+# Usage Data
+
+> Summary of existing array API design and usage.
+
+## Introduction
+
+With rare exception, technical standardization ("standardization") occurs neither in a vacuum nor from first principles. Instead, standardization finds its origins in two or more, sometimes competing, implementations differing in design and behavior. These differences introduce friction as those (e.g., downstream end-users and library authors) who operate at higher levels of abstraction must either focus on an implementation subset (e.g., only NumPy-like array libraries) or accommodate variation through increased complexity (e.g., if NumPy array, call method `.foo()`; else if Dask array, call method `.bar()`).
+
+Standardization aspires to reduce this friction and is a process which codifies that which is common, while still encouraging experimentation and innovation. Through the process of standardization, implementations can align around a subset of established practices and channel development resources toward that which is new and novel. In short, standardization aims to thwart reinventing the proverbial wheel.
+
+A foundational step in standardization is articulating a subset of established practices and defining those practices in unambiguous terms. To this end, the standardization process must approach the problem from two directions: **design** and **usage**. The former direction seeks to understand
+
+- current implementation design (APIs, names, signatures, classes, and objects)
+- current implementation semantics (calling conventions and behavior)
+
+while the latter direction seeks to quantify API
+
+- consumers (e.g., which downstream libraries utilize an API?)
+- usage frequency (e.g., how often is an API consumed?)
+- consumption patterns (e.g., which optional arguments are provided and in what context?)
+
+By analyzing both design and usage, the standardization process grounds specification decisions in empirical data and analysis.
+
+## Design
+
+To understand API design, standardization follows the process below.
+
+- Identify a representative sample of commonly used Python array libraries (e.g., NumPy, Dask Array, CuPy, MXNet, JAX, TensorFlow, and PyTorch).
+- Acquire public APIs (e.g., by analyzing module exports and scraping public documentation).
+- Unify and standardize public API data representation for subsequent analysis.
+- Extract commonalities and differences by analyzing the intersection and complement of available APIs.
+- Derive a common API subset suitable for standardization (based on prevalence and ease of implementation), where such a subset may include attribute names, method names, and positional and keyword arguments.
+- Leverage usage data to validate API need and to inform naming conventions, supported data types, and/or optional arguments.
+- Summarize findings and provide tooling for additional analysis and exploration.
+
+See the [`array-api-comparison`](https://github.com/data-apis/array-api-comparison)
+repository for design data and summary analysis.
+
+## Usage
+
+To understand usage patterns, standardization follows the process below.
+
+- Identify a representative sample of commonly used Python libraries ("downstream libraries") which consume the subset of array libraries identified during design analysis (e.g., pandas, Matplotlib, SciPy, Xarray, scikit-learn, and scikit-image).
+- Instrument downstream libraries in order to record Python array API calls.
+- Collect traces while running downstream library test suites.
+- Transform trace data into structured data (e.g., as JSON) for subsequent analysis.
+- Generate empirical APIs based on provided arguments and associated types, noting which downstream library called which empirical API and at what frequency.
+- Derive a single inferred API which unifies the individual empirical API calling semantics.
+- Organize API results in human-readable form as type definition files.
+- Compare the inferred API to the documented API.
+
+The following is an inferred API for `numpy.arange`. The docstring includes the number of lines of code that invoked this function for each downstream library when running downstream library test suites.
+
+```python
+def arange(
+ _0: object,
+ /,
+ *_args: object,
+ dtype: Union[type, str, numpy.dtype, None] = ...,
+ step: Union[int, float] = ...,
+ stop: int = ...,
+):
+ """
+ usage.dask: 347
+ usage.matplotlib: 359
+ usage.pandas: 894
+ usage.sample-usage: 4
+ usage.scipy: 1173
+ usage.skimage: 174
+ usage.sklearn: 373
+ usage.xarray: 666
+ """
+ ...
+```
+
+See the [`python-record-api`](https://github.com/data-apis/python-record-api) repository for source code, usage data, and analysis. To perform a similar analysis on additional downstream libraries, including those not publicly released, see the published PyPI [package](https://pypi.org/project/record_api/).
+
+## Use in Decision-Making
+
+Design and usage data support specification decision-making in the following ways.
+
+- Validate user stories to ensure that proposals satisfy existing needs.
+- Define scope to ensure that proposals address general array library design requirements (i.e., proposals must have broad applicability and be possible to implement with a reasonable amount of effort).
+- Inform technical design discussions to ensure that proposals are grounded in empirical data.
diff --git a/spec/2024.12/use_cases.md b/spec/2024.12/use_cases.md
new file mode 100644
index 000000000..e24aa50db
--- /dev/null
+++ b/spec/2024.12/use_cases.md
@@ -0,0 +1,235 @@
+(use-cases)=
+
+# Use cases
+
+Use cases inform the requirements for, and design choices made in, this array
+API standard. This section first discusses what types of use cases are
+considered, and then works out a few concrete use cases in more detail.
+
+## Types of use cases
+
+- Packages that depend on a specific array library currently, and would like
+ to support multiple of them (e.g. for GPU or distributed array support, for
+ improved performance, or for reaching a wider user base).
+- Writing new libraries/tools that wrap multiple array libraries.
+- Projects that implement new types of arrays with, e.g., hardware-specific
+ optimizations or auto-parallelization behavior, and need an API to put on
+ top that is familiar to end users.
+- End users that want to switch from one library to another without learning
+ about all the small differences between those libraries.
+
+
+## Concrete use cases
+
+- {ref}`use-case-scipy`
+- {ref}`use-case-einops`
+- {ref}`use-case-xtensor`
+- {ref}`use-case-numba`
+
+
+(use-case-scipy)=
+
+### Use case 1: add hardware accelerator and distributed support to SciPy
+
+When surveying a representative set of advanced users and research software
+engineers in 2019 (for [this NSF proposal](https://figshare.com/articles/Mid-Scale_Research_Infrastructure_-_The_Scientific_Python_Ecosystem/8009441)),
+the single most common pain point brought up about SciPy was performance.
+
+SciPy heavily relies on NumPy (its only non-optional runtime dependency).
+NumPy provides an array implementation that's in-memory, CPU-only and
+single-threaded. Common performance-related wishes users have are:
+
+- parallel algorithms (can be multi-threaded or multiprocessing based)
+- support for distributed arrays (with Dask in particular)
+- support for GPUs and other hardware accelerators (shortened to just "GPU"
+ in the rest of this use case)
+
+Some parallelism can be supported in SciPy; it has a `workers` keyword
+(similar to scikit-learn's `n_jobs` keyword) that allows opting in to
+parallelism in some algorithms. However, SciPy itself will not directly start
+depending on a GPU or distributed array implementation, or contain (e.g.)
+CUDA code - that's not maintainable given the resources for development.
+_However_, there is a way to provide distributed or GPU support. Part of the
+solution is provided by NumPy's "array protocols" (see [gh-1](https://github.com/data-apis/array-api/issues/1)), that allow
+dispatching to other array implementations. The main problem then becomes how
+to know whether this will work with a particular distributed or GPU array
+implementation - given that there are zero other array implementations that
+are even close to providing full NumPy compatibility - without adding that
+array implementation as a dependency.
+
+It's clear that SciPy functionality that relies on compiled extensions (C,
+C++, Cython, Fortran) directly can't easily be run on another array library
+than NumPy (see [C API](design_topics/C_API.rst) for more details about this topic). Pure Python
+code can work though. There are two main possibilities:
+
+1. Testing with another package, manually or in CI, and simply provide a list
+ of functionality that is found to work. Then make ad-hoc fixes to expand
+ the set that works.
+2. Start relying on a well-defined subset of the NumPy API (or a new
+ NumPy-like API), for which compatibility is guaranteed.
+
+Option (2) seems strongly preferable, and that "well-defined subset" is _what
+an API standard should provide_. Testing will still be needed, to ensure there
+are no critical corner cases or bugs between array implementations, however
+that's then a very tractable task.
+
+As a concrete example, consider the spectral analysis functions in `scipy.signal`.
+All of those functions (e.g., `periodogram`, `spectrogram`, `csd`, `welch`, `stft`,
+`istft`) are pure Python - with the exception of `lombscargle` which is ~40
+lines of Cython - and use NumPy function calls, array attributes and
+indexing. The beginning of each function could be changed to retrieve the
+module that implements the array API standard for the given input array type,
+and then functions from that module could be used instead of NumPy functions.
+
+If the user has another array type, say a CuPy or PyTorch array `x` on their
+GPU, doing:
+```
+from scipy import signal
+
+signal.welch(x)
+```
+will result in:
+```
+# For CuPy
+ValueError: object __array__ method not producing an array
+
+# For PyTorch
+TypeError: can't convert cuda:0 device type tensor to numpy.
+```
+and therefore the user will have to explicitly convert to and from a
+`numpy.ndarray` (which is quite inefficient):
+```
+# For CuPy
+x_np = cupy.asnumpy(x)
+freq, Pxx = (cupy.asarray(res) for res in signal.welch(x_np))
+
+# For PyTorch
+x_np = x.cpu().numpy()
+# Note: ends up with tensors on CPU, may still have to move them back
+freq, Pxx = (torch.tensor(res) for res in signal.welch(x_np))
+```
+This code will look a little different for each array library. The end goal
+here is to be able to write this instead as:
+```
+freq, Pxx = signal.welch(x)
+```
+and have `freq`, `Pxx` be arrays of the same type and on the same device as `x`.
+
+```{note}
+
+This type of use case applies to many other libraries, from scikit-learn
+and scikit-image to domain-specific libraries like AstroPy and
+scikit-bio, to code written for a single purpose or user.
+```
+
+(use-case-einops)=
+
+### Use case 2: simplify einops by removing the backend system
+
+[einops](https://github.com/arogozhnikov/einops) is a library that provides flexible tensor operations and supports many array libraries (NumPy, TensorFlow, PyTorch, CuPy, MXNet, JAX).
+Most of the code in `einops` is:
+
+- [einops.py](https://github.com/arogozhnikov/einops/blob/master/einops/einops.py)
+ contains the functions it offers as public API (`rearrange`, `reduce`, `repeat`).
+- [_backends.py](https://github.com/arogozhnikov/einops/blob/master/einops/_backends.py)
+ contains the glue code needed to support that many array libraries.
+
+The amount of code in each of those two files is almost the same (~550 LoC each).
+The typical pattern in `einops.py` is:
+```
+def some_func(x):
+ ...
+ backend = get_backend(x)
+ shape = backend.shape(x)
+ result = backend.reduce(x)
+ ...
+```
+With a standard array API, the `_backends.py` glue layer could almost completely disappear,
+because the purpose it serves (providing a unified interface to array operations from each
+of the supported backends) is already addressed by the array API standard.
+Hence the complete `einops` code base could be close to 50% smaller, and easier to maintain or add to.
+
+```{note}
+
+Other libraries that have a similar backend system to support many array libraries
+include [TensorLy](https://github.com/tensorly/tensorly), the (now discontinued)
+multi-backend version of [Keras](https://github.com/keras-team/keras),
+[Unumpy](https://github.com/Quansight-Labs/unumpy) and
+[EagerPy](https://github.com/jonasrauber/eagerpy). Many end users and
+organizations will also have such glue code - it tends to be needed whenever
+one tries to support multiple array types in a single API.
+```
+
+
+(use-case-xtensor)=
+
+### Use case 3: adding a Python API to xtensor
+
+[xtensor](https://github.com/xtensor-stack/xtensor) is a C++ array library
+that is NumPy-inspired and provides lazy arrays. It has Python (and Julia and R)
+bindings, however it does not have a Python array API.
+
+Xtensor aims to follow NumPy closely; however, it only implements a subset of functionality
+and documents some API differences in
+[Notable differences with NumPy](https://xtensor.readthedocs.io/en/latest/numpy-differences.html).
+
+Note that other libraries document similar differences, see for example
+[this page for JAX](https://jax.readthedocs.io/en/latest/jax.numpy.html) and
+[this page for TensorFlow](https://www.tensorflow.org/guide/tf_numpy).
+
+Each time an array library author designs a new API, they have to choose (a)
+what subset of NumPy makes sense to implement, and (b) where to deviate
+because NumPy's API for a particular function is suboptimal or the semantics
+don't fit their execution model.
+
+This array API standard aims to provide an API that can be readily adopted,
+without having to make the above-mentioned choices.
+
+```{note}
+
+XND is another array library, written in C, that still needs a Python API.
+Array implementations in other languages are often in a similar situation,
+and could translate this array API standard 1:1 to their language.
+```
+
+
+(use-case-numba)=
+
+### Use case 4: make JIT compilation of array computations easier and more robust
+
+[Numba](https://github.com/numba/numba) is a Just-In-Time (JIT) compiler for
+numerical functions in Python; it is NumPy-aware. [PyPy](https://pypy.org)
+is an implementation of Python with a JIT at its core; its NumPy support relies
+on running NumPy itself through a compatibility layer (`cpyext`), while a
+previous attempt to implement NumPy support directly was unsuccessful.
+
+Other array libraries may have an internal JIT (e.g., TensorFlow, PyTorch,
+JAX, MXNet) or work with an external JIT like
+[XLA](https://www.tensorflow.org/xla) or [VTA](https://tvm.apache.org/docs/vta/index.html).
+
+Numba currently has to jump through some hoops to accommodate NumPy's casting rules
+and may not attain full compatibility with NumPy in some cases - see, e.g.,
+[this](https://github.com/numba/numba/issues/4749) or
+[this](https://github.com/numba/numba/issues/5907) example issue regarding (array) scalar
+return values.
+
+An [explicit suggestion from a Numba developer](https://twitter.com/esc___/status/1295389487485333505)
+for this array API standard was:
+
+> for JIT compilers (e.g. Numba) it will be important, that the type of the
+ returned value(s) depends only on the *types* of the input but not on the
+ *values*.
+
+A concrete goal for this use case is to have better matching between
+JIT-compiled and non-JIT execution. Here is an example from the Numba code
+base, the need for which should be avoided in the future:
+
+```
+def check(x, y):
+ got = cfunc(x, y)
+ np.testing.assert_array_almost_equal(got, pyfunc(x, y))
+ # Check the power operation conserved the input's dtype
+ # (this is different from Numpy, whose behaviour depends on
+ # the *values* of the arguments -- see PyArray_CanCastArrayTo).
+ self.assertEqual(got.dtype, x.dtype)
+```
diff --git a/spec/2024.12/verification_test_suite.md b/spec/2024.12/verification_test_suite.md
new file mode 100644
index 000000000..cbe770e48
--- /dev/null
+++ b/spec/2024.12/verification_test_suite.md
@@ -0,0 +1,62 @@
+# Verification - test suite
+
+## Measuring conformance
+
+In addition to the specification documents, a test suite is being developed to
+aid library developers check conformance to the spec. **NOTE: The test suite
+is still a work in progress.** It can be found at
+<https://github.com/data-apis/array-api-tests>.
+
+It is important to note that while the aim of the array API test suite is to
+cover as much of the spec as possible, there are necessarily some aspects of
+the spec that are not covered by the test suite, typically because they are
+impossible to effectively test. Furthermore, if the test suite appears to
+diverge in any way from what the spec documents say, this should be considered
+a bug in the test suite. The specification is the authoritative source of truth.
+
+## Running the tests
+
+To run the tests, first clone the [test suite
+repo](https://github.com/data-apis/array-api-tests), and install the testing
+dependencies,
+
+ pip install pytest hypothesis
+
+or
+
+ conda install pytest hypothesis
+
+as well as the array libraries that you want to test. To run the tests, you
+need to specify the array library that is to be tested. There are two ways to
+do this. One way is to set the `ARRAY_API_TESTS_MODULE` environment variable.
+For example
+
+ ARRAY_API_TESTS_MODULE=numpy pytest
+
+Alternatively, edit the `array_api_tests/_array_module.py` file and change the
+line
+
+```py
+array_module = None
+```
+
+to
+
+```py
+import numpy as array_module
+```
+
+(replacing `numpy` with the array module namespace to be tested).
+
+In either case, the tests should be run with the `pytest` command.
+
+Aside from the two testing dependencies (`pytest` and `hypothesis`), the test
+suite has no dependencies. In particular, it does not depend on any specific
+array libraries such as NumPy. All tests are run using only the array library
+that is being tested, comparing results against the behavior as defined in the
+spec. The test suite is designed to be standalone so that it can easily be vendored.
+
+See the
+[README](https://github.com/data-apis/array-api-tests/blob/master/README.md)
+in the test suite repo for more information about how to run and interpret the
+test suite results.
diff --git a/spec/API_specification/array_api/fft.py b/spec/API_specification/array_api/fft.py
deleted file mode 100644
index 2eb428b0c..000000000
--- a/spec/API_specification/array_api/fft.py
+++ /dev/null
@@ -1,601 +0,0 @@
-from ._types import Tuple, Union, Sequence, array, Optional, Literal, device
-
-
-def fft(
- x: array,
- /,
- *,
- n: Optional[int] = None,
- axis: int = -1,
- norm: Literal["backward", "ortho", "forward"] = "backward",
-) -> array:
- """
- Computes the one-dimensional discrete Fourier transform.
-
- .. note::
- Applying the one-dimensional inverse discrete Fourier transform to the output of this function must return the original (i.e., non-transformed) input array within numerical accuracy (i.e., ``ifft(fft(x)) == x``), provided that the transform and inverse transform are performed with the same arguments (length, axis, and normalization mode).
-
- Parameters
- ----------
- x: array
- input array. Should have a floating-point data type.
- n: int
- length of the transformed axis of the output. If
-
- - ``n`` is greater than the length of the input array, the input array is zero-padded to length ``n``.
- - ``n`` is less than the length of the input array, the input array is trimmed to length ``n``.
- - ``n`` is not provided, the length of the transformed axis of the output must equal the length of the input along the axis specified by ``axis``.
-
- Default: ``None``.
- axis: int
- axis (dimension) over which to compute the Fourier transform. If not set, the last axis (dimension) is used.
-
- Default: ``-1``.
- norm: Literal['backward', 'ortho', 'forward']
- normalization mode. Should be one of the following modes:
-
- - ``'backward'``: no normalization.
- - ``'ortho'``: normalize by ``1/sqrt(n)`` (i.e., make the FFT orthonormal).
- - ``'forward'``: normalize by ``1/n``.
-
- Default: ``'backward'``.
-
- Returns
- -------
- out: array
- an array transformed along the axis (dimension) indicated by ``axis``. The returned array must have a complex floating-point data type determined by :ref:`type-promotion`.
- """
-
-
-def ifft(
- x: array,
- /,
- *,
- n: Optional[int] = None,
- axis: int = -1,
- norm: Literal["backward", "ortho", "forward"] = "backward",
-) -> array:
- """
- Computes the one-dimensional inverse discrete Fourier transform.
-
- .. note::
- Applying the one-dimensional inverse discrete Fourier transform to the output of this function must return the original (i.e., non-transformed) input array within numerical accuracy (i.e., ``ifft(fft(x)) == x``), provided that the transform and inverse transform are performed with the same (length, axis, and normalization mode).
-
- Parameters
- ----------
- x: array
- input array. Should have a floating-point data type.
- n: int
- length of the transformed axis of the output. If
-
- - ``n`` is greater than the length of the input array, the input array is zero-padded to length ``n``.
- - ``n`` is less than the length of the input array, the input array is trimmed to length ``n``.
- - ``n`` is not provided, the length of the transformed axis of the output must equal the length of the input along the axis specified by ``axis``.
-
- Default: ``None``.
- axis: int
- axis (dimension) over which to compute the inverse Fourier transform. If not set, the last axis (dimension) is used.
-
- Default: ``-1``.
- norm: Literal['backward', 'ortho', 'forward']
- normalization mode. Should be one of the following modes:
-
- - ``'backward'``: normalize by ``1/n``.
- - ``'ortho'``: normalize by ``1/sqrt(n)`` (i.e., make the FFT orthonormal).
- - ``'forward'``: no normalization.
-
- Default: ``'backward'``.
-
- Returns
- -------
- out: array
- an array transformed along the axis (dimension) indicated by ``axis``. The returned array must have a complex floating-point data type determined by :ref:`type-promotion`.
- """
-
-
-def fftn(
- x: array,
- /,
- *,
- s: Sequence[int] = None,
- axes: Sequence[int] = None,
- norm: Literal["backward", "ortho", "forward"] = "backward",
-) -> array:
- """
- Computes the n-dimensional discrete Fourier transform.
-
- .. note::
- Applying the n-dimensional inverse discrete Fourier transform to the output of this function must return the original (i.e., non-transformed) input array within numerical accuracy (i.e., ``ifftn(fftn(x)) == x``), provided that the transform and inverse transform are performed with the same arguments (sizes, axes, and normalization mode).
-
- Parameters
- ----------
- x: array
- input array. Should have a floating-point data type.
- s: Sequence[int]
- size of each transformed axis of the output. If
-
- - ``s[i]`` is greater than the size of the input array along the corresponding axis (dimension) ``i``, the input array along the axis ``i`` is zero-padded to size ``s[i]``.
- - ``s[i]`` is less than the size of the input array along a corresponding axis (dimension) ``i``, the input array along the axis ``i`` is trimmed to size ``s[i]``.
- - ``s[i]`` is ``-1``, the whole input array along the axis ``i`` is used (no padding/trimming).
- - ``s`` is not provided, the size of each transformed axis (dimension) in the output array must equal the size of the corresponding axis in the input array.
-
- If ``s`` is not ``None``, ``axes`` must not be ``None`` either, and ``s[i]`` corresponds to the size along the transformed axis specified by ``axes[i]``.
-
- Default: ``None``.
- axes: Sequence[int]
- axes (dimensions) over which to compute the Fourier transform. If ``None``, all axes must be transformed.
-
- If ``s`` is specified, the corresponding ``axes`` to be transformed must be explicitly specified too.
-
- Default: ``None``.
- norm: Literal['backward', 'ortho', 'forward']
- normalization mode. Should be one of the following modes:
-
- - ``'backward'``: no normalization.
- - ``'ortho'``: normalize by ``1/sqrt(n)`` (i.e., make the FFT orthonormal).
- - ``'forward'``: normalize by ``1/n``.
-
- where ``n = prod(s)`` is the logical FFT size.
-
- Default: ``'backward'``.
-
- Returns
- -------
- out: array
- an array transformed along the axes (dimension) indicated by ``axes``. The returned array must have a complex floating-point data type determined by :ref:`type-promotion`.
- """
-
-
-def ifftn(
- x: array,
- /,
- *,
- s: Sequence[int] = None,
- axes: Sequence[int] = None,
- norm: Literal["backward", "ortho", "forward"] = "backward",
-) -> array:
- """
- Computes the n-dimensional inverse discrete Fourier transform.
-
- .. note::
- Applying the n-dimensional inverse discrete Fourier transform to the output of this function must return the original (i.e., non-transformed) input array within numerical accuracy (i.e., ``ifftn(fftn(x)) == x``), provided that the transform and inverse transform are performed with the same arguments (sizes, axes, and normalization mode).
-
- Parameters
- ----------
- x: array
- input array. Should have a floating-point data type.
- s: Sequence[int]
- size of each transformed axis of the output. If
-
- - ``s[i]`` is greater than the size of the input array along the corresponding axis (dimension) ``i``, the input array along the axis ``i`` is zero-padded to size ``s[i]``.
- - ``s[i]`` is less than the size of the input array along a corresponding axis (dimension) ``i``, the input array along the axis ``i`` is trimmed to size ``s[i]``.
- - ``s[i]`` is ``-1``, the whole input array along the axis ``i`` is used (no padding/trimming).
- - ``s`` is not provided, the size of each transformed axis (dimension) in the output array must equal the size of the corresponding axis in the input array.
-
- If ``s`` is not ``None``, ``axes`` must not be ``None`` either, and ``s[i]`` corresponds to the size along the transformed axis specified by ``axes[i]``.
-
- Default: ``None``.
- axes: Sequence[int]
- axes (dimensions) over which to compute the Fourier transform. If ``None``, all axes must be transformed.
-
- If ``s`` is specified, the corresponding ``axes`` to be transformed must be explicitly specified too.
-
- Default: ``None``.
- norm: Literal['backward', 'ortho', 'forward']
-        normalization mode. Should be one of the following modes:
-
- - ``'backward'``: normalize by ``1/n``.
- - ``'ortho'``: normalize by ``1/sqrt(n)`` (i.e., make the FFT orthonormal).
- - ``'forward'``: no normalization.
-
- where ``n = prod(s)`` is the logical FFT size.
-
- Default: ``'backward'``.
-
- Returns
- -------
- out: array
- an array transformed along the axes (dimension) indicated by ``axes``. The returned array must have a complex floating-point data type determined by :ref:`type-promotion`.
- """
-
-
-def rfft(
- x: array,
- /,
- *,
- n: Optional[int] = None,
- axis: int = -1,
- norm: Literal["backward", "ortho", "forward"] = "backward",
-) -> array:
- """
- Computes the one-dimensional discrete Fourier transform for real-valued input.
-
- .. note::
- Applying the one-dimensional inverse discrete Fourier transform for real-valued input to the output of this function must return the original (i.e., non-transformed) input array within numerical accuracy (i.e., ``irfft(rfft(x)) == x``), provided that the transform and inverse transform are performed with the same arguments (axis and normalization mode) and consistent length.
-
- Parameters
- ----------
- x: array
- input array. Must have a real-valued floating-point data type.
- n: int
- length of the transformed axis of the **input**. If
-
- - ``n`` is greater than the length of the input array, the input array is zero-padded to length ``n``.
- - ``n`` is less than the length of the input array, the input array is trimmed to length ``n``.
- - ``n`` is not provided, the length of the transformed axis of the output must equal the length of the input along the axis specified by ``axis``.
-
- Default: ``None``.
- axis: int
- axis (dimension) over which to compute the Fourier transform. If not set, the last axis (dimension) is used.
-
- Default: ``-1``.
- norm: Literal['backward', 'ortho', 'forward']
- normalization mode. Should be one of the following modes:
-
- - ``'backward'``: no normalization.
- - ``'ortho'``: normalize by ``1/sqrt(n)`` (i.e., make the FFT orthonormal).
- - ``'forward'``: normalize by ``1/n``.
-
- Default: ``'backward'``.
-
- Returns
- -------
- out: array
- an array transformed along the axis (dimension) indicated by ``axis``. The returned array must have a complex-valued floating-point data type determined by :ref:`type-promotion`.
- """
-
-
-def irfft(
- x: array,
- /,
- *,
- n: Optional[int] = None,
- axis: int = -1,
- norm: Literal["backward", "ortho", "forward"] = "backward",
-) -> array:
- """
- Computes the one-dimensional inverse of ``rfft`` for complex-valued input.
-
- .. note::
- Applying the one-dimensional inverse discrete Fourier transform for real-valued input to the output of this function must return the original (i.e., non-transformed) input array within numerical accuracy (i.e., ``irfft(rfft(x)) == x``), provided that the transform and inverse transform are performed with the same arguments (axis and normalization mode) and consistent length.
-
- Parameters
- ----------
- x: array
- input array. Should have a complex-valued floating-point data type.
- n: int
- length of the transformed axis of the **output**. If
-
- - ``n//2+1`` is greater than the length of the input array, the input array is zero-padded to length ``n//2+1``.
- - ``n//2+1`` is less than the length of the input array, the input array is trimmed to length ``n//2+1``.
- - ``n`` is not provided, the length of the transformed axis of the output must equal the length ``2*(m-1)``, where ``m`` is the length of the input along the axis specified by ``axis``.
-
- Default: ``None``.
- axis: int
- axis (dimension) over which to compute the inverse Fourier transform. If not set, the last axis (dimension) is used.
-
- Default: ``-1``.
- norm: Literal['backward', 'ortho', 'forward']
- normalization mode. Should be one of the following modes:
-
- - ``'backward'``: normalize by ``1/n``.
- - ``'ortho'``: normalize by ``1/sqrt(n)`` (i.e., make the FFT orthonormal).
- - ``'forward'``: no normalization.
-
- Default: ``'backward'``.
-
- Returns
- -------
- out: array
- an array transformed along the axis (dimension) indicated by ``axis``. The returned array must have a real-valued floating-point data type determined by :ref:`type-promotion`. The length along the transformed axis is ``n`` (if given) or ``2*(m-1)`` (otherwise).
- """
-
-
-def rfftn(
- x: array,
- /,
- *,
- s: Sequence[int] = None,
- axes: Sequence[int] = None,
- norm: Literal["backward", "ortho", "forward"] = "backward",
-) -> array:
- """
- Computes the n-dimensional discrete Fourier transform for real-valued input.
-
- .. note::
- Applying the n-dimensional inverse discrete Fourier transform for real-valued input to the output of this function must return the original (i.e., non-transformed) input array within numerical accuracy (i.e., ``irfftn(rfftn(x)) == x``), provided that the transform and inverse transform are performed with the same arguments (axes and normalization mode) and consistent sizes.
-
- Parameters
- ----------
- x: array
- input array. Must have a real-valued floating-point data type.
- s: Sequence[int]
- size of each transformed axis of the **input**. If
-
- - ``s[i]`` is greater than the size of the input array along the corresponding axis (dimension) ``i``, the input array along the axis ``i`` is zero-padded to size ``s[i]``.
- - ``s[i]`` is less than the size of the input array along a corresponding axis (dimension) ``i``, the input array along the axis ``i`` is trimmed to size ``s[i]``.
- - ``s[i]`` is ``-1``, the whole input array along the axis ``i`` is used (no padding/trimming).
- - ``s`` is not provided, the size of each transformed axis (dimension) in the output array must equal the size of the corresponding axis in the input array.
-
- If ``s`` is not ``None``, ``axes`` must not be ``None`` either, and ``s[i]`` corresponds to the size along the transformed axis specified by ``axes[i]``.
-
- Default: ``None``.
- axes: Sequence[int]
- axes (dimensions) over which to compute the Fourier transform. If ``None``, all axes must be transformed.
-
- If ``s`` is specified, the corresponding ``axes`` to be transformed must be explicitly specified too.
-
- Default: ``None``.
- norm: Literal['backward', 'ortho', 'forward']
- normalization mode. Should be one of the following modes:
-
- - ``'backward'``: no normalization.
- - ``'ortho'``: normalize by ``1/sqrt(n)`` (i.e., make the FFT orthonormal).
- - ``'forward'``: normalize by ``1/n``.
-
-        where ``n = prod(s)`` is the logical FFT size.
-
- Default: ``'backward'``.
-
- Returns
- -------
- out: array
- an array transformed along the axes (dimension) indicated by ``axes``. The returned array must have a complex-valued floating-point data type determined by :ref:`type-promotion`.
- """
-
-
-def irfftn(
- x: array,
- /,
- *,
- s: Sequence[int] = None,
- axes: Sequence[int] = None,
- norm: Literal["backward", "ortho", "forward"] = "backward",
-) -> array:
- """
- Computes the n-dimensional inverse of ``rfftn`` for complex-valued input.
-
- .. note::
- Applying the n-dimensional inverse discrete Fourier transform for real-valued input to the output of this function must return the original (i.e., non-transformed) input array within numerical accuracy (i.e., ``irfftn(rfftn(x)) == x``), provided that the transform and inverse transform are performed with the same arguments (axes and normalization mode) and consistent sizes.
-
- Parameters
- ----------
- x: array
- input array. Should have a complex-valued floating-point data type.
- s: Sequence[int]
- size of each transformed axis of the **output**. ``n=s[i]`` is also the number of input points used along the axis (dimension) ``i``, except for the last axis, where ``n=s[-1]//2+1`` points of the input are used. If
-
- - ``n`` is greater than the size of the input array along the corresponding axis (dimension) ``i``, the input array along the axis ``i`` is zero-padded to size ``n``.
- - ``n`` is less than the size of the input array along the corresponding axis (dimension) ``i``, the input array along the axis ``i`` is trimmed to size ``n``.
- - ``s[i]`` is ``-1``, the whole input array along the axis ``i`` is used (no padding/trimming).
- - ``s`` is not provided, the size of each transformed axis (dimension) in the output array must equal the size of the corresponding axis in the input array, except for the last axis which is trimmed to ``2*(m-1)``, where ``m`` is the length of the input along the axis.
-
- If ``s`` is not ``None``, ``axes`` must not be ``None`` either, and ``s[i]`` corresponds to the size along the transformed axis specified by ``axes[i]``.
-
- Default: ``None``.
- axes: Sequence[int]
- axes (dimensions) over which to compute the Fourier transform. If ``None``, all axes must be transformed.
-
- If ``s`` is specified, the corresponding ``axes`` to be transformed must be explicitly specified too.
-
- Default: ``None``.
- norm: Literal['backward', 'ortho', 'forward']
- normalization mode. Should be one of the following modes:
-
- - ``'backward'``: normalize by ``1/n``.
- - ``'ortho'``: normalize by ``1/sqrt(n)`` (i.e., make the FFT orthonormal).
- - ``'forward'``: no normalization.
-
- where ``n = prod(s)`` is the logical FFT size.
-
- Default: ``'backward'``.
-
- Returns
- -------
- out: array
- an array transformed along the axes (dimension) indicated by ``axes``. The returned array must have a real-valued floating-point data type determined by :ref:`type-promotion`. The length along the last transformed axis is ``s[-1]`` (if given) or ``2*(m - 1)`` (otherwise), and all other axes ``s[i]``.
- """
-
-
-def hfft(
- x: array,
- /,
- *,
- n: Optional[int] = None,
- axis: int = -1,
- norm: Literal["backward", "ortho", "forward"] = "backward",
-) -> array:
- """
- Computes the one-dimensional discrete Fourier transform of a signal with Hermitian symmetry.
-
- Parameters
- ----------
- x: array
- input array. Should have a floating-point data type.
- n: int
- length of the transformed axis of the **output**. If
-
- - ``n//2+1`` is greater than the length of the input array, the input array is zero-padded to length ``n//2+1``.
- - ``n//2+1`` is less than the length of the input array, the input array is trimmed to length ``n//2+1``.
- - ``n`` is not provided, the length of the transformed axis of the output must equal the length ``2*(m-1)``, where ``m`` is the length of the input along the axis specified by ``axis``.
-
- Default: ``None``.
- axis: int
- axis (dimension) over which to compute the Fourier transform. If not set, the last axis (dimension) is used.
-
- Default: ``-1``.
- norm: Literal['backward', 'ortho', 'forward']
- normalization mode. Should be one of the following modes:
-
- - ``'backward'``: no normalization.
- - ``'ortho'``: normalize by ``1/sqrt(n)`` (i.e., make the FFT orthonormal).
- - ``'forward'``: normalize by ``1/n``.
-
- Default: ``'backward'``.
-
- Returns
- -------
- out: array
- an array transformed along the axis (dimension) indicated by ``axis``. The returned array must have a real-valued floating-point data type determined by :ref:`type-promotion`.
- """
-
-
-def ihfft(
- x: array,
- /,
- *,
- n: Optional[int] = None,
- axis: int = -1,
- norm: Literal["backward", "ortho", "forward"] = "backward",
-) -> array:
- """
- Computes the one-dimensional inverse discrete Fourier transform of a signal with Hermitian symmetry.
-
- Parameters
- ----------
- x: array
- input array. Must have a real-valued floating-point data type.
- n: int
- length of the transformed axis of the **input**. If
-
- - ``n`` is greater than the length of the input array, the input array is zero-padded to length ``n``.
- - ``n`` is less than the length of the input array, the input array is trimmed to length ``n``.
- - ``n`` is not provided, the length of the transformed axis of the output must equal the length of the input along the axis specified by ``axis``.
-
- Default: ``None``.
- axis: int
- axis (dimension) over which to compute the Fourier transform. If not set, the last axis (dimension) is used.
-
- Default: ``-1``.
- norm: Literal['backward', 'ortho', 'forward']
- normalization mode. Should be one of the following modes:
-
- - ``'backward'``: normalize by ``1/n``.
- - ``'ortho'``: normalize by ``1/sqrt(n)`` (i.e., make the FFT orthonormal).
- - ``'forward'``: no normalization.
-
- Default: ``'backward'``.
-
- Returns
- -------
- out: array
- an array transformed along the axis (dimension) indicated by ``axis``. The returned array must have a complex-valued floating-point data type determined by :ref:`type-promotion`.
- """
-
-
-def fftfreq(n: int, /, *, d: float = 1.0, device: Optional[device] = None) -> array:
- """
- Returns the discrete Fourier transform sample frequencies.
-
- For a Fourier transform of length ``n`` and length unit of ``d`` the frequencies are described as:
-
- .. code-block::
-
- f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n) # if n is even
- f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n) # if n is odd
-
- Parameters
- ----------
- n: int
- window length.
- d: float
- sample spacing between individual samples of the Fourier transform input. Default: ``1.0``.
- device: Optional[device]
- device on which to place the created array. Default: ``None``.
-
- Returns
- -------
- out: array
- an array of length ``n`` containing the sample frequencies. The returned array must have a real-valued floating-point data type determined by :ref:`type-promotion`.
- """
-
-
-def rfftfreq(n: int, /, *, d: float = 1.0, device: Optional[device] = None) -> array:
- """
- Returns the discrete Fourier transform sample frequencies (for ``rfft`` and ``irfft``).
-
- For a Fourier transform of length ``n`` and length unit of ``d`` the frequencies are described as:
-
- .. code-block::
-
- f = [0, 1, ..., n/2-1, n/2] / (d*n) # if n is even
- f = [0, 1, ..., (n-1)/2-1, (n-1)/2] / (d*n) # if n is odd
-
- The Nyquist frequency component is considered to be positive.
-
- Parameters
- ----------
- n: int
- window length.
- d: float
- sample spacing between individual samples of the Fourier transform input. Default: ``1.0``.
- device: Optional[device]
- device on which to place the created array. Default: ``None``.
-
- Returns
- -------
- out: array
- an array of length ``n//2+1`` containing the sample frequencies. The returned array must have a real-valued floating-point data type determined by :ref:`type-promotion`.
- """
-
-
-def fftshift(x: array, /, *, axes: Union[int, Sequence[int]] = None) -> array:
- """
- Shift the zero-frequency component to the center of the spectrum.
-
- This function swaps half-spaces for all axes (dimensions) specified by ``axes``.
-
- .. note::
- ``out[0]`` is the Nyquist component only if the length of the input is even.
-
- Parameters
- ----------
- x: array
- input array. Should have a floating-point data type.
- axes: Union[int, Sequence[int]]
- axes over which to shift. If ``None``, the function must shift all axes. Default: ``None``.
-
- Returns
- -------
- out: array
- the shifted array. The returned array must have the same data type as ``x``.
- """
-
-
-def ifftshift(x: array, /, *, axes: Union[int, Sequence[int]] = None) -> array:
- """
- Inverse of ``fftshift``.
-
- .. note::
- Although identical for even-length ``x``, ``fftshift`` and ``ifftshift`` differ by one sample for odd-length ``x``.
-
- Parameters
- ----------
- x: array
- input array. Should have a floating-point data type.
- axes: Union[int, Sequence[int]]
- axes over which to perform the inverse shift. If ``None``, the function must shift all axes. Default: ``None``.
-
- Returns
- -------
- out: array
- the shifted array. The returned array must have the same data type as ``x``.
- """
-
-
-__all__ = [
- "fft",
- "ifft",
- "fftn",
- "ifftn",
- "rfft",
- "irfft",
- "rfftn",
- "irfftn",
- "hfft",
- "ihfft",
- "fftfreq",
- "rfftfreq",
- "fftshift",
- "ifftshift",
-]
diff --git a/spec/Makefile b/spec/Makefile
deleted file mode 100644
index e71fa39e1..000000000
--- a/spec/Makefile
+++ /dev/null
@@ -1,25 +0,0 @@
-# Minimal makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line, and also
-# from the environment for the first two.
-SPHINXOPTS ?= -W --keep-going
-SPHINXBUILD ?= sphinx-build
-SOURCEDIR = .
-BUILDDIR = _build
-
-# Put it first so that "make" without argument is like "make help".
-help:
- @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
-
-.PHONY: help Makefile clean
-
-# Catch-all target: route all unknown targets to Sphinx using the new
-# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
-%: Makefile
- @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
-
-clean:
- -rm -rf $(BUILDDIR)
- -rm -rf "$(SOURCEDIR)/API_specification/generated"
- -rm -rf "$(SOURCEDIR)/extensions/generated"
diff --git a/spec/_ghpages/_gitignore.txt b/spec/_ghpages/_gitignore.txt
new file mode 100644
index 000000000..4e7ddcaad
--- /dev/null
+++ b/spec/_ghpages/_gitignore.txt
@@ -0,0 +1,36 @@
+#/
+# @license MIT
+#
+# Copyright (c) 2022 Python Data APIs Consortium.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+#/
+#
+# Note this file is not intended to be a .gitignore for the main branch, but to
+# be copied into gh-pages branch.
+
+_site
+build/
+doctrees/
+.vscode/
+node_modules/
+__pycache__/
+*.pyc
+spec/**/generated/
+src/*.egg-info/
diff --git a/spec/_ghpages/index.html b/spec/_ghpages/index.html
new file mode 100644
index 000000000..e209341a3
--- /dev/null
+++ b/spec/_ghpages/index.html
@@ -0,0 +1,10 @@
+
+
+
+
+
+
+
+
+
diff --git a/spec/_ghpages/versions.json b/spec/_ghpages/versions.json
new file mode 100644
index 000000000..65a52628a
--- /dev/null
+++ b/spec/_ghpages/versions.json
@@ -0,0 +1,8 @@
+{
+ "2021.12": "2021.12",
+ "2022.12": "2022.12",
+ "2023.12": "2023.12",
+ "2024.12": "2024.12",
+ "latest": "latest",
+ "draft": "draft"
+}
diff --git a/spec/_static/css/custom.css b/spec/_static/css/custom.css
new file mode 100644
index 000000000..51c73f65c
--- /dev/null
+++ b/spec/_static/css/custom.css
@@ -0,0 +1,3 @@
+s {
+ text-decoration: inherit;
+}
diff --git a/spec/_static/images/dtype_promotion_complex.png b/spec/_static/images/dtype_promotion_complex.png
new file mode 100644
index 000000000..3503b07f5
Binary files /dev/null and b/spec/_static/images/dtype_promotion_complex.png differ
diff --git a/spec/_static/images/dtype_promotion_lattice_no_complex.png b/spec/_static/images/dtype_promotion_lattice_no_complex.png
new file mode 100644
index 000000000..669d30476
Binary files /dev/null and b/spec/_static/images/dtype_promotion_lattice_no_complex.png differ
diff --git a/spec/_static/images/favicon.png b/spec/_static/images/favicon.png
new file mode 100644
index 000000000..49b7d9d6f
Binary files /dev/null and b/spec/_static/images/favicon.png differ
diff --git a/spec/_static/javascripts/version_dropdown.js b/spec/_static/javascripts/version_dropdown.js
new file mode 100644
index 000000000..4d7b7378b
--- /dev/null
+++ b/spec/_static/javascripts/version_dropdown.js
@@ -0,0 +1,140 @@
+/**
+* Returns a promise for resolving a URL corresponding to a versioned resource (if one exists).
+*
+* @private
+* @param {string} url - base URL
+* @param {string} path - resource path
+* @returns {Promise} promise which resolves a resource URL
+*/
+function href(url, path) {
+ const defaultURL = url + "/index.html";
+ url += "/" + path;
+
+ // If a versioned resource exists, return the resource's URL; otherwise, return a default URL:
+ const opts = {
+ 'method': 'HEAD'
+ };
+ return fetch(url, opts).then(onResponse).catch(onError);
+
+ /**
+ * Callback invoked upon successfully resolving a resource.
+ *
+ * @private
+ * @param {Object} response - response object
+ */
+ function onResponse(response) {
+ if (response.ok) {
+ return url;
+ }
+ return defaultURL;
+ }
+
+ /**
+ * Callback invoked upon failing to resolve a resource.
+ *
+ * @private
+ * @param {Error} error - error object
+ */
+ function onError(error) {
+ return defaultURL;
+ }
+}
+
+/**
+* Adds a version dropdown menu with custom URL paths depending on the current page.
+*
+* @param {string} json_loc - JSON URL
+* @param {string} target_loc - target URL
+* @param {string} text - text
+* @returns {Promise} promise which resolves upon adding a version menu
+*/
+async function add_version_dropdown(json_loc, target_loc, text) {
+ const dropdown = document.createElement("div");
+ dropdown.className = "md-flex__cell md-flex__cell--shrink dropdown";
+
+ const button = document.createElement("button");
+ button.className = "dropdownbutton";
+
+ const content = document.createElement("div");
+ content.className = "dropdown-content md-hero";
+
+ dropdown.appendChild(button);
+ dropdown.appendChild(content);
+
+ const opts = {
+ 'method': 'GET'
+ };
+ await fetch(json_loc, opts).then(onResponse).then(onVersions).catch(onError);
+
+ /**
+ * Callback invoked upon resolving a resource.
+ *
+ * @private
+ * @param {Object} response - response object
+ */
+ function onResponse(response) {
+ return response.json();
+ }
+
+ /**
+ * Callback invoked upon resolving a JSON resource.
+ *
+ * @private
+ * @param {Object} versions - versions object
+ * @returns {Promise} promise which resolves upon processing version data
+ */
+ async function onVersions(versions) {
+ // Resolve the current browser URL:
+ const currentURL = window.location.href;
+
+ // Check whether the user is currently on a resource page (e.g., is viewing the specification for a particular function):
+ let path = currentURL.split(/_site|array\-api/)[1];
+
+ // Extract the resource subpath:
+ if (path) {
+ path = path.split("/");
+ path = path.slice(2, path.length);
+ path = path.join("/");
+ } else {
+ path = "";
+ }
+ // For each version, create an anchor element and attempt to resolve a given resource for that version...
+ const promises = [];
+ const el = [];
+ for (let key in versions) {
+ if (versions.hasOwnProperty(key)) {
+ let a = document.createElement("a");
+ a.innerHTML = key;
+ a.title = key;
+ el.push(a);
+ promises.push(href(target_loc + versions[key], path));
+ }
+ }
+ // Resolve all resource URLs:
+ const urls = await Promise.all(promises);
+
+ // Append the version links to the dropdown menu:
+ for (let i = 0; i < urls.length; i++) {
+ let a = el[i];
+ a.href = urls[i];
+ content.appendChild(a);
+ }
+ // Set the button text:
+ button.innerHTML = text;
+
+ // Append dropdown:
+ $(".navheader").append(dropdown);
+ }
+
+ /**
+ * Callback invoked upon failing to resolve a resource.
+ *
+ * @private
+ */
+ function onError() {
+ button.innerHTML = "Other Versions Not Found";
+
+ // Append dropdown:
+ $(".navheader").append(dropdown);
+ }
+};
diff --git a/spec/_templates/property.rst b/spec/_templates/property.rst
index baf31cea3..74062629f 100644
--- a/spec/_templates/property.rst
+++ b/spec/_templates/property.rst
@@ -2,4 +2,4 @@
{{ name.split('.')[-1] | underline }}
-.. auto{{ objtype }}:: {{ objname }}
\ No newline at end of file
+.. auto{{ objtype }}:: {{ objname }}
diff --git a/spec/benchmark_suite.md b/spec/benchmark_suite.md
deleted file mode 100644
index da203cbf6..000000000
--- a/spec/benchmark_suite.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Benchmark suite
-
-Adding a benchmark suite is planned in the future.
\ No newline at end of file
diff --git a/spec/draft/API_specification/array_object.rst b/spec/draft/API_specification/array_object.rst
new file mode 100644
index 000000000..e3c7e8ae6
--- /dev/null
+++ b/spec/draft/API_specification/array_object.rst
@@ -0,0 +1,322 @@
+.. _array-object:
+
+Array object
+============
+
+ Array API specification for array object attributes and methods.
+
+A conforming implementation of the array API standard must provide and support an array object having the following attributes and methods.
+
+Furthermore, a conforming implementation of the array API standard must support, at minimum, array objects of rank (i.e., number of dimensions) ``0``, ``1``, ``2``, ``3``, and ``4`` and must explicitly document their maximum supported rank ``N``.
+
+.. note::
+ Conforming implementations must support zero-dimensional arrays.
+
+ Apart from array object attributes, such as ``ndim``, ``device``, and ``dtype``, all operations in this standard return arrays (or tuples of arrays), including those operations, such as ``mean``, ``var``, and ``std``, from which some common array libraries (e.g., NumPy) return scalar values.
+
+ *Rationale: always returning arrays is necessary to (1) support accelerator libraries where non-array return values could force device synchronization and (2) support delayed execution models where an array represents a future value.*
+
+-------------------------------------------------
+
+.. _operators:
+
+Operators
+---------
+
+A conforming implementation of the array API standard must provide and support an array object supporting the following Python operators.
+
+Arithmetic Operators
+~~~~~~~~~~~~~~~~~~~~
+
+A conforming implementation of the array API standard must provide and support an array object supporting the following Python arithmetic operators.
+
+- ``+x``: :meth:`.array.__pos__`
+
+ - `operator.pos(x) `_
+ - `operator.__pos__(x) `_
+
+- `-x`: :meth:`.array.__neg__`
+
+ - `operator.neg(x) `_
+ - `operator.__neg__(x) `_
+
+- `x1 + x2`: :meth:`.array.__add__`
+
+ - `operator.add(x1, x2) `_
+ - `operator.__add__(x1, x2) `_
+
+- `x1 - x2`: :meth:`.array.__sub__`
+
+ - `operator.sub(x1, x2) `_
+ - `operator.__sub__(x1, x2) `_
+
+- `x1 * x2`: :meth:`.array.__mul__`
+
+ - `operator.mul(x1, x2) `_
+ - `operator.__mul__(x1, x2) `_
+
+- `x1 / x2`: :meth:`.array.__truediv__`
+
+ - `operator.truediv(x1,x2) `_
+ - `operator.__truediv__(x1, x2) `_
+
+- `x1 // x2`: :meth:`.array.__floordiv__`
+
+ - `operator.floordiv(x1, x2) `_
+ - `operator.__floordiv__(x1, x2) `_
+
+- `x1 % x2`: :meth:`.array.__mod__`
+
+ - `operator.mod(x1, x2) `_
+ - `operator.__mod__(x1, x2) `_
+
+- `x1 ** x2`: :meth:`.array.__pow__`
+
+ - `operator.pow(x1, x2) `_
+ - `operator.__pow__(x1, x2) `_
+
+Arithmetic operators should be defined for arrays having real-valued data types.
+
+Array Operators
+~~~~~~~~~~~~~~~
+
+A conforming implementation of the array API standard must provide and support an array object supporting the following Python array operators.
+
+- `x1 @ x2`: :meth:`.array.__matmul__`
+
+ - `operator.matmul(x1, x2) `_
+ - `operator.__matmul__(x1, x2) `_
+
+The matmul ``@`` operator should be defined for arrays having numeric data types.
+
+Bitwise Operators
+~~~~~~~~~~~~~~~~~
+
+A conforming implementation of the array API standard must provide and support an array object supporting the following Python bitwise operators.
+
+- `~x`: :meth:`.array.__invert__`
+
+ - `operator.inv(x) `_
+ - `operator.invert(x) `_
+ - `operator.__inv__(x) `_
+ - `operator.__invert__(x) `_
+
+- `x1 & x2`: :meth:`.array.__and__`
+
+ - `operator.and(x1, x2) `_
+ - `operator.__and__(x1, x2) `_
+
+- `x1 | x2`: :meth:`.array.__or__`
+
+ - `operator.or(x1, x2) `_
+ - `operator.__or__(x1, x2) `_
+
+- `x1 ^ x2`: :meth:`.array.__xor__`
+
+ - `operator.xor(x1, x2) `_
+ - `operator.__xor__(x1, x2) `_
+
+- `x1 << x2`: :meth:`.array.__lshift__`
+
+ - `operator.lshift(x1, x2) `_
+ - `operator.__lshift__(x1, x2) `_
+
+- `x1 >> x2`: :meth:`.array.__rshift__`
+
+ - `operator.rshift(x1, x2) `_
+ - `operator.__rshift__(x1, x2) `_
+
+Bitwise operators should be defined for arrays having integer and boolean data types.
+
+Comparison Operators
+~~~~~~~~~~~~~~~~~~~~
+
+A conforming implementation of the array API standard must provide and support an array object supporting the following Python comparison operators.
+
+- `x1 < x2`: :meth:`.array.__lt__`
+
+ - `operator.lt(x1, x2) `_
+ - `operator.__lt__(x1, x2) `_
+
+- `x1 <= x2`: :meth:`.array.__le__`
+
+ - `operator.le(x1, x2) `_
+ - `operator.__le__(x1, x2) `_
+
+- `x1 > x2`: :meth:`.array.__gt__`
+
+ - `operator.gt(x1, x2) `_
+ - `operator.__gt__(x1, x2) `_
+
+- `x1 >= x2`: :meth:`.array.__ge__`
+
+ - `operator.ge(x1, x2) `_
+ - `operator.__ge__(x1, x2) `_
+
+- `x1 == x2`: :meth:`.array.__eq__`
+
+ - `operator.eq(x1, x2) `_
+ - `operator.__eq__(x1, x2)