diff --git a/.github/actions/deploy-action/action.yml b/.github/actions/deploy-action/action.yml
index 6b2e578f1..ac777a7da 100644
--- a/.github/actions/deploy-action/action.yml
+++ b/.github/actions/deploy-action/action.yml
@@ -1,5 +1,5 @@
name: Deploy via Bastion Host
-description: "Deploy specified files and directories to a server via a bastion host"
+description: "Deploy specified files and directories to a primary and secondary server via a bastion host"
inputs:
private_ssh_key:
description: "The private SSH key used to authenticate with the remote servers"
@@ -13,8 +13,12 @@ inputs:
description: "The [user@]hostname of the bastion server"
required: true
- host:
- description: "The [user@]hostname of the web server"
+ primary_host:
+ description: "The [user@]hostname of the primary web server"
+ required: true
+
+ secondary_host:
+ description: "The [user@]hostname of the secondary web server"
required: true
source:
@@ -45,6 +49,10 @@ runs:
chmod 600 ~/.ssh/known_hosts
shell: bash
- - name: rsync source to destination
- run: rsync -avz --delete -e 'ssh -A ${{ inputs.bastion_host }} ssh' ${{ inputs.source }} ${{ inputs.host }}:${{ inputs.destination }}
+ - name: Deploy to primary server
+ run: rsync -avz --delete -e 'ssh -A ${{ inputs.bastion_host }} ssh' ${{ inputs.source }} ${{ inputs.primary_host }}:${{ inputs.destination }}
+ shell: bash
+
+ - name: Deploy to secondary server
+ run: rsync -avz --delete -e 'ssh -A ${{ inputs.bastion_host }} ssh' ${{ inputs.source }} ${{ inputs.secondary_host }}:${{ inputs.destination }}
shell: bash
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 890d8c13d..4d1565e61 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -7,6 +7,11 @@ updates:
target-branch: "develop"
- package-ecosystem: "github-actions"
directory: "/"
- schedule:
- interval: weekly
- target-branch: develop
+ schedule:
+ interval: "weekly"
+ target-branch: "develop"
+ - package-ecosystem: "pip"
+ directory: "/"
+ schedule:
+ interval: "weekly"
+ target-branch: "develop"
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 579b8a82f..a81b5f78c 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -10,32 +10,36 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- - name: Log current branches and repositories
- run: |
- echo "Current ref: $GITHUB_REF"
- echo "Base ref: $GITHUB_BASE_REF"
- echo "Head ref: $GITHUB_HEAD_REF"
- echo "Repository: $GITHUB_REPOSITORY"
- echo "Head repository: ${{ github.event.pull_request.head.repo.full_name }}"
- name: Only allow pull requests based on master from the develop branch of the current repository
if: ${{ github.base_ref == 'master' && !(github.head_ref == 'develop' && github.event.pull_request.head.repo.full_name == github.repository) }}
run: |
echo "Pull requests based on master can only come from the develop branch of this repository"
echo "Please check your base branch as it should be develop by default"
exit 1
- - uses: actions/checkout@v2
- - uses: actions/setup-python@v2
+ - uses: actions/checkout@v4
+ - uses: actions/setup-python@v5
with:
python-version: 3.9
- name: Install Python dependencies
- uses: py-actions/py-dependency-install@v2
+ uses: py-actions/py-dependency-install@v4
+ - name: Install Python libs
+ run: pip3 install -r ./requirements.txt
- uses: ruby/setup-ruby@v1
with:
- ruby-version: 2.7
+ ruby-version: 3.2
bundler-cache: true
- - uses: seanmiddleditch/gha-setup-ninja@v3
+ - uses: seanmiddleditch/gha-setup-ninja@v6
with:
version: 1.10.2
+ - name: Install arm-none-eabi-gcc GNU Arm Embedded Toolchain
+ uses: carlosperate/arm-none-eabi-gcc-action@v1.10.1
+ - name: Install Doxygen
+ run: |
+ wget https://www.doxygen.nl/files/doxygen-1.10.0.linux.bin.tar.gz
+ tar xf doxygen-1.10.0.linux.bin.tar.gz -C "$HOME"
+ echo "$HOME/doxygen-1.10.0/bin" >> $GITHUB_PATH
+ - name: Build Doxygen documentation
+ run: make build_doxygen_adoc
- name: Build documentation
run: make -j 2
- name: Deploy to Mythic Beasts
@@ -45,7 +49,8 @@ jobs:
private_ssh_key: ${{ secrets.DEPLOY_SSH_KEY }}
public_bastion_host_keys: ${{ secrets.DEPLOY_KNOWN_HOSTS }}
bastion_host: ${{ secrets.DEPLOY_BASTION_HOST }}
- host: ${{ secrets.DEPLOY_HOST }}
+ primary_host: ${{ secrets.DEPLOY_PRIMARY_HOST }}
+ secondary_host: ${{ secrets.DEPLOY_SECONDARY_HOST }}
# this needs to match destination: in _config.yml
source: documentation/html/
destination: documentation
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
index 12b65ae8f..9ffb99cfc 100644
--- a/.github/workflows/stale.yml
+++ b/.github/workflows/stale.yml
@@ -13,7 +13,7 @@ jobs:
pull-requests: write
steps:
- - uses: actions/stale@v3
+ - uses: actions/stale@v9
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-issue-message: 'This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.'
diff --git a/.gitignore b/.gitignore
index 1852293db..11aa32a47 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,8 @@
.DS_Store
__pycache__
build
+build-pico-sdk-docs
documentation/html
+documentation/asciidoc/pico-sdk
+.venv
+.env
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 000000000..9f315972e
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,13 @@
+[submodule "documentation/pico-sdk"]
+ path = lib/pico-sdk
+ url = https://github.com/raspberrypi/pico-sdk
+ branch = master
+[submodule "documentation/pico-examples"]
+ path = lib/pico-examples
+ url = https://github.com/raspberrypi/pico-examples.git
+ branch = master
+
+[submodule "doxygentoasciidoc"]
+ path = lib/doxygentoasciidoc
+ url = https://github.com/raspberrypi/doxygentoasciidoc.git
+ branch = main
diff --git a/BUILD.md b/BUILD.md
new file mode 100644
index 000000000..2d5ac2c6e
--- /dev/null
+++ b/BUILD.md
@@ -0,0 +1,103 @@
+# Making the documentation
+
+A brief overview of the files in this repo, and the make-targets in the `Makefile`, and how it all hangs together to build the final output html files from the initial adoc input files.
+
+**TL;DR version**: To build the 'regular' documentation site, run `make clean; make`. To build the documentation site with pico-sdk API docs included, run `make clean; make build_doxygen_adoc; make`.
+
+## Files in the repo
+
+* `documentation/asciidoc/` all our "regular" asciidoc documentation (referred to as `$(ASCIIDOC_DIR)` in the `Makefile`)
+* `documentation/images/` the images shown on the "boxes"
+* `documentation/pico-sdk/` [pico-sdk](https://github.com/raspberrypi/pico-sdk) submodule (initially empty) (referred to as `$(PICO_SDK_DIR)` in the `Makefile`)
+* `documentation/pico-examples/` [pico-examples](https://github.com/raspberrypi/pico-examples) submodule (initially empty) (referred to as `$(PICO_EXAMPLES_DIR)` in the `Makefile`)
+* `jekyll-assets/` various styling stuff used by the jekyll build (referred to as `$(JEKYLL_ASSETS_DIR)` in the `Makefile`)
+* `scripts/` various Python build-scripts (referred to as `$(SCRIPTS_DIR)` in the `Makefile`)
+* `Makefile` top-level Makefile that runs the build
+
+## When you clone the repo and run `make`
+
+1. `.DEFAULT_GOAL := html` is set in the `Makefile`, which means that `make` actually does `make html`.
+ 1. The `html` target has the `run_ninja` target as a prerequisite
+ 1. The `run_ninja` target has `$(AUTO_NINJABUILD)` (i.e. `build/autogenerated.ninja`) as a prerequisite
+ 1. `build/autogenerated.ninja` has `$(BUILD_DIR)` (i.e. `build/`) as a prerequisite
+ 1. So the `build/` directory gets created
+ 1. Then `build/autogenerated.ninja` gets created
+ 1. Then `ninja` gets invoked, which uses `build.ninja` (which includes `build/autogenerated.ninja`) to create a whole bunch of files in the `build/` directory
+ 1. Then `jekyll` gets invoked, which uses all the files in the `build/` directory to create all the final output files in the `$(HTML_DIR)` (i.e. `documentation/html/`) directory
+
+If you run `make` a second time, then `make` and `ninja` will spot that everything is up to date, and only re-run the `jekyll` stage.
+
+## When you run `make clean`
+
+1. The `clean` target has the `clean_html` and `clean_doxygen_adoc` targets as prerequisites
+ 1. In this case `clean_doxygen_adoc` doesn't do anything, but `clean_html` deletes the `documentation/html/` directory
+1. Then the `build/` directory is deleted
+
+## When you run `make build_doxygen_adoc`
+
+1. The `build_doxygen_adoc` target has `$(ASCIIDOC_DOXYGEN_DIR)/index_doxygen.adoc` (i.e. `documentation/asciidoc/pico-sdk/index_doxygen.adoc`) as a prerequisite
+ 1. `documentation/asciidoc/pico-sdk/index_doxygen.adoc` has `$(DOXYGEN_HTML_DIR)` (i.e. `build-pico-sdk-docs/docs/doxygen/html/`) and `$(ASCIIDOC_DOXYGEN_DIR)` (i.e. `documentation/asciidoc/pico-sdk/`) as prerequisites
+ 1. So the `documentation/asciidoc/pico-sdk/` directory gets created
+ 1. `build-pico-sdk-docs/docs/doxygen/html/` has `$(ALL_SUBMODULE_CMAKELISTS)` (i.e. `documentation/pico-sdk/CMakeLists.txt` and `documentation/pico-examples/CMakeLists.txt`) and `$(DOXYGEN_PICO_SDK_BUILD_DIR)` (i.e. `build-pico-sdk-docs/`) as prerequisites
+ 1. So the `build-pico-sdk-docs/` directory gets created
+ 1. `documentation/pico-sdk/CMakeLists.txt` gets created by initialising the `pico-sdk` submodule
+ 1. `documentation/pico-examples/CMakeLists.txt` gets created by initialising the `pico-examples` submodule
+ 1. Then `cmake` gets invoked for `pico-sdk/`, which creates `build-pico-sdk-docs/Makefile`
+ 1. Then we run the `docs` target in `build-pico-sdk-docs/Makefile` which runs `doxygen` and creates a bunch of HTML files in `build-pico-sdk-docs/docs/doxygen/html/` (referred to as `$(DOXYGEN_HTML_DIR)` in the `Makefile`)
+1. Then we run the new `scripts/transform_doxygen_html.py` to convert the HTML files from `build-pico-sdk-docs/docs/doxygen/html/` into adoc files in `documentation/asciidoc/pico-sdk/`
+
+If you run `make build_doxygen_adoc` a second time, then `make` will detect that everything is already up to date, and so not do anything.
+
+If we **now** run `make` (see the `make html` description above), it will now find `documentation/asciidoc/pico-sdk/` and include that in the "tabs" in the output html files in `documentation/html/`.
+
+And if we then run a `make clean`, the presence of `documentation/asciidoc/pico-sdk/` will cause the `clean_doxygen_adoc` target to delete the files in the `build/` directory (to prevent things getting into an "invalid state"), and then delete the `documentation/asciidoc/pico-sdk/` directory.
+Note that `build-pico-sdk-docs/` (the raw Doxygen output) **isn't** deleted by `make clean`, because it's basically "static content" which can take a while to regenerate. To _also_ get rid of `build-pico-sdk-docs/` you can either `make clean_doxygen_html` or `make clean_everything` (with the latter also deinitialising the submodules).
+
+## Makefile targets
+
+Targets which might be useful for getting things to / from a particular state.
+
+* `make fetch_submodules` populates (initialises) the `documentation/pico-sdk/` and `documentation/pico-examples/` submodule directories
+* `make clean_submodules` deinitialises the submodule directories, i.e. is the opposite of `fetch_submodules`
+* `make build_doxygen_html` runs the `cmake` and `make` steps required to create the Doxygen HTML files (in `build-pico-sdk-docs/docs/doxygen/html/`) from the `pico-sdk` submodule
+* `make clean_doxygen_html` deletes the `build-pico-sdk-docs/` directory i.e. is the opposite of `build_doxygen_html`
+* `make build_doxygen_adoc` described in an earlier section, converts html files from `build-pico-sdk-docs/docs/doxygen/html/` to adoc files in `documentation/asciidoc/pico-sdk/`
+* `make clean_doxygen_adoc` deletes the `documentation/asciidoc/pico-sdk/` directory i.e. is the opposite of `build_doxygen_adoc`
+* `make run_ninja` converts adoc files from `documentation/asciidoc/` into adoc files in `build/`
+* `make clean_ninja` deletes the files in `build/` i.e. is the opposite of `run_ninja`
+* `make html` described in an earlier section, converts adoc files from `build/` into html files in `documentation/html/`. The default target invoked when no explicit target is given.
+* `make clean_html` deletes the `documentation/html/` directory, i.e. is the opposite of `html`
+* `make serve_html` converts adoc files from `build/` into html files in `documentation/html/` and then runs a mini webserver so that you can preview the output
+* `make clean` runs both of `clean_html` & `clean_doxygen_adoc` and also deletes `build/`
+* `make clean_everything` runs all of `clean_submodules`, `clean_doxygen_html` and `clean` i.e. returns your local directory to a fairly pristine state
+
+Note that for day-to-day usage, you'll typically only use the `make clean`, `make`, `make build_doxygen_adoc` and `make serve_html` commands - the dependencies in the `Makefile` are all set up so that any necessary intermediate steps are performed automatically.
+
+Bad ASCII-art time:
+
+```
++---------------+
+| 'clean' state |--> make build_doxygen_adoc
++---------------+ |
+ | | ^ V
+ | V | +-----------------------------------------+
+ | make make clean <---| documentation/asciidoc/pico-sdk/ exists |
+ | | ^ +-----------------------------------------+
+ | | | | |
+ | | | V |
+ | V | make |
+ | +----------------------------+ | |
+ | | documentation/html/ exists |<---+ |
+ | +----------------------------+ |
+ | | ^ |
+ | V | |
+ +---> make serve_html <-----------------------+
+ | |
+ | Ctrl-C
+ | ^
+ V |
++----------------------------------------------------------+
+| documentation/html/ exists and preview webserver running |
++----------------------------------------------------------+
+```
+
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index beb7f0705..37fef6b8e 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,37 +1,163 @@
-# Contributing to Raspberry Pi Documentation
+# Contributing to the Raspberry Pi Documentation
-The Raspberry Pi Documentation website is built from Asciidoc source using Asciidoctor and a Jekyll and Python toolchain. The website is automatically deployed to the raspberrypi.com site — pushed to production — using GitHub Actions when a push to the `master` branch occurs.
+The Raspberry Pi Documentation website is built from Asciidoc source using:
-Full instructions for building and running the documentation website locally can be found in the top-level [README.md](README.md) file.
+* [Asciidoctor](https://asciidoctor.org/)
+* [Jekyll](https://jekyllrb.com/)
+* [jekyll-asciidoc](https://github.com/asciidoctor/jekyll-asciidoc)
+* Python
-## How to Contribute
+The website automatically deploys to [www.raspberrypi.com/documentation](https://www.raspberrypi.com/documentation) using GitHub Actions when new commits appear in the `master` branch.
-In order to contribute new or updated documentation, you must first create a GitHub account and fork the original repository to your own account. You can make changes, save them in your repository, then [make a pull request](https://docs.github.com/en/github/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request-from-a-fork) against this repository. The pull request will appear [in the repository](https://github.com/raspberrypi/documentation/pulls) where it can be assessed by the maintainers, copy edited, and if appropriate, merged with the official repository.
+## Contribute
-Unless you are opening a pull request which will only make small corrections, for instance to correct a typo, you are more likely to get traction for your changes if you [open an issue](https://github.com/raspberrypi/documentation/issues) first to discuss the proposed changes. Issues and Pull Requests older than 60 days will [automatically be marked as stale](https://github.com/actions/stale) and then closed 7 days later if there still hasn't been any further activity.
+To contribute or update documentation:
-**NOTE:** The default [branch](https://github.com/raspberrypi/documentation/branches) of the repository is the `develop` branch, and this should be the branch you get by default when you initially checkout the repository. You should target any pull requests against the `develop` branch, pull requests against the `master` branch will automatically fail checks and not be accepted.
+1. Create a fork of this repository on your GitHub account.
-**NOTE:** Issues and Pull Requests older than 60 days will [automatically be marked as stale](https://github.com/actions/stale) and then closed 7 days later if there still hasn't been any further activity.
+1. Make changes in your fork. Start from the default `develop` branch.
-Before starting writing your contribution to the documentation, you should take a look at the [style guide](https://github.com/raspberrypi/style-guide/blob/master/style-guide.md).
+1. Read our [style guide](https://github.com/raspberrypi/style-guide/blob/master/style-guide.md) to ensure that your changes are consistent with the rest of our documentation. Since Raspberry Pi is a British company, be sure to include all of your extra `u`s and transfigure those `z`s (pronounced 'zeds') into `s`s!
-**IMPORTANT**: Because the documentation makes use of the Asciidoc `include` statement, the `xref:` statements inside the documentation do not link back to the correct pages on Github, as Github does not support Asciidoc include functionality (see [#2005](https://github.com/raspberrypi/documentation/issues/2005)). However, these links work correctly when the HTML documentation is built and deployed. Please do not submit Pull Requests fixing link destinations unless you're sure that the link is broken [on the documentation site](https://www.raspberrypi.com/documentation/) itself.
+1. [Open a pull request](https://docs.github.com/en/github/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request-from-a-fork) against this repository.
-## Type of Content
+1. The maintainers will assess and copy-edit the PR. This can take anywhere from a few minutes to a few days, depending on the size of your PR, the time of year, and the availability of the maintainers.
-We welcome contributions from the community, ranging from correcting small typos all the way through to adding entire new sections to the documentation. However, going forward we're going to be fairly targeted about what sort of content we add to the documentation. We are looking to keep the repository, and the documentation, focused on Raspberry Pi-specific things, rather than having generic Linux or computing content.
+1. After making any requested improvements to your PR, the maintainers will accept the PR and merge your changes into `develop`.
-We are therefore deprecating the more generic documentation around using the Linux operating system, ahead of removing these sections entirely at some point in the future as part of a larger update to the documentation site. This move is happening as we feel these sort of more general topics are, ten years on from when the documentation was initially written, now much better covered elsewhere on the web.
+1. When the maintainers next release the documentation by merging `develop` into `master`, your changes will go public on the production documentation site.
-As such, we're not accepting PRs against these sections unless they're correcting errors.
+Alternatively, [open an issue](https://github.com/raspberrypi/documentation/issues) to discuss proposed changes.
-**NOTE:** We are willing to consider toolchain-related contributions, but changes to the toolchain may have knock-on effects in other places, so it is possible that apparently benign pull requests that make toolchain changes could be refused for fairly opaque reasons.
+## Build
-## Third-Party Services
+### Install dependencies
-In general, we will not accept content that is specific to an individual third-party service or product. We will also not embed, or add links, to YouTube videos showing tutorials on how to configure your Raspberry Pi.
+To build the Raspberry Pi documentation locally, you'll need Ruby, Python, and the Ninja build system.
-## Licensing
+#### Linux
+
+Use `apt` to install the dependencies:
+
+```console
+$ sudo apt install -y ruby ruby-dev python3 python3-pip make ninja-build
+```
+
+Then, append the following lines to your `~/.bashrc` file (or equivalent shell configuration):
+
+```bash
+export GEM_HOME="$(ruby -e 'puts Gem.user_dir')"
+export PATH="$PATH:$GEM_HOME/bin"
+```
+
+Close and re-launch your terminal window to use the new dependencies and configuration.
+
+#### macOS
+
+If you don't already have it, we recommend installing the [Homebrew](https://brew.sh/) package manager:
+
+```console
+$ /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"
+```
+
+Next, use Homebrew to install Ruby:
+
+```console
+$ brew install ruby
+```
+
+After installing Ruby, follow the instructions provided by Homebrew to make your new Ruby version easily accessible from the command line.
+
+Then, use Homebrew to install the most recent version of Python:
+
+```console
+$ brew install python
+```
+
+Then, install the [Ninja build system](https://formulae.brew.sh/formula/ninja#default):
+
+```console
+$ brew install ninja
+```
+
+### Set up environment
+
+Use the `gem` package manager to install the [Ruby bundler](https://bundler.io/), which this repository uses to manage Ruby dependencies:
+
+```console
+$ gem install bundler
+```
+
+And then install the required Ruby gems:
+
+```console
+$ bundle install
+```
+
+Configure a Python virtual environment for this project:
+
+```console
+$ python -m venv .env
+```
+
+Activate the virtual environment:
+
+```console
+$ source .env/bin/activate
+```
+
+> [!TIP]
+> When you're using a virtual environment, you should see a `(.env)` prefix at the start of your terminal prompt. At any time, run the `deactivate` command to exit the virtual environment.
+
+In the virtual environment, install the required Python modules:
+
+```console
+$ pip3 install -r requirements.txt
+```
+
+### Build HTML
+
+> [!IMPORTANT]
+> If you configured a Python virtual environment as recommended in the previous step, **always** run `source .env/bin/activate` before building. You must activate the virtual environment to access all of the Python dependencies installed in that virtual environment.
+
+To build the documentation and start a local server to preview the built site, run the following command:
+
+```console
+$ make serve_html
+```
+
+You can access the virtual server at [http://127.0.0.1:4000/documentation/](http://127.0.0.1:4000/documentation/).
+
+> [!TIP]
+> To delete and rebuild the documentation site, run `make clean`, then re-run the build command. You'll need to do this every time you add or remove an Asciidoc, image, or video file.
+
+
+### Build the Pico C SDK Doxygen documentation
+
+The Raspberry Pi documentation site includes a section of generated Asciidoc that we build from the [Doxygen Pico SDK documentation](https://github.com/raspberrypi/pico-sdk).
+
+We use the tooling in this repository and [doxygentoasciidoc](https://github.com/raspberrypi/doxygentoasciidoc) to generate that documentation section. By default, local documentation builds don't include this section because it takes a bit longer to build (tens of seconds) than the rest of the site.
+
+Building the Pico C SDK Doxygen documentation requires the following additional package dependencies:
+
+```console
+$ sudo apt install -y cmake gcc-arm-none-eabi doxygen graphviz
+```
+
+Then, initialise the Git submodules used in the Pico C SDK section build:
+
+```console
+$ git submodule update --init
+```
+
+Run the following command to build the Pico C SDK section Asciidoc files from the Doxygen source:
+
+```console
+$ make build_doxygen_adoc
+```
+
+The next time you build the documentation site, you'll see the Pico C SDK section in your local preview.
+
+> [!TIP]
+> To delete and rebuild the generated files, run `make clean_doxygen_adoc`, then re-run the build command.
-The documentation is under a [Creative Commons Attribution-Sharealike](https://creativecommons.org/licenses/by-sa/4.0/) (CC BY-SA 4.0) licence. By contributing content to this repository, you are agreeing to place your contributions under this licence.
diff --git a/Gemfile b/Gemfile
index 392ce4121..bb73401e4 100644
--- a/Gemfile
+++ b/Gemfile
@@ -8,10 +8,10 @@ source "https://rubygems.org"
#
# This will help ensure the proper Jekyll version is running.
# Happy Jekylling!
-gem "jekyll", "~> 4.3.1"
+gem "jekyll", "~> 4.4.1"
# This is the default theme for new Jekyll sites. You may change this to anything you like.
-gem "minima", "~> 2.0"
+gem "minima", "~> 2.5"
# If you want to use GitHub Pages, remove the "gem "jekyll"" above and
# uncomment the line below. To upgrade, run `bundle update github-pages`.
@@ -21,6 +21,8 @@ gem "minima", "~> 2.0"
group :jekyll_plugins do
gem "jekyll-feed", "~> 0.17"
gem 'jekyll-asciidoc'
+ gem 'asciidoctor'
+ gem 'asciidoctor-tabs', ">= 1.0.0.beta.6"
end
# Windows does not include zoneinfo files, so bundle the tzinfo-data gem
@@ -31,6 +33,10 @@ install_if -> { RUBY_PLATFORM =~ %r!mingw|mswin|java! } do
end
# Performance-booster for watching directories on Windows
-gem "wdm", "~> 0.1.0", :install_if => Gem.win_platform?
+gem "wdm", "~> 0.2.0", :install_if => Gem.win_platform?
-gem "nokogiri", "~> 1.13"
+gem "nokogiri", "~> 1.18"
+
+# So we can add custom element templates
+gem 'slim', '~> 5.2.1'
+gem 'thread_safe', '~> 0.3.5'
diff --git a/Gemfile.lock b/Gemfile.lock
index b26a24cb9..385a34392 100644
--- a/Gemfile.lock
+++ b/Gemfile.lock
@@ -1,98 +1,122 @@
GEM
remote: https://rubygems.org/
specs:
- addressable (2.8.1)
- public_suffix (>= 2.0.2, < 6.0)
- asciidoctor (2.0.15)
+ addressable (2.8.7)
+ public_suffix (>= 2.0.2, < 7.0)
+ asciidoctor (2.0.23)
+ asciidoctor-tabs (1.0.0.beta.6)
+ asciidoctor (>= 2.0.0, < 3.0.0)
+ base64 (0.2.0)
+ bigdecimal (3.1.9)
colorator (1.1.0)
- concurrent-ruby (1.1.10)
+ concurrent-ruby (1.3.5)
+ csv (3.3.2)
em-websocket (0.5.3)
eventmachine (>= 0.12.9)
http_parser.rb (~> 0)
eventmachine (1.2.7)
- ffi (1.15.5)
+ ffi (1.17.1)
forwardable-extended (2.6.0)
+ google-protobuf (4.29.3)
+ bigdecimal
+ rake (>= 13)
http_parser.rb (0.8.0)
- i18n (1.12.0)
+ i18n (1.14.7)
concurrent-ruby (~> 1.0)
- jekyll (4.3.1)
+ jekyll (4.4.1)
addressable (~> 2.4)
+ base64 (~> 0.2)
colorator (~> 1.0)
+ csv (~> 3.0)
em-websocket (~> 0.5)
i18n (~> 1.0)
jekyll-sass-converter (>= 2.0, < 4.0)
jekyll-watch (~> 2.0)
+ json (~> 2.6)
kramdown (~> 2.3, >= 2.3.1)
kramdown-parser-gfm (~> 1.0)
liquid (~> 4.0)
- mercenary (>= 0.3.6, < 0.5)
+ mercenary (~> 0.3, >= 0.3.6)
pathutil (~> 0.9)
rouge (>= 3.0, < 5.0)
safe_yaml (~> 1.0)
terminal-table (>= 1.8, < 4.0)
webrick (~> 1.7)
- jekyll-asciidoc (3.0.0)
- asciidoctor (>= 1.5.0)
+ jekyll-asciidoc (3.0.1)
+ asciidoctor (>= 1.5.0, < 3.0.0)
jekyll (>= 3.0.0)
jekyll-feed (0.17.0)
jekyll (>= 3.7, < 5.0)
- jekyll-sass-converter (2.2.0)
- sassc (> 2.0.1, < 3.0)
- jekyll-seo-tag (2.7.1)
+ jekyll-sass-converter (3.1.0)
+ sass-embedded (~> 1.75)
+ jekyll-seo-tag (2.8.0)
jekyll (>= 3.8, < 5.0)
jekyll-watch (2.2.1)
listen (~> 3.0)
- kramdown (2.4.0)
- rexml
+ json (2.9.1)
+ kramdown (2.5.1)
+ rexml (>= 3.3.9)
kramdown-parser-gfm (1.1.0)
kramdown (~> 2.0)
- liquid (4.0.3)
- listen (3.7.1)
+ liquid (4.0.4)
+ listen (3.9.0)
rb-fsevent (~> 0.10, >= 0.10.3)
rb-inotify (~> 0.9, >= 0.9.10)
mercenary (0.4.0)
- mini_portile2 (2.8.0)
- minima (2.5.1)
+ mini_portile2 (2.8.8)
+ minima (2.5.2)
jekyll (>= 3.5, < 5.0)
jekyll-feed (~> 0.9)
jekyll-seo-tag (~> 2.1)
- nokogiri (1.13.10)
- mini_portile2 (~> 2.8.0)
+ nokogiri (1.18.8)
+ mini_portile2 (~> 2.8.2)
racc (~> 1.4)
pathutil (0.16.2)
forwardable-extended (~> 2.6)
- public_suffix (5.0.0)
- racc (1.6.1)
+ public_suffix (6.0.1)
+ racc (1.8.1)
+ rake (13.2.1)
rb-fsevent (0.11.2)
- rb-inotify (0.10.1)
+ rb-inotify (0.11.1)
ffi (~> 1.0)
- rexml (3.2.5)
- rouge (4.0.0)
+ rexml (3.4.0)
+ rouge (4.5.1)
safe_yaml (1.0.5)
- sassc (2.4.0)
- ffi (~> 1.9)
+ sass-embedded (1.83.4)
+ google-protobuf (~> 4.29)
+ rake (>= 13)
+ slim (5.2.1)
+ temple (~> 0.10.0)
+ tilt (>= 2.1.0)
+ temple (0.10.3)
terminal-table (3.0.2)
unicode-display_width (>= 1.1.1, < 3)
- tzinfo (2.0.5)
+ thread_safe (0.3.6)
+ tilt (2.3.0)
+ tzinfo (2.0.6)
concurrent-ruby (~> 1.0)
- tzinfo-data (1.2022.7)
+ tzinfo-data (1.2025.2)
tzinfo (>= 1.0.0)
- unicode-display_width (2.3.0)
- wdm (0.1.1)
- webrick (1.7.0)
+ unicode-display_width (2.6.0)
+ wdm (0.2.0)
+ webrick (1.9.1)
PLATFORMS
ruby
DEPENDENCIES
- jekyll (~> 4.3.1)
+ asciidoctor
+ asciidoctor-tabs (>= 1.0.0.beta.6)
+ jekyll (~> 4.4.1)
jekyll-asciidoc
jekyll-feed (~> 0.17)
- minima (~> 2.0)
- nokogiri (~> 1.13)
+ minima (~> 2.5)
+ nokogiri (~> 1.18)
+ slim (~> 5.2.1)
+ thread_safe (~> 0.3.5)
tzinfo (~> 2.0)
tzinfo-data
- wdm (~> 0.1.0)
+ wdm (~> 0.2.0)
BUNDLED WITH
- 2.2.15
+ 2.3.22
diff --git a/LICENSE.md b/LICENSE.md
index 3cb65d491..4b2db9cd3 100644
--- a/LICENSE.md
+++ b/LICENSE.md
@@ -4,7 +4,7 @@ The Raspberry Pi documentation is licensed under a [Creative Commons Attribution
# Creative Commons Attribution-ShareAlike 4.0 International
-Creative Commons Corporation (“Creative Commons”) is not a law firm and does not provide legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an “as-is” basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible.
+Creative Commons Corporation ("Creative Commons") is not a law firm and does not provide legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an "as-is" basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible.
### Using Creative Commons Public Licenses
@@ -12,7 +12,7 @@ Creative Commons public licenses provide a standard set of terms and conditions
* __Considerations for licensors:__ Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. Licensors should clearly mark any material not subject to the license. This includes other CC-licensed material, or material used under an exception or limitation to copyright. [More considerations for licensors](http://wiki.creativecommons.org/Considerations_for_licensors_and_licensees#Considerations_for_licensors).
-* __Considerations for the public:__ By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor’s permission is not necessary for any reason–for example, because of any applicable exception or limitation to copyright–then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. [More considerations for the public](http://wiki.creativecommons.org/Considerations_for_licensors_and_licensees#Considerations_for_licensees).
+* __Considerations for the public:__ By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor's permission is not necessary for any reason–for example, because of any applicable exception or limitation to copyright–then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. [More considerations for the public](http://wiki.creativecommons.org/Considerations_for_licensors_and_licensees#Considerations_for_licensees).
## Creative Commons Attribution-ShareAlike 4.0 International Public License
@@ -66,7 +66,7 @@ a. ___License grant.___
A. __Offer from the Licensor – Licensed Material.__ Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License.
- B. __Additional offer from the Licensor – Adapted Material.__ Every recipient of Adapted Material from You automatically receives an offer from the Licensor to exercise the Licensed Rights in the Adapted Material under the conditions of the Adapter’s License You apply.
+ B. __Additional offer from the Licensor – Adapted Material.__ Every recipient of Adapted Material from You automatically receives an offer from the Licensor to exercise the Licensed Rights in the Adapted Material under the conditions of the Adapter's License You apply.
C. __No downstream restrictions.__ You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material.
@@ -112,7 +112,7 @@ b. ___ShareAlike.___
In addition to the conditions in Section 3(a), if You Share Adapted Material You produce, the following conditions also apply.
-1. The Adapter’s License You apply must be a Creative Commons license with the same License Elements, this version or later, or a BY-SA Compatible License.
+1. The Adapter's License You apply must be a Creative Commons license with the same License Elements, this version or later, or a BY-SA Compatible License.
2. You must include the text of, or the URI or hyperlink to, the Adapter's License You apply. You may satisfy this condition in any reasonable manner based on the medium, means, and context in which You Share Adapted Material.
@@ -170,6 +170,6 @@ c. No term or condition of this Public License will be waived and no failure to
d. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority.
-> Creative Commons is not a party to its public licenses. Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the “Licensor.” The text of the Creative Commons public licenses is dedicated to the public domain under the [CC0 Public Domain Dedication](https://creativecommons.org/publicdomain/zero/1.0/legalcode). Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at [creativecommons.org/policies](http://creativecommons.org/policies), Creative Commons does not authorize the use of the trademark “Creative Commons” or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses.
+> Creative Commons is not a party to its public licenses. Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the "Licensor." The text of the Creative Commons public licenses is dedicated to the public domain under the [CC0 Public Domain Dedication](https://creativecommons.org/publicdomain/zero/1.0/legalcode). Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at [creativecommons.org/policies](http://creativecommons.org/policies), Creative Commons does not authorize the use of the trademark "Creative Commons" or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses.
>
> Creative Commons may be contacted at creativecommons.org.
\ No newline at end of file
diff --git a/Makefile b/Makefile
index 86adfd9f8..711219f45 100644
--- a/Makefile
+++ b/Makefile
@@ -2,6 +2,7 @@
ASCIIDOC_DIR = documentation/asciidoc
HTML_DIR = documentation/html
+IMAGES_DIR = documentation/images
JEKYLL_ASSETS_DIR = jekyll-assets
SCRIPTS_DIR = scripts
DOCUMENTATION_REDIRECTS_DIR = documentation/redirects
@@ -13,22 +14,93 @@ ASCIIDOC_BUILD_DIR = $(BUILD_DIR)/jekyll
ASCIIDOC_INCLUDES_DIR = $(BUILD_DIR)/adoc_includes
AUTO_NINJABUILD = $(BUILD_DIR)/autogenerated.ninja
+PICO_SDK_DIR = lib/pico-sdk
+PICO_EXAMPLES_DIR = lib/pico-examples
+DOXYGEN_TO_ASCIIDOC_DIR = lib/doxygentoasciidoc
+ALL_SUBMODULE_CMAKELISTS = $(PICO_SDK_DIR)/CMakeLists.txt $(PICO_EXAMPLES_DIR)/CMakeLists.txt
+DOXYGEN_PICO_SDK_BUILD_DIR = build-pico-sdk-docs
+DOXYGEN_XML_DIR = $(DOXYGEN_PICO_SDK_BUILD_DIR)/combined/docs/doxygen/xml
+# The pico-sdk here needs to match up with the "from_json" entry in index.json
+ASCIIDOC_DOXYGEN_DIR = $(ASCIIDOC_DIR)/pico-sdk
+
JEKYLL_CMD = bundle exec jekyll
.DEFAULT_GOAL := html
-.PHONY: clean run_ninja clean_ninja html serve_html clean_html
+.PHONY: clean run_ninja clean_ninja html serve_html clean_html build_doxygen_xml clean_doxygen_xml build_doxygen_adoc clean_doxygen_adoc fetch_submodules clean_submodules clean_everything
$(BUILD_DIR):
@mkdir -p $@
+$(DOXYGEN_PICO_SDK_BUILD_DIR):
+ mkdir $@
+
+$(ASCIIDOC_DOXYGEN_DIR): | $(ASCIIDOC_DIR)
+ mkdir $@
+
# Delete all autogenerated files
-clean: clean_html
+clean: clean_html clean_doxygen_adoc
rm -rf $(BUILD_DIR)
+# Initialise pico-sdk submodule (and the submodules that it uses)
+$(PICO_SDK_DIR)/CMakeLists.txt $(PICO_SDK_DIR)/docs/index.h: | $(PICO_SDK_DIR)
+ git submodule update --init $(PICO_SDK_DIR)
+ git -C $(PICO_SDK_DIR) submodule update --init
+
+# Initialise pico-examples submodule
+$(PICO_EXAMPLES_DIR)/CMakeLists.txt: | $(PICO_SDK_DIR)/CMakeLists.txt $(PICO_EXAMPLES_DIR)
+ git submodule update --init $(PICO_EXAMPLES_DIR)
+
+# Initialise doxygentoasciidoc submodule
+$(DOXYGEN_TO_ASCIIDOC_DIR)/__main__.py:
+ git submodule update --init $(DOXYGEN_TO_ASCIIDOC_DIR)
+
+fetch_submodules: $(ALL_SUBMODULE_CMAKELISTS) $(DOXYGEN_TO_ASCIIDOC_DIR)/__main__.py
+
+# Get rid of the submodules
+clean_submodules:
+ git submodule deinit --all
+
+# Create the pico-sdk Doxygen XML files
+$(DOXYGEN_XML_DIR) $(DOXYGEN_XML_DIR)/index.xml: | $(ALL_SUBMODULE_CMAKELISTS) $(DOXYGEN_PICO_SDK_BUILD_DIR)
+ cmake -S $(PICO_SDK_DIR) -B $(DOXYGEN_PICO_SDK_BUILD_DIR)/combined -D PICO_EXAMPLES_PATH=../../$(PICO_EXAMPLES_DIR) -D PICO_NO_PICOTOOL=1 -D PICO_PLATFORM=combined-docs
+ cmake -S $(PICO_SDK_DIR) -B $(DOXYGEN_PICO_SDK_BUILD_DIR)/PICO_RP2040 -D PICO_EXAMPLES_PATH=../../$(PICO_EXAMPLES_DIR) -D PICO_NO_PICOTOOL=1 -D PICO_PLATFORM=rp2040
+ cmake -S $(PICO_SDK_DIR) -B $(DOXYGEN_PICO_SDK_BUILD_DIR)/PICO_RP2350 -D PICO_EXAMPLES_PATH=../../$(PICO_EXAMPLES_DIR) -D PICO_NO_PICOTOOL=1 -D PICO_PLATFORM=rp2350
+ $(MAKE) -C $(DOXYGEN_PICO_SDK_BUILD_DIR)/combined docs
+ $(MAKE) -C $(DOXYGEN_PICO_SDK_BUILD_DIR)/PICO_RP2040 docs
+ $(MAKE) -C $(DOXYGEN_PICO_SDK_BUILD_DIR)/PICO_RP2350 docs
+ python3 $(SCRIPTS_DIR)/postprocess_doxygen_xml.py $(DOXYGEN_PICO_SDK_BUILD_DIR)
+
+$(DOXYGEN_PICO_SDK_BUILD_DIR)/combined/docs/Doxyfile: | $(DOXYGEN_XML_DIR)
+
+build_doxygen_xml: | $(DOXYGEN_XML_DIR)
+
+# Clean all the Doxygen XML files
+clean_doxygen_xml:
+ rm -rf $(DOXYGEN_PICO_SDK_BUILD_DIR)
+
+# create the sdk adoc and the json file
+$(ASCIIDOC_DOXYGEN_DIR)/picosdk_index.json $(ASCIIDOC_DOXYGEN_DIR)/index_doxygen.adoc: $(ASCIIDOC_DOXYGEN_DIR) $(DOXYGEN_XML_DIR)/index.xml $(DOXYGEN_TO_ASCIIDOC_DIR)/__main__.py $(DOXYGEN_TO_ASCIIDOC_DIR)/cli.py $(DOXYGEN_TO_ASCIIDOC_DIR)/nodes.py $(DOXYGEN_TO_ASCIIDOC_DIR)/helpers.py | $(BUILD_DIR) $(DOXYGEN_TO_ASCIIDOC_DIR)/requirements.txt
+ $(MAKE) clean_ninja
+ pip3 install -r $(DOXYGEN_TO_ASCIIDOC_DIR)/requirements.txt
+ PYTHONPATH=$(DOXYGEN_TO_ASCIIDOC_DIR)/.. python3 -m doxygentoasciidoc -o $(ASCIIDOC_DOXYGEN_DIR)/all_groups.adoc $(DOXYGEN_XML_DIR)/index.xml
+ PYTHONPATH=$(DOXYGEN_TO_ASCIIDOC_DIR)/.. python3 -m doxygentoasciidoc -c -o $(ASCIIDOC_DOXYGEN_DIR)/index_doxygen.adoc $(DOXYGEN_XML_DIR)/indexpage.xml
+ PYTHONPATH=$(DOXYGEN_TO_ASCIIDOC_DIR)/.. python3 -m doxygentoasciidoc -c -o $(ASCIIDOC_DOXYGEN_DIR)/examples_page.adoc $(DOXYGEN_XML_DIR)/examples_page.xml
+ python3 $(SCRIPTS_DIR)/postprocess_doxygen_adoc.py $(ASCIIDOC_DOXYGEN_DIR)
+ -cp $(DOXYGEN_XML_DIR)/*.png $(ASCIIDOC_DOXYGEN_DIR) 2>/dev/null || true
+
+build_doxygen_adoc: $(ASCIIDOC_DOXYGEN_DIR)/index_doxygen.adoc
+
+# Clean all the Doxygen asciidoc files
+clean_doxygen_adoc:
+ if [ -d $(ASCIIDOC_DOXYGEN_DIR) ]; then $(MAKE) clean_ninja; fi
+ rm -rf $(ASCIIDOC_DOXYGEN_DIR)
+
+clean_everything: clean_submodules clean_doxygen_xml clean
+
# AUTO_NINJABUILD contains all the parts of the ninjabuild where the rules themselves depend on other files
$(AUTO_NINJABUILD): $(SCRIPTS_DIR)/create_auto_ninjabuild.py $(DOCUMENTATION_INDEX) $(SITE_CONFIG) | $(BUILD_DIR)
- $< $(DOCUMENTATION_INDEX) $(SITE_CONFIG) $(ASCIIDOC_DIR) $(SCRIPTS_DIR) $(ASCIIDOC_BUILD_DIR) $(ASCIIDOC_INCLUDES_DIR) $(JEKYLL_ASSETS_DIR) $(DOCUMENTATION_REDIRECTS_DIR) $@
+ $< $(DOCUMENTATION_INDEX) $(SITE_CONFIG) $(ASCIIDOC_DIR) $(SCRIPTS_DIR) $(ASCIIDOC_BUILD_DIR) $(ASCIIDOC_INCLUDES_DIR) $(JEKYLL_ASSETS_DIR) $(DOXYGEN_PICO_SDK_BUILD_DIR) $(DOCUMENTATION_REDIRECTS_DIR) $(IMAGES_DIR) $@
# This runs ninjabuild to build everything in the ASCIIDOC_BUILD_DIR (and ASCIIDOC_INCLUDES_DIR)
run_ninja: $(AUTO_NINJABUILD)
@@ -46,7 +118,7 @@ html: run_ninja
# Build the html output files and additionally run a small webserver for local previews
serve_html: run_ninja
- $(JEKYLL_CMD) serve
+ $(JEKYLL_CMD) serve --watch
# Delete all the files created by the 'html' target
clean_html:
diff --git a/README.md b/README.md
index a76d9defc..69df3a28f 100644
--- a/README.md
+++ b/README.md
@@ -1,139 +1,22 @@
-# Welcome to the Raspberry Pi Documentation
+
+
+
+
+
+
-This repository contains the Asciidoc source and the toolchain to build the [Raspberry Pi Documentation](https://www.raspberrypi.com/documentation/). For details of how to contribute to the documentation see the [CONTRIBUTING.md](CONTRIBUTING.md) file.
+[Website][Raspberry Pi] | [Getting started] | [Documentation] | [Contribute]
+
-**NOTE:** This repository has undergone some recent changes. See our [blog post](https://www.raspberrypi.com/blog/bring-on-the-documentation/) for more details.
+This repository contains the source and tools used to build the [Raspberry Pi Documentation](https://www.raspberrypi.com/documentation/).
-## Building the Documentation
-
-Instructions on how to checkout the `documentation` repo, and then install the toolchain needed to convert from Asciidoc to HTML and build the documentation site.
-
-### Checking out the Repository
-
-Install `git` if you don't already have it, and check out the `documentation` repo as follows,
-```
-$ git clone https://github.com/raspberrypi/documentation.git
-$ cd documentation
-```
-
-### Installing the Toolchain
-
-#### On Linux
-
-This works on both regular Debian or Ubuntu Linux — and has been tested in a minimal Docker container — and also under Raspberry Pi OS if you are working from a Raspberry Pi.
-
-You can install the necessary dependencies on Linux as follows,
-
-```
-$ sudo apt install -y ruby ruby-dev python3 python3-pip make ninja-build
-```
-
-then add these lines to the bottom of your `$HOME/.bashrc`,
-```
-export GEM_HOME="$(ruby -e 'puts Gem.user_dir')"
-export PATH="$PATH:$GEM_HOME/bin"
-```
-
-and close and relaunch your Terminal window to have these new variables activated. Finally, run
-```
-$ gem install bundler -v 2.2.15
-```
-to install the latest version of the Ruby `bundle` command.
-
-#### On macOS
-
-If you don't already have it installed you should go ahead and install [HomeBrew](https://brew.sh/),
-
-```
-$ /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"
-```
-
-Then you need to install Ruby,
-
-```
-$ brew install ruby@2.7
-$ gem install bundler -v 2.2.15
-```
-
-**NOTE:** Homebrew defaults to Ruby 3.0 which is incompatible with Asciidoctor.
-
-**IMPORTANT:** Homebrew has problems using `/bin/zsh`, you may have to change your default shell to `/bin/bash`.
-
-##### Set up Homebrew Version of Ruby
-
-If you're using `csh` or `tcsh` add the following lines to your `.cshrc` or `.tcshrc`,
-
-```
-setenv PATH /usr/local/bin:/usr/local/sbin:$PATH
-
-setenv PATH /usr/local/opt/ruby/bin:${PATH}
-setenv PATH ${PATH}:/usr/local/lib/ruby/gems/2.7.0/bin
-setenv LDFLAGS -L/usr/local/opt/ruby@2.7/lib
-setenv CPPFLAGS -I/usr/local/opt/ruby@2.7/include
-setenv PKG_CONFIG_PATH /usr/local/opt/ruby@2.7/lib/pkgconfig
-```
-
-or if you're using `bash` add the following lines to your `.bash_profile`,
-
-```
-export PATH="/usr/local/bin:/usr/local/sbin:$PATH"
-
-export PATH="/usr/local/opt/ruby/bin:$PATH"
-export PATH="$PATH:/usr/local/lib/ruby/gems/2.7.0/bin"
-export PATH="/usr/local/opt/ruby@2.7/bin:$PATH"
-export LDFLAGS="-L/usr/local/opt/ruby@2.7/lib"
-export CPPFLAGS="-I/usr/local/opt/ruby@2.7/include"
-export PKG_CONFIG_PATH="/usr/local/opt/ruby@2.7/lib/pkgconfig"
-```
-NOTE: If you are running macOS on an Apple Silicon based Mac, rather than an Intel Mac, substitute `/opt/homebrew/` for `/usr/local` in the lines dealing with `ruby@2.7` in the above block.
-
-and then open a new Terminal window to make sure you're using the right version of Python and Ruby.
-
-##### Install Dependencies
-
-Go ahead and `brew install` the other dependencies,
-
-```
-$ brew install python@3
-$ brew install ninja
-$ brew install gumbo-parser
-$ pip3 install pyyaml
-```
-
-### Install Scripting Dependencies
-
-After you've installed the toolchain (on either Linux or macOS), you'll need to install the required Ruby gems and Python modules. Make sure you're in the top-level `documentation/` directory (i.e. the one containing `Gemfile.lock` and `requirements.txt`) and then run,
-```
-$ bundle install
-```
-(which may take several minutes), followed by,
-```
-$ pip3 install --user -r requirements.txt
-```
-
-### Building the Documentation Site
-
-After you've installed both the toolchain and scripting dependencies, you can build the documentation with,
-
-```
-$ make
-```
-
-This will automatically use [Ninja build](https://ninja-build.org/) to convert the source files in `documentation/asciidoc/` to a suitable intermediate structure in `build/jekyll/`, and then use [Jekyll AsciiDoc](https://github.com/asciidoctor/jekyll-asciidoc) to convert the files in `build/jekyll/` to the final output HTML files in `documentation/html/`.
-
-You can also start a local server to view the built site by running,
-```
-$ make serve_html
-```
-
-As the local server launches, the local URL will be printed in the terminal -- open this URL in a browser to see the locally-built site.
-
-You can revert your repository to a pristine state by running,
-```
-$ make clean
-```
-which will delete the `build/` and `documentation/html/` directories.
+[Raspberry Pi]: https://www.raspberrypi.com/
+[Getting Started]: https://www.raspberrypi.com/documentation/computers/getting-started.html
+[Documentation]: https://www.raspberrypi.com/documentation/
+[Contribute]: CONTRIBUTING.md
## Licence
-The Raspberry Pi [documentation](./documentation/) is [licensed](https://github.com/raspberrypi/documentation/blob/develop/LICENSE.md) under a Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA). While the toolchain source code — which is everything outside of the top-level `documentation/` subdirectory — is Copyright © 2021 Raspberry Pi Ltd and licensed under the [BSD 3-Clause](https://opensource.org/licenses/BSD-3-Clause) licence.
+The Raspberry Pi documentation is [licensed](https://github.com/raspberrypi/documentation/blob/develop/LICENSE.md) under a Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA). Documentation tools (everything outside of the `documentation/` subdirectory) are licensed under the [BSD 3-Clause](https://opensource.org/licenses/BSD-3-Clause) licence.
diff --git a/_config.yml b/_config.yml
index d7a94ee3d..4d740515b 100644
--- a/_config.yml
+++ b/_config.yml
@@ -17,14 +17,16 @@ title: Raspberry Pi Documentation
description: >- # this means to ignore newlines until "baseurl:"
Raspberry Pi Documentation.
baseurl: "/documentation" # the subpath of your site, e.g. /blog
-url: "" # the base hostname & protocol for your site, e.g. http://example.com
+url: "https://www.raspberrypi.com/documentation" # the base hostname & protocol for your site, e.g. http://example.com
githuburl: "https://github.com/raspberrypi/documentation/"
+mainsite: https://raspberrypi.com/
githubbranch: master
githubbranch_edit: develop
# Build settings
theme: minima
plugins:
+ - asciidoctor-tabs
- jekyll-asciidoc
- jekyll-feed
@@ -36,6 +38,10 @@ destination: documentation/html
sass:
sass_dir: css
+ quiet_deps: true
+
+asciidoctor:
+ template_dir: build/jekyll/_templates
# Exclude from processing.
# The following items will not be processed, by default. Create a custom list
diff --git a/build.ninja b/build.ninja
index 53a0b3576..56acd19ae 100644
--- a/build.ninja
+++ b/build.ninja
@@ -4,6 +4,7 @@
DOCUMENTATION_IMAGES_DIR = documentation/images
GITHUB_EDIT_TEMPLATE = jekyll-assets/_includes/github_edit.adoc
HTACCESS_EXTRA = documentation/htaccess_extra.txt
+DOXYGEN_PICOSDK_INDEX_JSON = documentation/asciidoc/pico-sdk/picosdk_index.json
# this corresponds to BUILD_DIR in Makefile
builddir = build
@@ -17,9 +18,15 @@ rule create_categories_page
rule create_toc
command = $scripts_dir/create_nav.py $in $src_dir $out
+rule create_output_supplemental_data
+ command = $scripts_dir/create_output_supplemental_data.py $in $out
+
rule create_build_adoc
command = $scripts_dir/create_build_adoc.py $documentation_index $site_config $GITHUB_EDIT_TEMPLATE $in $inc_dir $out
+rule create_build_adoc_doxygen
+ command = $scripts_dir/create_build_adoc_doxygen.py $documentation_index $site_config $in $DOXYGEN_PICOSDK_INDEX_JSON $out_dir $out
+
rule create_build_adoc_include
command = $scripts_dir/create_build_adoc_include.py $site_config $GITHUB_EDIT_TEMPLATE $in $out
@@ -27,7 +34,7 @@ rule create_htaccess
command = $scripts_dir/create_htaccess.py $in $redirects_dir $out
rule create_index_json
- command = $scripts_dir/create_output_index_json.py $in $out
+ command = $scripts_dir/create_output_index_json.py $in $out $src_dir $DOCUMENTATION_IMAGES_DIR
rule create_edit_warning
command = echo "Do not edit any files in this directory. Everything will get overwritten when you run 'make'" > $out
diff --git a/documentation/asciidoc/accessories/ai-camera.adoc b/documentation/asciidoc/accessories/ai-camera.adoc
new file mode 100644
index 000000000..55d35cba5
--- /dev/null
+++ b/documentation/asciidoc/accessories/ai-camera.adoc
@@ -0,0 +1,7 @@
+include::ai-camera/about.adoc[]
+
+include::ai-camera/getting-started.adoc[]
+
+include::ai-camera/details.adoc[]
+
+include::ai-camera/model-conversion.adoc[]
diff --git a/documentation/asciidoc/accessories/ai-camera/about.adoc b/documentation/asciidoc/accessories/ai-camera/about.adoc
new file mode 100644
index 000000000..927fcf19a
--- /dev/null
+++ b/documentation/asciidoc/accessories/ai-camera/about.adoc
@@ -0,0 +1,9 @@
+[[ai-camera]]
+== About
+
+The Raspberry Pi AI Camera uses the Sony IMX500 imaging sensor to provide low-latency, high-performance AI capabilities to any camera application. Tight integration with xref:../computers/camera_software.adoc[Raspberry Pi's camera software stack] allows users to deploy their own neural network models with minimal effort.
+
+image::images/ai-camera.png[The Raspberry Pi AI Camera]
+
+This section demonstrates how to run either a pre-packaged or custom neural network model on the camera. Additionally, this section includes the steps required to interpret inference data generated by neural networks running on the IMX500 in https://github.com/raspberrypi/rpicam-apps[`rpicam-apps`] and https://github.com/raspberrypi/picamera2[Picamera2].
+
diff --git a/documentation/asciidoc/accessories/ai-camera/details.adoc b/documentation/asciidoc/accessories/ai-camera/details.adoc
new file mode 100644
index 000000000..e640f289c
--- /dev/null
+++ b/documentation/asciidoc/accessories/ai-camera/details.adoc
@@ -0,0 +1,262 @@
+
+== Under the hood
+
+=== Overview
+
+The Raspberry Pi AI Camera works differently from traditional AI-based camera image processing systems, as shown in the diagram below:
+
+image::images/imx500-comparison.svg[Traditional versus IMX500 AI camera systems]
+
+The left side demonstrates the architecture of a traditional AI camera system. In such a system, the camera delivers images to the Raspberry Pi. The Raspberry Pi processes the images and then performs AI inference. Traditional systems may use external AI accelerators (as shown) or rely exclusively on the CPU.
+
+The right side demonstrates the architecture of a system that uses IMX500. The camera module contains a small Image Signal Processor (ISP) which turns the raw camera image data into an **input tensor**. The camera module sends this tensor directly into the AI accelerator within the camera, which produces **output tensors** that contain the inferencing results. The AI accelerator sends these tensors to the Raspberry Pi. There is no need for an external accelerator, nor for the Raspberry Pi to run neural network software on the CPU.
+
+To fully understand this system, familiarise yourself with the following concepts:
+
+Input Tensor:: The part of the sensor image passed to the AI engine for inferencing. Produced by a small on-board ISP which also crops and scales the camera image to the dimensions expected by the neural network that has been loaded. The input tensor is not normally made available to applications, though it is possible to access it for debugging purposes.
+
+Region of Interest (ROI):: Specifies exactly which part of the sensor image is cropped out before being rescaled to the size demanded by the neural network. Can be queried and set by an application. The units used are always pixels in the full resolution sensor output. The default ROI setting uses the full image received from the sensor, cropping no data.
+
+Output Tensors:: The results of inferencing performed by the neural network. The precise number and shape of the outputs depend on the neural network. Application code must understand how to handle the tensors.
+
+=== System architecture
+
+The diagram below shows the various camera software components (in green) used during our imaging/inference use case with the Raspberry Pi AI Camera module hardware (in red):
+
+image::images/imx500-block-diagram.svg[IMX500 block diagram]
+
+At startup, the IMX500 sensor module loads firmware to run a particular neural network model. During streaming, the IMX500 generates _both_ an image stream and an inference stream. This inference stream holds the inputs and outputs of the neural network model, also known as input/output **tensors**.
+
+=== Device drivers
+
+At the lowest level, the IMX500 sensor kernel driver configures the camera module over the I2C bus. The CSI2 driver (`CFE` on Pi 5, `Unicam` on all other Pi platforms) sets up the receiver to write the image data stream into a frame buffer, together with the embedded data and inference data streams into another buffer in memory.
+
+The firmware files also transfer over the I2C bus wires. On most devices, this uses the standard I2C protocol, but Raspberry Pi 5 uses a custom high speed protocol. The RP2040 SPI driver in the kernel handles firmware file transfer, since the transfer uses the RP2040 microcontroller. The microcontroller bridges the I2C transfers from the kernel to the IMX500 via a SPI bus. Additionally, the RP2040 caches firmware files in on-board storage. This avoids the need to transfer entire firmware blobs over the I2C bus, significantly speeding up firmware loading for firmware you've already used.
+
+=== `libcamera`
+
+Once `libcamera` dequeues the image and inference data buffers from the kernel, the IMX500 specific `cam-helper` library (part of the Raspberry Pi IPA within `libcamera`) parses the inference buffer to access the input/output tensors. These tensors are packaged as Raspberry Pi vendor-specific https://libcamera.org/api-html/namespacelibcamera_1_1controls.html[`libcamera` controls]. `libcamera` returns the following controls:
+
+[%header,cols="a,a"]
+|===
+| Control
+| Description
+
+| `CnnOutputTensor`
+| Floating point array storing the output tensors.
+
+| `CnnInputTensor`
+| Floating point array storing the input tensor.
+
+| `CnnOutputTensorInfo`
+| Network specific parameters describing the output tensors' structure:
+
+[source,c]
+----
+struct OutputTensorInfo {
+ uint32_t tensorDataNum;
+ uint32_t numDimensions;
+ uint16_t size[MaxNumDimensions];
+};
+
+struct CnnOutputTensorInfo {
+ char networkName[NetworkNameLen];
+ uint32_t numTensors;
+ OutputTensorInfo info[MaxNumTensors];
+};
+----
+
+| `CnnInputTensorInfo`
+| Network specific parameters describing the input tensor's structure:
+
+[source,c]
+----
+struct CnnInputTensorInfo {
+ char networkName[NetworkNameLen];
+ uint32_t width;
+ uint32_t height;
+ uint32_t numChannels;
+};
+----
+
+|===
+
+=== `rpicam-apps`
+
+`rpicam-apps` provides an IMX500 post-processing stage base class that implements helpers for IMX500 post-processing stages: https://github.com/raspberrypi/rpicam-apps/blob/main/post_processing_stages/imx500/imx500_post_processing_stage.hpp[`IMX500PostProcessingStage`]. Use this base class to derive a new post-processing stage for any neural network model running on the IMX500. For an example, see https://github.com/raspberrypi/rpicam-apps/blob/main/post_processing_stages/imx500/imx500_object_detection.cpp[`imx500_object_detection.cpp`]:
+
+[source,cpp]
+----
+class ObjectDetection : public IMX500PostProcessingStage
+{
+public:
+ ObjectDetection(RPiCamApp *app) : IMX500PostProcessingStage(app) {}
+
+ char const *Name() const override;
+
+ void Read(boost::property_tree::ptree const ¶ms) override;
+
+ void Configure() override;
+
+ bool Process(CompletedRequestPtr &completed_request) override;
+};
+----
+
+For every frame received by the application, the `Process()` function is called (`ObjectDetection::Process()` in the above case). In this function, you can extract the output tensor for further processing or analysis:
+
+[source,cpp]
+----
+auto output = completed_request->metadata.get(controls::rpi::CnnOutputTensor);
+if (!output)
+{
+ LOG_ERROR("No output tensor found in metadata!");
+ return false;
+}
+
+std::vector output_tensor(output->data(), output->data() + output->size());
+----
+
+Once completed, the final results can either be visualised or saved in metadata and consumed by either another downstream stage, or the top level application itself. In the object inference case:
+
+[source,cpp]
+----
+if (objects.size())
+ completed_request->post_process_metadata.Set("object_detect.results", objects);
+----
+
+The `object_detect_draw_cv` post-processing stage running downstream fetches these results from the metadata and draws the bounding boxes onto the image in the `ObjectDetectDrawCvStage::Process()` function:
+
+[source,cpp]
+----
+std::vector detections;
+completed_request->post_process_metadata.Get("object_detect.results", detections);
+----
+
+The following table contains a full list of helper functions provided by `IMX500PostProcessingStage`:
+
+[%header,cols="a,a"]
+|===
+| Function
+| Description
+
+| `Read()`
+| Typically called from `::Read()`, this function reads the config parameters for input tensor parsing and saving.
+
+This function also reads the neural network model file string (`"network_file"`) and sets up the firmware to be loaded on camera open.
+
+| `Process()`
+| Typically called from `::Process()` this function processes and saves the input tensor to a file if required by the JSON config file.
+
+| `SetInferenceRoiAbs()`
+| Sets an absolute region of interest (ROI) crop rectangle on the sensor image to use for inferencing on the IMX500.
+
+| `SetInferenceRoiAuto()`
+| Automatically calculates region of interest (ROI) crop rectangle on the sensor image to preserve the input tensor aspect ratio for a given neural network.
+
+| `ShowFwProgressBar()`
+| Displays a progress bar on the console showing the progress of the neural network firmware upload to the IMX500.
+
+| `ConvertInferenceCoordinates()`
+| Converts from the input tensor coordinate space to the final ISP output image space.
+
+There are a number of scaling/cropping/translation operations occurring from the original sensor image to the fully processed ISP output image. This function converts coordinates provided by the output tensor to the equivalent coordinates after performing these operations.
+
+|===
+
+=== Picamera2
+
+IMX500 integration in Picamera2 is very similar to what is available in `rpicam-apps`. Picamera2 has an IMX500 helper class that provides the same functionality as the `rpicam-apps` `IMX500PostProcessingStage` base class. This can be imported to any Python script with:
+
+[source,python]
+----
+from picamera2.devices.imx500 import IMX500
+
+# This must be called before instantiation of Picamera2
+imx500 = IMX500(model_file)
+----
+
+To retrieve the output tensors, fetch them from the controls. You can then apply additional processing in your Python script.
+
+For example, in an object inference use case such as https://github.com/raspberrypi/picamera2/tree/main/examples/imx500/imx500_object_detection_demo.py[imx500_object_detection_demo.py], the object bounding boxes and confidence values are extracted in `parse_detections()` and the boxes are drawn on the image in `draw_detections()`:
+
+[source,python]
+----
+class Detection:
+ def __init__(self, coords, category, conf, metadata):
+ """Create a Detection object, recording the bounding box, category and confidence."""
+ self.category = category
+ self.conf = conf
+ obj_scaled = imx500.convert_inference_coords(coords, metadata, picam2)
+ self.box = (obj_scaled.x, obj_scaled.y, obj_scaled.width, obj_scaled.height)
+
+def draw_detections(request, detections, stream="main"):
+ """Draw the detections for this request onto the ISP output."""
+ labels = get_labels()
+ with MappedArray(request, stream) as m:
+ for detection in detections:
+ x, y, w, h = detection.box
+ label = f"{labels[int(detection.category)]} ({detection.conf:.2f})"
+ cv2.putText(m.array, label, (x + 5, y + 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
+ cv2.rectangle(m.array, (x, y), (x + w, y + h), (0, 0, 255, 0))
+ if args.preserve_aspect_ratio:
+ b = imx500.get_roi_scaled(request)
+ cv2.putText(m.array, "ROI", (b.x + 5, b.y + 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1)
+ cv2.rectangle(m.array, (b.x, b.y), (b.x + b.width, b.y + b.height), (255, 0, 0, 0))
+
+def parse_detections(request, stream='main'):
+ """Parse the output tensor into a number of detected objects, scaled to the ISP output."""
+ outputs = imx500.get_outputs(request.get_metadata())
+ boxes, scores, classes = outputs[0][0], outputs[1][0], outputs[2][0]
+ detections = [ Detection(box, category, score, metadata)
+ for box, score, category in zip(boxes, scores, classes) if score > threshold]
+ draw_detections(request, detections, stream)
+----
+
+Unlike the `rpicam-apps` example, this example applies no additional hysteresis or temporal filtering.
+
+The IMX500 class in Picamera2 provides the following helper functions:
+
+[%header,cols="a,a"]
+|===
+| Function
+| Description
+
+| `IMX500.get_full_sensor_resolution()`
+| Return the full sensor resolution of the IMX500.
+
+| `IMX500.config`
+| Returns a dictionary of the neural network configuration.
+
+| `IMX500.convert_inference_coords(coords, metadata, picamera2)`
+| Converts the coordinates _coords_ from the input tensor coordinate space to the final ISP output image space. Must be passed Picamera2's image metadata for the image, and the Picamera2 object.
+
+There are a number of scaling/cropping/translation operations occurring from the original sensor image to the fully processed ISP output image. This function converts coordinates provided by the output tensor to the equivalent coordinates after performing these operations.
+
+| `IMX500.show_network_fw_progress_bar()`
+| Displays a progress bar on the console showing the progress of the neural network firmware upload to the IMX500.
+
+| `IMX500.get_roi_scaled(request)`
+| Returns the region of interest (ROI) in the ISP output image coordinate space.
+
+| `IMX500.get_isp_output_size(picamera2)`
+| Returns the ISP output image size.
+
+| `IMX500.get_input_size()`
+| Returns the input tensor size based on the neural network model used.
+
+| `IMX500.get_outputs(metadata)`
+| Returns the output tensors from the Picamera2 image metadata.
+
+| `IMX500.get_output_shapes(metadata)`
+| Returns the shape of the output tensors from the Picamera2 image metadata for the neural network model used.
+
+| `IMX500.set_inference_roi_abs(rectangle)`
+| Sets the region of interest (ROI) crop rectangle which determines which part of the sensor image is converted to the input tensor that is used for inferencing on the IMX500. The region of interest should be specified in units of pixels at the full sensor resolution, as a `(x_offset, y_offset, width, height)` tuple.
+
+| `IMX500.set_inference_aspect_ratio(aspect_ratio)`
+| Automatically calculates region of interest (ROI) crop rectangle on the sensor image to preserve the given aspect ratio. To make the ROI aspect ratio exactly match the input tensor for this network, use `imx500.set_inference_aspect_ratio(imx500.get_input_size())`.
+
+| `IMX500.get_kpi_info(metadata)`
+| Returns the frame-level performance indicators logged by the IMX500 for the given image metadata.
+
+|===
diff --git a/documentation/asciidoc/accessories/ai-camera/getting-started.adoc b/documentation/asciidoc/accessories/ai-camera/getting-started.adoc
new file mode 100644
index 000000000..b23720895
--- /dev/null
+++ b/documentation/asciidoc/accessories/ai-camera/getting-started.adoc
@@ -0,0 +1,141 @@
+== Getting started
+
+The instructions below describe how to run the pre-packaged MobileNet SSD and PoseNet neural network models on the Raspberry Pi AI Camera.
+
+=== Hardware setup
+
+Attach the camera to your Raspberry Pi 5 board following the instructions at xref:camera.adoc#install-a-raspberry-pi-camera[Install a Raspberry Pi Camera].
+
+=== Prerequisites
+
+These instructions assume you are using the AI Camera attached to either a Raspberry Pi 4 Model B or Raspberry Pi 5 board. With minor changes, you can follow these instructions on other Raspberry Pi models with a camera connector, including the Raspberry Pi Zero 2 W and Raspberry Pi 3 Model B+.
+
+First, ensure that your Raspberry Pi runs the latest software. Run the following command to update:
+
+[source,console]
+----
+$ sudo apt update && sudo apt full-upgrade
+----
+
+=== Install the IMX500 firmware
+
+The AI camera must download runtime firmware onto the IMX500 sensor during startup. To install these firmware files onto your Raspberry Pi, run the following command:
+
+[source,console]
+----
+$ sudo apt install imx500-all
+----
+
+This command:
+
+* installs the `/lib/firmware/imx500_loader.fpk` and `/lib/firmware/imx500_firmware.fpk` firmware files required to operate the IMX500 sensor
+* places a number of neural network model firmware files in `/usr/share/imx500-models/`
+* installs the IMX500 post-processing software stages in `rpicam-apps`
+* installs the Sony network model packaging tools
+
+NOTE: The IMX500 kernel device driver loads all the firmware files when the camera starts. This may take several minutes if the neural network model firmware has not been previously cached. The demos below display a progress bar on the console to indicate firmware loading progress.
+
+=== Reboot
+
+Now that you've installed the prerequisites, restart your Raspberry Pi:
+
+[source,console]
+----
+$ sudo reboot
+----
+
+== Run example applications
+
+Once all the system packages are updated and firmware files installed, we can start running some example applications. As mentioned earlier, the Raspberry Pi AI Camera integrates fully with `libcamera`, `rpicam-apps`, and `Picamera2`.
+
+=== `rpicam-apps`
+
+The xref:../computers/camera_software.adoc#rpicam-apps[`rpicam-apps` camera applications] include IMX500 object detection and pose estimation stages that can be run in the post-processing pipeline. For more information about the post-processing pipeline, see xref:../computers/camera_software.adoc#post-process-file[the post-processing documentation].
+
+The examples on this page use post-processing JSON files located in `/usr/share/rpi-camera-assets/`.
+
+==== Object detection
+
+The MobileNet SSD neural network performs basic object detection, providing bounding boxes and confidence values for each object found. `imx500_mobilenet_ssd.json` contains the configuration parameters for the IMX500 object detection post-processing stage using the MobileNet SSD neural network.
+
+`imx500_mobilenet_ssd.json` declares a post-processing pipeline that contains two stages:
+
+. `imx500_object_detection`, which picks out bounding boxes and confidence values generated by the neural network in the output tensor
+. `object_detect_draw_cv`, which draws bounding boxes and labels on the image
+
+The MobileNet SSD tensor requires no significant post-processing on your Raspberry Pi to generate the final output of bounding boxes. All object detection runs directly on the AI Camera.
+
+The following command runs `rpicam-hello` with object detection post-processing:
+
+[source,console]
+----
+$ rpicam-hello -t 0s --post-process-file /usr/share/rpi-camera-assets/imx500_mobilenet_ssd.json --viewfinder-width 1920 --viewfinder-height 1080 --framerate 30
+----
+
+After running the command, you should see a viewfinder that overlays bounding boxes on objects recognised by the neural network:
+
+image::images/imx500-mobilenet.jpg[IMX500 MobileNet]
+
+To record video with object detection overlays, use `rpicam-vid` instead:
+
+[source,console]
+----
+$ rpicam-vid -t 10s -o output.264 --post-process-file /usr/share/rpi-camera-assets/imx500_mobilenet_ssd.json --width 1920 --height 1080 --framerate 30
+----
+
+You can configure the `imx500_object_detection` stage in many ways.
+
+For example, `max_detections` defines the maximum number of objects that the pipeline will detect at any given time. `threshold` defines the minimum confidence value required for the pipeline to consider any input as an object.
+
+The raw inference output data of this network can be quite noisy, so this stage also performs some temporal filtering and applies hysteresis. To disable this filtering, remove the `temporal_filter` config block.
+
+==== Pose estimation
+
+The PoseNet neural network performs pose estimation, labelling key points on the body associated with joints and limbs. `imx500_posenet.json` contains the configuration parameters for the IMX500 pose estimation post-processing stage using the PoseNet neural network.
+
+`imx500_posenet.json` declares a post-processing pipeline that contains two stages:
+
+* `imx500_posenet`, which fetches the raw output tensor from the PoseNet neural network
+* `plot_pose_cv`, which draws line overlays on the image
+
+The AI Camera performs basic detection, but the output tensor requires additional post-processing on your host Raspberry Pi to produce final output.
+
+The following command runs `rpicam-hello` with pose estimation post-processing:
+
+[source,console]
+----
+$ rpicam-hello -t 0s --post-process-file /usr/share/rpi-camera-assets/imx500_posenet.json --viewfinder-width 1920 --viewfinder-height 1080 --framerate 30
+----
+
+image::images/imx500-posenet.jpg[IMX500 PoseNet]
+
+You can configure the `imx500_posenet` stage in many ways.
+
+For example, `max_detections` defines the maximum number of bodies that the pipeline will detect at any given time. `threshold` defines the minimum confidence value required for the pipeline to consider input as a body.
+
+=== Picamera2
+
+For examples of image classification, object detection, object segmentation, and pose estimation using Picamera2, see https://github.com/raspberrypi/picamera2/blob/main/examples/imx500/[the `picamera2` GitHub repository].
+
+Most of the examples use OpenCV for some additional processing. To install the dependencies required to run OpenCV, run the following command:
+
+[source,console]
+----
+$ sudo apt install python3-opencv python3-munkres
+----
+
+Now download https://github.com/raspberrypi/picamera2[the `picamera2` repository] to your Raspberry Pi to run the examples. You'll find example files in the root directory, with additional information in the `README.md` file.
+
+Run the following script from the repository to run object detection:
+
+[source,console]
+----
+$ python imx500_object_detection_demo.py --model /usr/share/imx500-models/imx500_network_ssd_mobilenetv2_fpnlite_320x320_pp.rpk
+----
+
+To try pose estimation in Picamera2, run the following script from the repository:
+
+[source,console]
+----
+$ python imx500_pose_estimation_higherhrnet_demo.py
+----
diff --git a/documentation/asciidoc/accessories/ai-camera/images/ai-camera.png b/documentation/asciidoc/accessories/ai-camera/images/ai-camera.png
new file mode 100644
index 000000000..a0186287c
Binary files /dev/null and b/documentation/asciidoc/accessories/ai-camera/images/ai-camera.png differ
diff --git a/documentation/asciidoc/accessories/ai-camera/images/imx500-block-diagram.svg b/documentation/asciidoc/accessories/ai-camera/images/imx500-block-diagram.svg
new file mode 100644
index 000000000..142854adb
--- /dev/null
+++ b/documentation/asciidoc/accessories/ai-camera/images/imx500-block-diagram.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/documentation/asciidoc/accessories/ai-camera/images/imx500-comparison.svg b/documentation/asciidoc/accessories/ai-camera/images/imx500-comparison.svg
new file mode 100644
index 000000000..5355ecb23
--- /dev/null
+++ b/documentation/asciidoc/accessories/ai-camera/images/imx500-comparison.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/documentation/asciidoc/accessories/ai-camera/images/imx500-mobilenet.jpg b/documentation/asciidoc/accessories/ai-camera/images/imx500-mobilenet.jpg
new file mode 100644
index 000000000..871f7b9eb
Binary files /dev/null and b/documentation/asciidoc/accessories/ai-camera/images/imx500-mobilenet.jpg differ
diff --git a/documentation/asciidoc/accessories/ai-camera/images/imx500-posenet.jpg b/documentation/asciidoc/accessories/ai-camera/images/imx500-posenet.jpg
new file mode 100644
index 000000000..0c145d748
Binary files /dev/null and b/documentation/asciidoc/accessories/ai-camera/images/imx500-posenet.jpg differ
diff --git a/documentation/asciidoc/accessories/ai-camera/model-conversion.adoc b/documentation/asciidoc/accessories/ai-camera/model-conversion.adoc
new file mode 100644
index 000000000..f449476c6
--- /dev/null
+++ b/documentation/asciidoc/accessories/ai-camera/model-conversion.adoc
@@ -0,0 +1,106 @@
+== Model deployment
+
+To deploy a new neural network model to the Raspberry Pi AI Camera, complete the following steps:
+
+. Provide a floating-point neural network model (PyTorch or TensorFlow).
+. Run the model through Edge-MDT (Edge AI Model Development Toolkit).
+.. *Quantise* and compress the model so that it can run using the resources available on the IMX500 camera module.
+.. *Convert* the compressed model to IMX500 format.
+. Package the model into a firmware file that can be loaded at runtime onto the camera.
+
+The first two steps will normally be performed on a more powerful computer such as a desktop or server. You must run the final packaging step on a Raspberry Pi.
+
+=== Model creation
+
+The creation of neural network models is beyond the scope of this guide. Existing models can be re-used, or new ones created using popular AI frameworks like TensorFlow or PyTorch.
+
+For more information, see the official https://developer.aitrios.sony-semicon.com/en/raspberrypi-ai-camera[AITRIOS developer website].
+
+=== Model compression and conversion
+
+==== Edge-MDT installation
+
+The Edge-MDT (Edge AI Model Development Toolkit) software package installs all the tools required to quantise, compress, and convert models to run on your IMX500 device.
+
+The Edge-MDT package takes a parameter to select between installing the PyTorch or TensorFlow version of the tools.
+
+[tabs]
+======
+PyTorch::
++
+[source,console]
+----
+$ pip install edge-mdt[pt]
+----
+
+TensorFlow::
++
+[source,console]
+----
+$ pip install edge-mdt[tf]
+----
++
+TIP: Always use the same version of TensorFlow you used to compress your model.
+======
+
+If you need to install both packages, use two separate Python virtual environments. This prevents TensorFlow and PyTorch from causing conflicts with each other.
+
+==== Model Optimization
+
+Models are quantised and compressed using Sony's Model Compression Toolkit (MCT). This tool is automatically installed as part of the Edge-MDT installation step. For more information, see the https://github.com/sony/model_optimization[Sony model optimization GitHub repository].
+
+The Model Compression Toolkit generates a quantised model in the following formats:
+
+* Keras (TensorFlow)
+* ONNX (PyTorch)
+
+=== Conversion
+
+The converter is a command line application that compiles the quantised model (in .onnx or .keras formats) into a binary file that can be packaged and loaded onto the AI Camera. This tool is automatically installed as part of the Edge-MDT installation step.
+
+To convert a model:
+
+[tabs]
+======
+PyTorch::
++
+[source,console]
+----
+$ imxconv-pt -i <path to input model file> -o <output folder>
+----
+
+TensorFlow::
++
+[source,console]
+----
+$ imxconv-tf -i <path to input model file> -o <output folder>
+----
+======
+
+IMPORTANT: For optimal use of the memory available to the accelerator on the IMX500 sensor, add `--no-input-persistency` to the above commands. However, this will disable input tensor generation that may be used for debugging purposes.
+
+Both commands create an output folder that contains a memory usage report and a `packerOut.zip` file.
+
+For more information on the model conversion process, see the official https://developer.aitrios.sony-semicon.com/en/raspberrypi-ai-camera/documentation/imx500-converter[Sony IMX500 Converter documentation].
+
+=== Packaging
+
+IMPORTANT: You must run this step on a Raspberry Pi.
+
+The final step packages the model into an RPK file. When running the neural network model, we'll upload this file to the AI Camera. Before proceeding, run the following command to install the necessary tools:
+
+[source,console]
+----
+$ sudo apt install imx500-tools
+----
+
+To package the model into an RPK file, run the following command:
+
+[source,console]
+----
+$ imx500-package -i <path to packerOut.zip> -o <output folder>
+----
+
+This command should create a file named `network.rpk` in the output folder. You'll pass the name of this file to your IMX500 camera applications.
+
+For a more comprehensive set of instructions and further specifics on the tools used, see the https://developer.aitrios.sony-semicon.com/en/raspberrypi-ai-camera/documentation/imx500-packager[Sony IMX500 Packager documentation].
diff --git a/documentation/asciidoc/accessories/ai-hat-plus.adoc b/documentation/asciidoc/accessories/ai-hat-plus.adoc
new file mode 100644
index 000000000..dc6a3a7cf
--- /dev/null
+++ b/documentation/asciidoc/accessories/ai-hat-plus.adoc
@@ -0,0 +1,5 @@
+include::ai-hat-plus/about.adoc[]
+
+== Product brief
+
+For more information about the AI HAT+, including mechanical specifications and operating environment limitations, see the https://datasheets.raspberrypi.com/ai-hat-plus/raspberry-pi-ai-hat-plus-product-brief.pdf[product brief].
diff --git a/documentation/asciidoc/accessories/ai-hat-plus/about.adoc b/documentation/asciidoc/accessories/ai-hat-plus/about.adoc
new file mode 100644
index 000000000..98f1923bf
--- /dev/null
+++ b/documentation/asciidoc/accessories/ai-hat-plus/about.adoc
@@ -0,0 +1,75 @@
+[[ai-hat-plus]]
+== About
+
+.The 26 tera-operations per second (TOPS) Raspberry Pi AI HAT+
+image::images/ai-hat-plus-hero.jpg[width="80%"]
+
+The Raspberry Pi AI HAT+ add-on board has a built-in Hailo AI accelerator compatible with
+Raspberry Pi 5. The NPU in the AI HAT+ can be used for applications including process control, security, home automation, and robotics.
+
+The AI HAT+ is available in 13 and 26 tera-operations per second (TOPS) variants, built around the Hailo-8L and Hailo-8 neural network inference accelerators. The 13 TOPS variant works best with moderate workloads, with performance similar to the xref:ai-kit.adoc[AI Kit]. The 26 TOPS variant can run larger networks, can run networks faster, and can more effectively run multiple networks simultaneously.
+
+The AI HAT+ communicates using Raspberry Pi 5’s PCIe interface. The host Raspberry Pi 5 automatically detects the on-board Hailo accelerator and uses the NPU for supported AI computing tasks. Raspberry Pi OS's built-in `rpicam-apps` camera applications automatically use the NPU to run compatible post-processing tasks.
+
+[[ai-hat-plus-installation]]
+== Install
+
+To use the AI HAT+, you will need:
+
+* a Raspberry Pi 5
+
+Each AI HAT+ comes with a ribbon cable, GPIO stacking header, and mounting hardware. Complete the following instructions to install your AI HAT+:
+
+. First, ensure that your Raspberry Pi runs the latest software. Run the following command to update:
++
+[source,console]
+----
+$ sudo apt update && sudo apt full-upgrade
+----
+
+. Next, xref:../computers/raspberry-pi.adoc#update-the-bootloader-configuration[ensure that your Raspberry Pi firmware is up-to-date]. Run the following command to see what firmware you're running:
++
+[source,console]
+----
+$ sudo rpi-eeprom-update
+----
++
+If you see 6 December 2023 or a later date, proceed to the next step. If you see a date earlier than 6 December 2023, run the following command to open the Raspberry Pi Configuration CLI:
++
+[source,console]
+----
+$ sudo raspi-config
+----
++
+Under `Advanced Options` > `Bootloader Version`, choose `Latest`. Then, exit `raspi-config` with `Finish` or the *Escape* key.
++
+Run the following command to update your firmware to the latest version:
++
+[source,console]
+----
+$ sudo rpi-eeprom-update -a
+----
++
+Then, reboot with `sudo reboot`.
+
+. Disconnect the Raspberry Pi from power before beginning installation.
+
+. For the best performance, we recommend using the AI HAT+ with the Raspberry Pi Active Cooler. If you have an Active Cooler, install it before installing the AI HAT+.
++
+--
+image::images/ai-hat-plus-installation-01.png[width="60%"]
+--
+. Install the spacers using four of the provided screws. Firmly press the GPIO stacking header on top of the Raspberry Pi GPIO pins; orientation does not matter as long as all pins fit into place. Disconnect the ribbon cable from the AI HAT+, and insert the other end into the PCIe port of your Raspberry Pi. Lift the ribbon cable holder from both sides, then insert the cable with the copper contact points facing inward, towards the USB ports. With the ribbon cable fully and evenly inserted into the PCIe port, push the cable holder down from both sides to secure the ribbon cable firmly in place.
++
+--
+image::images/ai-hat-plus-installation-02.png[width="60%"]
+--
+. Set the AI HAT+ on top of the spacers, and use the four remaining screws to secure it in place.
+
+. Insert the ribbon cable into the slot on the AI HAT+. Lift the ribbon cable holder from both sides, then insert the cable with the copper contact points facing up. With the ribbon cable fully and evenly inserted into the port, push the cable holder down from both sides to secure the ribbon cable firmly in place.
+
+. Congratulations, you have successfully installed the AI HAT+. Connect your Raspberry Pi to power; Raspberry Pi OS will automatically detect the AI HAT+.
+
+== Get started with AI on your Raspberry Pi
+
+To start running AI accelerated applications on your Raspberry Pi, check out our xref:../computers/ai.adoc[Getting Started with the AI Kit and AI HAT+] guide.
diff --git a/documentation/asciidoc/accessories/ai-hat-plus/images/ai-hat-plus-hero.jpg b/documentation/asciidoc/accessories/ai-hat-plus/images/ai-hat-plus-hero.jpg
new file mode 100644
index 000000000..08064ca25
Binary files /dev/null and b/documentation/asciidoc/accessories/ai-hat-plus/images/ai-hat-plus-hero.jpg differ
diff --git a/documentation/asciidoc/accessories/ai-hat-plus/images/ai-hat-plus-installation-01.png b/documentation/asciidoc/accessories/ai-hat-plus/images/ai-hat-plus-installation-01.png
new file mode 100644
index 000000000..33fb88280
Binary files /dev/null and b/documentation/asciidoc/accessories/ai-hat-plus/images/ai-hat-plus-installation-01.png differ
diff --git a/documentation/asciidoc/accessories/ai-hat-plus/images/ai-hat-plus-installation-02.png b/documentation/asciidoc/accessories/ai-hat-plus/images/ai-hat-plus-installation-02.png
new file mode 100644
index 000000000..b2a60016a
Binary files /dev/null and b/documentation/asciidoc/accessories/ai-hat-plus/images/ai-hat-plus-installation-02.png differ
diff --git a/documentation/asciidoc/accessories/ai-kit.adoc b/documentation/asciidoc/accessories/ai-kit.adoc
new file mode 100644
index 000000000..c5d54d1d4
--- /dev/null
+++ b/documentation/asciidoc/accessories/ai-kit.adoc
@@ -0,0 +1,6 @@
+include::ai-kit/about.adoc[]
+
+== Product brief
+
+For more information about the AI Kit, including mechanical specifications and operating environment limitations, see the https://datasheets.raspberrypi.com/ai-kit/raspberry-pi-ai-kit-product-brief.pdf[product brief].
+
diff --git a/documentation/asciidoc/accessories/ai-kit/about.adoc b/documentation/asciidoc/accessories/ai-kit/about.adoc
new file mode 100644
index 000000000..bc93a483f
--- /dev/null
+++ b/documentation/asciidoc/accessories/ai-kit/about.adoc
@@ -0,0 +1,93 @@
+[[ai-kit]]
+== About
+
+.The Raspberry Pi AI Kit
+image::images/ai-kit.jpg[width="80%"]
+
+The Raspberry Pi AI Kit bundles the xref:m2-hat-plus.adoc#m2-hat-plus[Raspberry Pi M.2 HAT+] with a Hailo AI acceleration module for use with Raspberry Pi 5. The kit contains the following:
+
+* Hailo AI module containing a Neural Processing Unit (NPU)
+* Raspberry Pi M.2 HAT+, to connect the AI module to your Raspberry Pi 5
+* thermal pad pre-fitted between the module and the M.2 HAT+
+* mounting hardware kit
+* 16mm stacking GPIO header
+
+== AI module features
+
+* 13 tera-operations per second (TOPS) neural network inference accelerator built around the Hailo-8L chip.
+* M.2 2242 form factor
+
+[[ai-kit-installation]]
+== Install
+
+To use the AI Kit, you will need:
+
+* a Raspberry Pi 5
+
+Each AI Kit comes with a pre-installed AI module, ribbon cable, GPIO stacking header, and mounting hardware. Complete the following instructions to install your AI Kit:
+
+. First, ensure that your Raspberry Pi runs the latest software. Run the following command to update:
++
+[source,console]
+----
+$ sudo apt update && sudo apt full-upgrade
+----
+
+. Next, xref:../computers/raspberry-pi.adoc#update-the-bootloader-configuration[ensure that your Raspberry Pi firmware is up-to-date]. Run the following command to see what firmware you're running:
++
+[source,console]
+----
+$ sudo rpi-eeprom-update
+----
++
+If you see 6 December 2023 or a later date, proceed to the next step. If you see a date earlier than 6 December 2023, run the following command to open the Raspberry Pi Configuration CLI:
++
+[source,console]
+----
+$ sudo raspi-config
+----
++
+Under `Advanced Options` > `Bootloader Version`, choose `Latest`. Then, exit `raspi-config` with `Finish` or the *Escape* key.
++
+Run the following command to update your firmware to the latest version:
++
+[source,console]
+----
+$ sudo rpi-eeprom-update -a
+----
++
+Then, reboot with `sudo reboot`.
+
+. Disconnect the Raspberry Pi from power before beginning installation.
+
+. For the best performance, we recommend using the AI Kit with the Raspberry Pi Active Cooler. If you have an Active Cooler, install it before installing the AI Kit.
++
+--
+image::images/ai-kit-installation-01.png[width="60%"]
+--
+. Install the spacers using four of the provided screws. Firmly press the GPIO stacking header on top of the Raspberry Pi GPIO pins; orientation does not matter as long as all pins fit into place. Disconnect the ribbon cable from the AI Kit, and insert the other end into the PCIe port of your Raspberry Pi. Lift the ribbon cable holder from both sides, then insert the cable with the copper contact points facing inward, towards the USB ports. With the ribbon cable fully and evenly inserted into the PCIe port, push the cable holder down from both sides to secure the ribbon cable firmly in place.
++
+--
+image::images/ai-kit-installation-02.png[width="60%"]
+--
+. Set the AI Kit on top of the spacers, and use the four remaining screws to secure it in place.
++
+--
+image::images/ai-kit-installation-03.png[width="60%"]
+--
+. Insert the ribbon cable into the slot on the AI Kit. Lift the ribbon cable holder from both sides, then insert the cable with the copper contact points facing up. With the ribbon cable fully and evenly inserted into the port, push the cable holder down from both sides to secure the ribbon cable firmly in place.
++
+--
+image::images/ai-kit-installation-04.png[width="60%"]
+--
+. Congratulations, you have successfully installed the AI Kit. Connect your Raspberry Pi to power; Raspberry Pi OS will automatically detect the AI Kit.
++
+--
+image::images/ai-kit-installation-05.png[width="60%"]
+--
+
+WARNING: Always disconnect your Raspberry Pi from power before connecting or disconnecting a device from the M.2 slot.
+
+== Get started with AI on your Raspberry Pi
+
+To start running AI accelerated applications on your Raspberry Pi, check out our xref:../computers/ai.adoc[Getting Started with the AI Kit and AI HAT+] guide.
diff --git a/documentation/asciidoc/accessories/ai-kit/images/ai-kit-installation-01.png b/documentation/asciidoc/accessories/ai-kit/images/ai-kit-installation-01.png
new file mode 100644
index 000000000..33fb88280
Binary files /dev/null and b/documentation/asciidoc/accessories/ai-kit/images/ai-kit-installation-01.png differ
diff --git a/documentation/asciidoc/accessories/ai-kit/images/ai-kit-installation-02.png b/documentation/asciidoc/accessories/ai-kit/images/ai-kit-installation-02.png
new file mode 100644
index 000000000..b2a60016a
Binary files /dev/null and b/documentation/asciidoc/accessories/ai-kit/images/ai-kit-installation-02.png differ
diff --git a/documentation/asciidoc/accessories/ai-kit/images/ai-kit-installation-03.png b/documentation/asciidoc/accessories/ai-kit/images/ai-kit-installation-03.png
new file mode 100644
index 000000000..2e821583c
Binary files /dev/null and b/documentation/asciidoc/accessories/ai-kit/images/ai-kit-installation-03.png differ
diff --git a/documentation/asciidoc/accessories/ai-kit/images/ai-kit-installation-04.png b/documentation/asciidoc/accessories/ai-kit/images/ai-kit-installation-04.png
new file mode 100644
index 000000000..7bf45e816
Binary files /dev/null and b/documentation/asciidoc/accessories/ai-kit/images/ai-kit-installation-04.png differ
diff --git a/documentation/asciidoc/accessories/ai-kit/images/ai-kit-installation-05.png b/documentation/asciidoc/accessories/ai-kit/images/ai-kit-installation-05.png
new file mode 100644
index 000000000..67b0d969a
Binary files /dev/null and b/documentation/asciidoc/accessories/ai-kit/images/ai-kit-installation-05.png differ
diff --git a/documentation/asciidoc/accessories/ai-kit/images/ai-kit.jpg b/documentation/asciidoc/accessories/ai-kit/images/ai-kit.jpg
new file mode 100644
index 000000000..d519b0ff4
Binary files /dev/null and b/documentation/asciidoc/accessories/ai-kit/images/ai-kit.jpg differ
diff --git a/documentation/asciidoc/accessories/audio.adoc b/documentation/asciidoc/accessories/audio.adoc
index 7c4fd154b..87e227f58 100644
--- a/documentation/asciidoc/accessories/audio.adoc
+++ b/documentation/asciidoc/accessories/audio.adoc
@@ -1,4 +1,3 @@
-
include::audio/introduction.adoc[]
include::audio/dac_pro.adoc[]
@@ -16,4 +15,3 @@ include::audio/getting_started.adoc[]
include::audio/hardware-info.adoc[]
include::audio/update-firmware.adoc[]
-
diff --git a/documentation/asciidoc/accessories/audio/codec_zero.adoc b/documentation/asciidoc/accessories/audio/codec_zero.adoc
index 9739adf5f..cfb9dd967 100644
--- a/documentation/asciidoc/accessories/audio/codec_zero.adoc
+++ b/documentation/asciidoc/accessories/audio/codec_zero.adoc
@@ -22,6 +22,7 @@ The Codec Zero includes an EEPROM which can be used for auto-configuration of th
In addition to the green (GPIO23) and red (GPIO24) LEDs, a tactile programmable button (GPIO27) is also provided.
==== Pinouts
+
[cols="1,12"]
|===
| *P1/2* | Support external PHONO/RCA sockets if needed. P1: AUX IN, P2: AUX OUT.
diff --git a/documentation/asciidoc/accessories/audio/configuration.adoc b/documentation/asciidoc/accessories/audio/configuration.adoc
index 97e514690..79a5d2136 100644
--- a/documentation/asciidoc/accessories/audio/configuration.adoc
+++ b/documentation/asciidoc/accessories/audio/configuration.adoc
@@ -6,33 +6,62 @@ image::images/gui.png[]
There are a number of third-party audio software applications available for Raspberry Pi that will support the plug-and-play feature of our audio boards. Often these are used headless. They can be controlled via a PC or Mac application, or by a web server installed on Raspberry Pi, with interaction through a webpage.
-If you need to configure Raspberry Pi OS yourself, perhaps if you're running a headless system of your own and don't have the option of control via the GUI, you will need to make your Raspberry Pi audio board the primary audio device in Raspberry Pi OS, disabling the Raspberry Pi’s on-board audio device. This is done by editing the `/boot/config.txt` file. Using a Terminal session connected to your Raspberry Pi via SSH, run the following command to edit the file:
+If you need to configure Raspberry Pi OS yourself, perhaps if you're running a headless system of your own and don't have the option of control via the GUI, you will need to make your Raspberry Pi audio board the primary audio device in Raspberry Pi OS, disabling the Raspberry Pi's on-board audio device. This is done by editing the xref:../computers/config_txt.adoc#what-is-config-txt[`/boot/firmware/config.txt`] file. Using a Terminal session connected to your Raspberry Pi via SSH, run the following command to edit the file:
+[source,console]
----
-$ sudo nano /boot/config.txt
+$ sudo nano /boot/firmware/config.txt
----
-Find the `dtparam=audio=on` line in the file and comment it out by placing a # symbol at the start of the line. Anything written after the # symbol in any given line will be disregarded by the program. Your ``/boot/config.txt`` file should now have the following entry:
+Find the `dtparam=audio=on` line in the file and comment it out by placing a # symbol at the start of the line. Anything written after the # symbol in any given line will be disregarded by the program. Your `/boot/firmware/config.txt` file should now contain the following entry:
+[source,ini]
----
#dtparam=audio=on
----
-Press CTRL+X, then Y and Enter to save, followed by a reboot of your Raspberry Pi in order for the settings to take effect.
+Press `Ctrl+X`, then the `Y` key, then *Enter* to save. Finally, reboot your Raspberry Pi in order for the settings to take effect.
+[source,console]
----
$ sudo reboot
----
-Alternatively, the `/boot/config.txt` file can be edited directly onto the Raspberry Pi's microSD card, inserted into your usual computer. Using the default file manager, open the `boot` volume on the card and edit the `config.txt` file using an appropriate text editor, then save the file, eject the microSD card and reinsert it back into your Raspberry Pi.
+Alternatively, the `/boot/firmware/config.txt` file can be edited directly onto the Raspberry Pi's microSD card, inserted into your usual computer. Using the default file manager, open the `/boot/firmware/` volume on the card and edit the `config.txt` file using an appropriate text editor, then save the file, eject the microSD card and reinsert it back into your Raspberry Pi.
-=== Attaching the HAT
+=== Attach the HAT
-The Raspberry Pi audio boards attach to the Raspberry Pi’s 40-pin header. They are designed to be supported on the Raspberry Pi using the supplied circuit board standoffs and screws. No soldering is required on the Raspberry Pi audio boards for normal operation unless you are using hardwired connections for specific connectors such as XLR (External Line Return) connections on the DAC Pro.
+The Raspberry Pi audio boards attach to the Raspberry Pi's 40-pin header. They are designed to be supported on the Raspberry Pi using the supplied circuit board standoffs and screws. No soldering is required on the Raspberry Pi audio boards for normal operation unless you are using hardwired connections for specific connectors such as XLR (External Line Return) connections on the DAC Pro.
All the necessary mounting hardware including spacers, screws and connectors is provided. The PCB spacers should be screwed, finger-tight only, to the Raspberry Pi before adding the audio board. The remaining screws should then be screwed into the spacers from above.
-=== Codec Zero configuration
+=== Hardware versions
+
+There are multiple versions of the audio cards. Your specific version determines the actions required for configuration. Older, IQaudIO-branded boards have a black PCB. Newer Raspberry Pi-branded boards have a green PCB. These boards are electrically equivalent, but have different EEPROM contents.
+
+After attaching the HAT and applying power, check that the power LED on your audio card is illuminated, if it has one. For example, the Codec Zero has an LED marked `PWR`.
+
+After establishing the card has power, use the following command to check the version of your board:
+
+[source,console]
+----
+$ grep -a . /proc/device-tree/hat/*
+----
+
+If the vendor string says "Raspberry Pi Ltd." then no further action is needed (but see below for the extra Codec Zero configuration). If it says "IQaudIO Limited www.iqaudio.com" then you will need the additional `config.txt` settings outlined below. If it says "No such file or directory" then the HAT is not being detected, but these `config.txt` settings may still make it work.
+
+[source,ini]
+----
+# Some magic to prevent the normal HAT overlay from being loaded
+dtoverlay=
+# And then choose one of the following, according to the model:
+dtoverlay=rpi-codeczero
+dtoverlay=rpi-dacplus
+dtoverlay=rpi-dacpro
+dtoverlay=rpi-digiampplus
+----
+
+=== Extra Codec Zero configuration
The Raspberry Pi Codec Zero board uses the Dialog Semiconductor DA7212 codec. This allows
the recording of audio from the built-in MEMS microphone, from stereo phono sockets (AUX
@@ -43,9 +72,9 @@ Each input and output device has its own mixer, allowing the audio levels and vo
independently. Within the codec itself, other mixers and switches exist to allow the output to be mixed to a single mono channel for single-speaker output. Signals may also be inverted; there is a five-band equaliser to adjust certain frequency bands. These settings can be controlled interactively, using AlsaMixer, or programmatically.
Both the AUX IN and AUX OUT are 1V RMS. It may be necessary to adjust
-the AUX IN’s mixer to ensure that the input signal doesn’t saturate the ADC. Similarly, the output mixers can be to be adjusted to get the best possible output.
+the AUX IN's mixer to ensure that the input signal doesn't saturate the ADC. Similarly, the output mixers may need to be adjusted to get the best possible output.
-Preconfigured scripts (loadable ALSA settings) https://github.com/iqaudio/Pi-Codec[are available on GitHub], offering:
+Preconfigured scripts (loadable ALSA settings) https://github.com/raspberrypi/Pi-Codec[are available on GitHub], offering:
* Mono MEMS mic recording, mono speaker playback
* Mono MEMS mic recording, mono AUX OUT playback
@@ -54,32 +83,52 @@ Preconfigured scripts (loadable ALSA settings) https://github.com/iqaudio/Pi-Cod
The Codec Zero needs to know which of these input and output settings are being used each time the Raspberry Pi powers on. Using a Terminal session on your Raspberry Pi, run the following command to download the scripts:
+[source,console]
----
-$ git clone https://github.com/iqaudio/Pi-Codec.git
+$ git clone https://github.com/raspberrypi/Pi-Codec.git
----
If git is not installed, run the following command to install it:
+[source,console]
----
$ sudo apt install git
----
The following command will set your device to use the on-board MEMS microphone and output for speaker playback:
+[source,console]
----
-$ sudo alsactl restore -f /home/pi/Pi-Codec/IQaudIO_Codec_OnboardMIC_record_and_SPK_playback.state
+$ sudo alsactl restore -f /home/<username>/Pi-Codec/Codec_Zero_OnboardMIC_record_and_SPK_playback.state
----
+This command may output warning messages, including the following:
+
+* "failed to import hw"
+* "No state is present for card"
+
+In most cases, these warnings are harmless; you can safely ignore them.
+
+However, the following warning may indicate a hardware failure:
+
+* "Remote I/O error" (`REMOTEIO`)
+
+In Linux, this warning indicates that the kernel can't communicate with an
+I2C device.
+
+
In order for your project to operate with your required settings when it is powered on, edit the `/etc/rc.local` file. The contents of this file are run at the end of every boot process, so it is ideal for this purpose. Edit the file:
+[source,console]
----
$ sudo nano /etc/rc.local
----
-Add the chosen script command above the exit 0 line and then Ctrl X, Y and Enter to save. The file should now look similar to this depending on your chosen setting:
+Add the chosen script command above the `exit 0` line, then press `Ctrl+X`, then the `Y` key, then *Enter* to save. The file should now look similar to this depending on your chosen setting:
+[source,bash]
----
-#!/bin/sh -e
+#!/bin/sh
#
# rc.local
#
@@ -92,21 +141,23 @@ Add the chosen script command above the exit 0 line and then Ctrl X, Y and Enter
#
# By default this script does nothing.
-sudo alsactl restore -f /home/pi/Pi-Codec/IQaudIO_Codec_OnboardMIC_record_and_SPK_playback.state
+sudo alsactl restore -f /home/<username>/Pi-Codec/Codec_Zero_OnboardMIC_record_and_SPK_playback.state
exit 0
----
-Ctrl X, Y and Enter to save and reboot your device for the settings to take effect:
+Press `Ctrl+X`, then the `Y` key, then *Enter* to save. Reboot for the settings to take effect:
+[source,console]
----
$ sudo reboot
----
If you are using your Raspberry Pi and Codec Zero in a headless environment, there is one final step required to make the Codec Zero the default audio device without access to the GUI audio settings on the desktop. We need to create a small file in your home folder:
+[source,console]
----
-$ sudo nano ./asoundrc
+$ sudo nano .asoundrc
----
Add the following to the file:
@@ -118,29 +169,60 @@ pcm.!default {
}
----
-Ctrl X, Y and Enter to save, and reboot once more to complete the configuration:
+Press `Ctrl+X`, then the `Y` key, then *Enter* to save. Reboot once more to complete the configuration:
+
+Modern Linux distributions such as Raspberry Pi OS typically use PulseAudio or PipeWire for audio control. These frameworks are capable of mixing and switching audio from multiple sources. They provide a high-level API for audio applications to use. Many audio apps use these frameworks by default.
+
+Only create `~/.asoundrc` if an audio application needs to:
+* communicate directly with ALSA
+* run in an environment where PulseAudio or PipeWire are not present
+
+This file can interfere with the UI's view of underlying audio resources. As a result, we do not recommend creating `~/.asoundrc` when running the Raspberry Pi OS desktop.
+The UI may automatically clean up and remove this file if it exists.
+
+[source,console]
----
$ sudo reboot
----
-=== Muting and unmuting the DigiAMP{plus}
+=== Mute and unmute the DigiAMP{plus}
The DigiAMP{plus} mute state is toggled by GPIO22 on Raspberry Pi. The latest audio device tree
supports the unmute of the DigiAMP{plus} through additional parameters.
Firstly a "one-shot" unmute when kernel module loads.
+For Raspberry Pi boards:
+
+[source,ini]
+----
+dtoverlay=rpi-digiampplus,unmute_amp
+----
+
+For IQaudIO boards:
+
+[source,ini]
----
-dtoverlay=iqaudio-dacplus,unmute_amp
+dtoverlay=iqaudio-digiampplus,unmute_amp
----
Unmute the amp when an ALSA device is opened by a client. Mute, with a five-second delay
when the ALSA device is closed. (Reopening the device within the five-second close
window will cancel mute.)
+For Raspberry Pi boards:
+
+[source,ini]
+----
+dtoverlay=rpi-digiampplus,auto_mute_amp
+----
+
+For IQaudIO boards:
+
+[source,ini]
----
-dtoverlay=iqaudio-dacplus,auto_mute_amp
+dtoverlay=iqaudio-digiampplus,auto_mute_amp
----
If you do not want to control the mute state through the device tree, you can also script your own
@@ -148,14 +230,16 @@ solution.
The amp will start up muted. To unmute the amp:
+[source,console]
----
$ sudo sh -c "echo 22 > /sys/class/gpio/export"
$ sudo sh -c "echo out >/sys/class/gpio/gpio22/direction"
$ sudo sh -c "echo 1 >/sys/class/gpio/gpio22/value"
----
-to mute the amp once more:
+To mute the amp once more:
+[source,console]
----
$ sudo sh -c "echo 0 >/sys/class/gpio/gpio22/value"
----
diff --git a/documentation/asciidoc/accessories/audio/dac_plus.adoc b/documentation/asciidoc/accessories/audio/dac_plus.adoc
index 1d4324f10..dbef84b71 100644
--- a/documentation/asciidoc/accessories/audio/dac_plus.adoc
+++ b/documentation/asciidoc/accessories/audio/dac_plus.adoc
@@ -7,6 +7,7 @@ image::images/DAC+_Board_Diagram.jpg[width="80%"]
A Texas Instruments PCM5122 is used in the DAC{plus} to deliver analogue audio to the phono connectors of the device. It also supports a dedicated headphone amplifier and is powered via the Raspberry Pi through the GPIO header.
==== Pinouts
+
[cols="1,12"]
|===
| *P1* | Analogue out (0-2V RMS), carries GPIO27, MUTE signal (headphone detect), left and right
diff --git a/documentation/asciidoc/accessories/audio/dac_pro.adoc b/documentation/asciidoc/accessories/audio/dac_pro.adoc
index de360f443..2e8c444a5 100644
--- a/documentation/asciidoc/accessories/audio/dac_pro.adoc
+++ b/documentation/asciidoc/accessories/audio/dac_pro.adoc
@@ -11,6 +11,7 @@ dedicated headphone amplifier. The DAC Pro is powered by a Raspberry Pi through
As part of the DAC Pro, two three-pin headers (P7/P9) are exposed above the Raspberry Pi's USB and Ethernet ports for use by the optional XLR board, allowing differential/balanced output.
==== Pinouts
+
[cols="1,12"]
|===
| *P1* | Analogue out (0-2V RMS), carries GPIO27, MUTE signal (headphone detect), left and right
@@ -22,8 +23,8 @@ audio and left and right ground.
==== Optional XLR Board
-The Pi-DAC PRO exposes a 6 pin header used by the optional XLR board to provide Differential / Balanced output exposed by XLR sockets above the Pi’s USB/Ethernet ports.
+The Pi-DAC PRO exposes a 6 pin header used by the optional XLR board to provide Differential / Balanced output exposed by XLR sockets above the Pi's USB/Ethernet ports.
image::images/optional_xlr_board.jpg[width="80%"]
-An XLR connector is used in Studio and some hi-end hifi systems. It can also be used to drive ACTIVE “monitor” speakers as used at discos or on stage.
+An XLR connector is used in Studio and some hi-end hifi systems. It can also be used to drive ACTIVE "monitor" speakers as used at discos or on stage.
diff --git a/documentation/asciidoc/accessories/audio/digiamp_plus.adoc b/documentation/asciidoc/accessories/audio/digiamp_plus.adoc
index a2d816e9f..51347778e 100644
--- a/documentation/asciidoc/accessories/audio/digiamp_plus.adoc
+++ b/documentation/asciidoc/accessories/audio/digiamp_plus.adoc
@@ -6,7 +6,7 @@ DigiAMP{plus} uses the Texas Instruments TAS5756M PowerDAC and must be powered f
image::images/DigiAMP+_Board_Diagram.jpg[width="80%"]
-DigiAMP{plus}’s power in barrel connector is 5.5mm x 2.5mm.
+DigiAMP{plus}'s power in barrel connector is 5.5mm × 2.5mm.
At power-on, the amplifier is muted by default (the mute LED is illuminated). Software is responsible for the mute state and LED control (Raspberry Pi GPIO22).
diff --git a/documentation/asciidoc/accessories/audio/getting_started.adoc b/documentation/asciidoc/accessories/audio/getting_started.adoc
index 80099992a..7efbd7f9a 100644
--- a/documentation/asciidoc/accessories/audio/getting_started.adoc
+++ b/documentation/asciidoc/accessories/audio/getting_started.adoc
@@ -1,6 +1,6 @@
== Getting started
-=== Creating a toy chatter box
+=== Create a toy chatter box
As an example of what Raspberry Pi Audio Boards can do, let's walk through the creation of a toy chatter box. Its on-board microphone, programmable button and speaker driver make the Codec Zero an ideal choice for this application.
@@ -16,20 +16,24 @@ image::images/Chatterbox_Labels.png[width="80%"]
Use a small flat-head screwdriver to attach your speaker to the screw terminals. For the additional push button, solder the button wires directly to the Codec Zero pads as indicated, using GPIO pin 27 and Ground for the switch, and +3.3V and Ground for the LED, if necessary.
-=== Setting up your Raspberry Pi
+=== Set up your Raspberry Pi
-In this example, we are using Raspberry Pi OS Lite. Our guides on https://www.raspberrypi.com/documentation/computers/getting-started.html#installing-the-operating-system[Getting started] cover this topic in great detail. Make sure that you update your operating system before proceeding and follow the instructions provided for Codec Zero configuration, including the commands to enable the on-board microphone and speaker output.
+In this example, we are using Raspberry Pi OS Lite. Refer to our guide on xref:../computers/getting-started.adoc#installing-the-operating-system[installing Raspberry Pi OS] for more details.
-=== Programming your Raspberry Pi
+Make sure that you update your operating system before proceeding and follow the instructions provided for Codec Zero configuration, including the commands to enable the on-board microphone and speaker output.
+
+=== Program your Raspberry Pi
Open a shell — for instance by connecting via SSH — on your Raspberry Pi and run the following to create our Python script:
+[source,console]
----
$ sudo nano chatter_box.py
----
-Adding the following to the file:
+Add the following to the file, replacing `<username>` with your username:
+[source,python]
----
#!/usr/bin/env python3
from gpiozero import Button
@@ -46,18 +50,18 @@ print(f"{date}")
# Make sure that the 'sounds' folder exists, and if it does not, create it
-path = '/home/pi/sounds'
+path = '/home/<username>/sounds'
isExist = os.path.exists(path)
if not isExist:
os.makedirs(path)
print("The new directory is created!")
- os.system('chmod 777 -R /home/pi/sounds')
+ os.system('chmod 777 -R /home/<username>/sounds')
# Download a 'burp' sound if it does not already exist
-burp = '/home/pi/burp.wav'
+burp = '/home/<username>/burp.wav'
isExist = os.path.exists(burp)
if not isExist:
@@ -79,18 +83,18 @@ def released():
print("Released at %s after %.2f seconds" % (release_time, pressed_for))
if pressed_for < button.hold_time:
print("This is a short press")
- randomfile = random.choice(os.listdir("/home/pi/sounds/"))
- file = '/home/pi/sounds/' + randomfile
+ randomfile = random.choice(os.listdir("/home/<username>/sounds/"))
+ file = '/home/<username>/sounds/' + randomfile
os.system('aplay ' + file)
elif pressed_for > 20:
os.system('aplay ' + burp)
print("Erasing all recorded sounds")
- os.system('rm /home/pi/sounds/*');
+ os.system('rm /home/<username>/sounds/*');
def held():
print("This is a long press")
os.system('aplay ' + burp)
- os.system('arecord --format S16_LE --duration=5 --rate 48000 -c2 /home/pi/sounds/$(date +"%d_%m_%Y-%H_%M_%S")_voice.m4a');
+ os.system('arecord --format S16_LE --duration=5 --rate 48000 -c2 /home/<username>/sounds/$(date +"%d_%m_%Y-%H_%M_%S")_voice.m4a');
button.when_pressed = pressed
button.when_released = released
@@ -100,31 +104,33 @@ pause()
----
-Ctrl X, Y and Enter to save. To make the script executable, type the following:
+Press `Ctrl+X`, then the `Y` key, then *Enter* to save. To make the script executable, type the following:
+[source,console]
----
$ sudo chmod +x chatter_box.py
----
-Enter the following to create a crontab daemon that will automatically start the script each time the device is powered on:
+Next, we need to create a crontab daemon that will automatically start the script each time the device is powered on. Run the following command to open your crontab for editing:
+[source,console]
----
$ crontab -e
----
-You will be asked to select an editor; we recommend you use `nano`. Select it by entering the corresponding number, and press Enter to continue. The following line should be added to the bottom of the file:
+You will be asked to select an editor; we recommend you use `nano`. Select it by entering the corresponding number, and press Enter to continue. The following line should be added to the bottom of the file, replacing `<username>` with your username:
----
-@reboot python /home/pi/chatter_box.py
+@reboot python /home/<username>/chatter_box.py
----
-Ctrl X, Y and Enter to save, then reboot your device.
+Press `Ctrl+X`, then the `Y` key, then *Enter* to save, then reboot your device with `sudo reboot`.
-=== Operating your device
+=== Use the toy chatter box
The final step is to ensure that everything is operating as expected. Press the button and release it when you hear the burp. The recording will now begin for a period of five seconds. Once you have released the button, press it briefly again to hear the recording. Repeat this process as many times as you wish, and your sounds will be played at random. You can delete all recordings by pressing and holding the button, keeping the button pressed during the first burp and recording process, and releasing it after at least 20 seconds, at which point you will hear another burp sound confirming that the recordings have been deleted.
-video::BjXERzu8nS0[youtube]
+video::BjXERzu8nS0[youtube,width=80%,height=400px]
=== Next steps
diff --git a/documentation/asciidoc/accessories/audio/hardware-info.adoc b/documentation/asciidoc/accessories/audio/hardware-info.adoc
index 20d9da89a..240c9fd0f 100644
--- a/documentation/asciidoc/accessories/audio/hardware-info.adoc
+++ b/documentation/asciidoc/accessories/audio/hardware-info.adoc
@@ -39,7 +39,7 @@ If appropriate then the following are also used:
=== DAC PRO, DAC{plus}, DigiAMP{plus}, Codec Zero
-image::images/pin_table_new.jpg[width="80%"]
+image::images/all_audio_boards_gpio_pinouts.png[width="80%"]
The DAC PRO, DAC{plus} and DigiAMP{plus} re-expose the Raspberry Pi signals, allowing additional sensors and peripherals
to be added easily. Please note that some signals are for exclusive use (I2S and EEPROM) by some
@@ -52,37 +52,30 @@ image::images/pin_out_new.jpg[width="80%"]
To store the AlsaMixer settings, add the following at the command line:
+[source,console]
----
$ sudo alsactl store
----
You can save the current state to a file, then reload that state at startup.
-To save:
+To save, run the following command, replacing `<username>` with your username:
+[source,console]
----
-$ sudo alsactl store -f /home/pi/usecase.state
+$ sudo alsactl store -f /home/<username>/usecase.state
----
-To restore a saved file:
+To restore a saved file, run the following command, replacing `<username>` with your username:
+[source,console]
----
-$ sudo alsactl restore -f /home/pi/usecase.state
-----
-
-=== Using external USB devices
-
-If you want to enable 1.2 amp USB support (to allow USB hard disks to power up when
-accessory boards are in use), you may also want to add the following line to your `/boot/config.txt`
-file:
-
-----
-max_usb_current=1
+$ sudo alsactl restore -f /home/<username>/usecase.state
----
=== MPD-based audio with volume control
-To allow Music Player Daemon (MPD)-based audio software to control the audio board’s built in volume, the file
+To allow Music Player Daemon (MPD)-based audio software to control the audio board's built in volume, the file
`/etc/mpd.conf` may need to be changed to support the correct AlsaMixer name.
This can be achieved by ensuring the 'Audio output' section of `/etc/mpd.conf` has the 'mixer_control'
diff --git a/documentation/asciidoc/accessories/audio/images/Chatter_Box.jpg b/documentation/asciidoc/accessories/audio/images/Chatter_Box.jpg
index 7d7bfb0e0..b09c69521 100644
Binary files a/documentation/asciidoc/accessories/audio/images/Chatter_Box.jpg and b/documentation/asciidoc/accessories/audio/images/Chatter_Box.jpg differ
diff --git a/documentation/asciidoc/accessories/audio/images/Chatterbox_Labels.png b/documentation/asciidoc/accessories/audio/images/Chatterbox_Labels.png
index 7f54c5b97..379df111f 100644
Binary files a/documentation/asciidoc/accessories/audio/images/Chatterbox_Labels.png and b/documentation/asciidoc/accessories/audio/images/Chatterbox_Labels.png differ
diff --git a/documentation/asciidoc/accessories/audio/images/Codec_Zero_Board_Diagram.png b/documentation/asciidoc/accessories/audio/images/Codec_Zero_Board_Diagram.png
index 441453078..4e02bdedb 100644
Binary files a/documentation/asciidoc/accessories/audio/images/Codec_Zero_Board_Diagram.png and b/documentation/asciidoc/accessories/audio/images/Codec_Zero_Board_Diagram.png differ
diff --git a/documentation/asciidoc/accessories/audio/images/DAC+_Board_Diagram.png b/documentation/asciidoc/accessories/audio/images/DAC+_Board_Diagram.png
index afa6ed1d6..7a68f02c4 100644
Binary files a/documentation/asciidoc/accessories/audio/images/DAC+_Board_Diagram.png and b/documentation/asciidoc/accessories/audio/images/DAC+_Board_Diagram.png differ
diff --git a/documentation/asciidoc/accessories/audio/images/DAC_Pro_Board_Diagram.png b/documentation/asciidoc/accessories/audio/images/DAC_Pro_Board_Diagram.png
index 9cab3ed31..033ed5e1b 100644
Binary files a/documentation/asciidoc/accessories/audio/images/DAC_Pro_Board_Diagram.png and b/documentation/asciidoc/accessories/audio/images/DAC_Pro_Board_Diagram.png differ
diff --git a/documentation/asciidoc/accessories/audio/images/DigiAMP+_Board_Diagram.png b/documentation/asciidoc/accessories/audio/images/DigiAMP+_Board_Diagram.png
index 7c6411100..e4f2b336b 100644
Binary files a/documentation/asciidoc/accessories/audio/images/DigiAMP+_Board_Diagram.png and b/documentation/asciidoc/accessories/audio/images/DigiAMP+_Board_Diagram.png differ
diff --git a/documentation/asciidoc/accessories/audio/images/all_audio_boards_gpio_pinouts.png b/documentation/asciidoc/accessories/audio/images/all_audio_boards_gpio_pinouts.png
new file mode 100644
index 000000000..48783e9cd
Binary files /dev/null and b/documentation/asciidoc/accessories/audio/images/all_audio_boards_gpio_pinouts.png differ
diff --git a/documentation/asciidoc/accessories/audio/images/dac_plus.png b/documentation/asciidoc/accessories/audio/images/dac_plus.png
index 6c3ad6455..61154d683 100644
Binary files a/documentation/asciidoc/accessories/audio/images/dac_plus.png and b/documentation/asciidoc/accessories/audio/images/dac_plus.png differ
diff --git a/documentation/asciidoc/accessories/audio/images/gui.png b/documentation/asciidoc/accessories/audio/images/gui.png
old mode 100644
new mode 100755
index bbc51e407..61e97df72
Binary files a/documentation/asciidoc/accessories/audio/images/gui.png and b/documentation/asciidoc/accessories/audio/images/gui.png differ
diff --git a/documentation/asciidoc/accessories/audio/images/optional_xlr_board.jpg b/documentation/asciidoc/accessories/audio/images/optional_xlr_board.jpg
index 7e6e85d4e..526b1c2d5 100644
Binary files a/documentation/asciidoc/accessories/audio/images/optional_xlr_board.jpg and b/documentation/asciidoc/accessories/audio/images/optional_xlr_board.jpg differ
diff --git a/documentation/asciidoc/accessories/audio/images/pin_table_new.jpg b/documentation/asciidoc/accessories/audio/images/pin_table_new.jpg
deleted file mode 100644
index b9ca1a8bc..000000000
Binary files a/documentation/asciidoc/accessories/audio/images/pin_table_new.jpg and /dev/null differ
diff --git a/documentation/asciidoc/accessories/audio/images/wiring.jpg b/documentation/asciidoc/accessories/audio/images/wiring.jpg
index 5481ce90c..3a22c834b 100644
Binary files a/documentation/asciidoc/accessories/audio/images/wiring.jpg and b/documentation/asciidoc/accessories/audio/images/wiring.jpg differ
diff --git a/documentation/asciidoc/accessories/audio/images/write_protect_tabs.jpg b/documentation/asciidoc/accessories/audio/images/write_protect_tabs.jpg
index 5da7d0723..91e2f65f1 100644
Binary files a/documentation/asciidoc/accessories/audio/images/write_protect_tabs.jpg and b/documentation/asciidoc/accessories/audio/images/write_protect_tabs.jpg differ
diff --git a/documentation/asciidoc/accessories/audio/introduction.adoc b/documentation/asciidoc/accessories/audio/introduction.adoc
index 935b10087..01abb4690 100644
--- a/documentation/asciidoc/accessories/audio/introduction.adoc
+++ b/documentation/asciidoc/accessories/audio/introduction.adoc
@@ -5,6 +5,7 @@ Raspberry Pi Audio Boards bring high quality audio to your existing hi-fi or Ras
Each board has a specific purpose and set of features. The highest audio quality playback is available from our DAC PRO, DAC{plus} and DigiAMP{plus} boards, which support up to full HD audio (192kHz); while the Codec Zero supports up to HD audio (96kHz) and includes a built-in microphone, making it ideal for compact projects.
=== Features at a glance
+
[cols="2,1,1,1,1,1,1,1,1,1"]
|===
| | *Line out* | *Balanced out* | *Stereo speakers* | *Mono speaker* | *Headphones* | *Aux in* | *Aux out* | *Ext mic* | *Built-in mic*
diff --git a/documentation/asciidoc/accessories/audio/update-firmware.adoc b/documentation/asciidoc/accessories/audio/update-firmware.adoc
index 46e9bf61d..d5a16fdb9 100644
--- a/documentation/asciidoc/accessories/audio/update-firmware.adoc
+++ b/documentation/asciidoc/accessories/audio/update-firmware.adoc
@@ -2,7 +2,7 @@
Raspberry Pi Audio Boards use an EEPROM that contains information that is used by the host Raspberry Pi device to select the appropriate driver at boot time. This information is programmed into the EEPROM during manufacture. There are some circumstances where the end user may wish to update the EEPROM contents: this can be done from the command line.
-IMPORTANT: Before proceeding, you should update the Raspberry Pi OS running on your Raspberry Pi to the latest version.
+IMPORTANT: Before proceeding, update the version of Raspberry Pi OS running on your Raspberry Pi to the latest version.
=== The EEPROM write-protect link
@@ -12,29 +12,30 @@ image::images/write_protect_tabs.jpg[width="80%"]
NOTE: In some cases the two pads may already have a 0Ω resistor fitted to bridge the write-protect link, as illustrated in the picture of the Codec Zero board above.
-=== EEPROM Programming
+=== Program the EEPROM
Once the write-protect line has been pulled down, the EEPROM can be programmed.
-You should first install the utilites and then run the programmer. Open up a terminal window and type the following:
+You should first install the utilities and then run the programmer. Open up a terminal window and type the following:
+[source,console]
----
$ sudo apt update
$ sudo apt install rpi-audio-utils
$ sudo rpi-audio-flash
----
-After starting you will be presented with a warning screen.
+After starting, you will see a warning screen.
image::images/firmware-update/warning.png[]
-Selecting "Yes" to proceed will present you with a menu allowing you to select your hardware.
+Select "Yes" to proceed. You should see a menu where you can select your hardware.
image::images/firmware-update/select.png[]
NOTE: If no HAT is present, or if the connected HAT is not a Raspberry Pi Audio board, you will be presented with an error screen. If the firmware has already been updated on the board, a message will be displayed informing you that you do not have to continue.
-After selecting the correct hardware a screen will display while the new firmware is flashed to the HAT.
+After selecting the hardware, a screen will display while the new firmware is flashed to the HAT.
image::images/firmware-update/flashing.png[]
@@ -42,5 +43,5 @@ Afterwards a screen will display telling you that the new firmware has installed
image::images/firmware-update/flashed.png[]
-NOTE: If the firmware fails to install correctly, an error screen will be displayed. In the first instance you should remove and reseat the HAT board and try flashing the firmware again.
+NOTE: If the firmware fails to install correctly, you will see an error screen. Try removing and reseating the HAT, then flash the firmware again.
diff --git a/documentation/asciidoc/accessories/build-hat.adoc b/documentation/asciidoc/accessories/build-hat.adoc
index fcfc20065..472c939c4 100644
--- a/documentation/asciidoc/accessories/build-hat.adoc
+++ b/documentation/asciidoc/accessories/build-hat.adoc
@@ -29,4 +29,3 @@ include::build-hat/links-to-other.adoc[]
include::build-hat/compat.adoc[]
include::build-hat/mech.adoc[]
-
diff --git a/documentation/asciidoc/accessories/build-hat/images/blinking-light.gif b/documentation/asciidoc/accessories/build-hat/images/blinking-light.gif
deleted file mode 100644
index 401912503..000000000
Binary files a/documentation/asciidoc/accessories/build-hat/images/blinking-light.gif and /dev/null differ
diff --git a/documentation/asciidoc/accessories/build-hat/images/blinking-light.webm b/documentation/asciidoc/accessories/build-hat/images/blinking-light.webm
new file mode 100644
index 000000000..12ecb8a3b
Binary files /dev/null and b/documentation/asciidoc/accessories/build-hat/images/blinking-light.webm differ
diff --git a/documentation/asciidoc/accessories/build-hat/images/connect-motor.gif b/documentation/asciidoc/accessories/build-hat/images/connect-motor.gif
deleted file mode 100644
index 197a87cc8..000000000
Binary files a/documentation/asciidoc/accessories/build-hat/images/connect-motor.gif and /dev/null differ
diff --git a/documentation/asciidoc/accessories/build-hat/images/connect-motor.webm b/documentation/asciidoc/accessories/build-hat/images/connect-motor.webm
new file mode 100644
index 000000000..70da88129
Binary files /dev/null and b/documentation/asciidoc/accessories/build-hat/images/connect-motor.webm differ
diff --git a/documentation/asciidoc/accessories/build-hat/images/fitting-build-hat.gif b/documentation/asciidoc/accessories/build-hat/images/fitting-build-hat.gif
deleted file mode 100644
index f1cac0bf3..000000000
Binary files a/documentation/asciidoc/accessories/build-hat/images/fitting-build-hat.gif and /dev/null differ
diff --git a/documentation/asciidoc/accessories/build-hat/images/fitting-build-hat.webm b/documentation/asciidoc/accessories/build-hat/images/fitting-build-hat.webm
new file mode 100644
index 000000000..8d64b6817
Binary files /dev/null and b/documentation/asciidoc/accessories/build-hat/images/fitting-build-hat.webm differ
diff --git a/documentation/asciidoc/accessories/build-hat/images/powering-build-hat.gif b/documentation/asciidoc/accessories/build-hat/images/powering-build-hat.gif
deleted file mode 100644
index e065f39b2..000000000
Binary files a/documentation/asciidoc/accessories/build-hat/images/powering-build-hat.gif and /dev/null differ
diff --git a/documentation/asciidoc/accessories/build-hat/images/powering-build-hat.webm b/documentation/asciidoc/accessories/build-hat/images/powering-build-hat.webm
new file mode 100644
index 000000000..e358683f9
Binary files /dev/null and b/documentation/asciidoc/accessories/build-hat/images/powering-build-hat.webm differ
diff --git a/documentation/asciidoc/accessories/build-hat/images/raspi-config-2.png b/documentation/asciidoc/accessories/build-hat/images/raspi-config-2.png
old mode 100644
new mode 100755
index 8de810a1c..4dfd19cba
Binary files a/documentation/asciidoc/accessories/build-hat/images/raspi-config-2.png and b/documentation/asciidoc/accessories/build-hat/images/raspi-config-2.png differ
diff --git a/documentation/asciidoc/accessories/build-hat/images/setting-up.png b/documentation/asciidoc/accessories/build-hat/images/setting-up.png
old mode 100644
new mode 100755
index b4e2d0399..8964b0e44
Binary files a/documentation/asciidoc/accessories/build-hat/images/setting-up.png and b/documentation/asciidoc/accessories/build-hat/images/setting-up.png differ
diff --git a/documentation/asciidoc/accessories/build-hat/images/tall-headers.png b/documentation/asciidoc/accessories/build-hat/images/tall-headers.png
index 58eff7352..cf89aa68e 100644
Binary files a/documentation/asciidoc/accessories/build-hat/images/tall-headers.png and b/documentation/asciidoc/accessories/build-hat/images/tall-headers.png differ
diff --git a/documentation/asciidoc/accessories/build-hat/images/turning-motor.gif b/documentation/asciidoc/accessories/build-hat/images/turning-motor.gif
deleted file mode 100644
index 71b7b0c06..000000000
Binary files a/documentation/asciidoc/accessories/build-hat/images/turning-motor.gif and /dev/null differ
diff --git a/documentation/asciidoc/accessories/build-hat/images/turning-motor.webm b/documentation/asciidoc/accessories/build-hat/images/turning-motor.webm
new file mode 100644
index 000000000..334b43eae
Binary files /dev/null and b/documentation/asciidoc/accessories/build-hat/images/turning-motor.webm differ
diff --git a/documentation/asciidoc/accessories/build-hat/introduction.adoc b/documentation/asciidoc/accessories/build-hat/introduction.adoc
index ca59abdf1..3ee1e7fd8 100644
--- a/documentation/asciidoc/accessories/build-hat/introduction.adoc
+++ b/documentation/asciidoc/accessories/build-hat/introduction.adoc
@@ -1,4 +1,5 @@
-== Introducing the Build HAT
+[[about-build-hat]]
+== About
The https://raspberrypi.com/products/build-hat[Raspberry Pi Build HAT] is an add-on board that connects to the 40-pin GPIO header of your Raspberry Pi, which was designed in collaboration with LEGO® Education to make it easy to control LEGO® Technic™ motors and sensors with Raspberry Pi computers.
@@ -8,7 +9,7 @@ NOTE: A full list of supported devices can be found in the xref:build-hat.adoc#d
It provides four connectors for LEGO® Technic™ motors and sensors from the SPIKE™ Portfolio. The available sensors include a distance sensor, a colour sensor, and a versatile force sensor. The angular motors come in a range of sizes and include integrated encoders that can be queried to find their position.
-The Build HAT fits all Raspberry Pi computers with a 40-pin GPIO header, including — with the addition of a ribbon cable or other extension device — Raspberry Pi 400. Connected LEGO® Technic™ devices can easily be controlled in Python, alongside standard Raspberry Pi accessories such as a camera module.
+The Build HAT fits all Raspberry Pi computers with a 40-pin GPIO header, including, with the addition of a ribbon cable or other extension device, Keyboard-series devices. Connected LEGO® Technic™ devices can easily be controlled in Python, alongside standard Raspberry Pi accessories such as a camera module.
The Raspberry Pi Build HAT power supply (PSU), which is https://raspberrypi.com/products/build-hat-power-supply[available separately], is designed to power both the Build HAT and Raspberry Pi computer along with all connected LEGO® Technic™ devices.
@@ -16,15 +17,15 @@ image::images/psu.jpg[width="80%"]
The LEGO® Education SPIKE™ Prime Set 45678 and SPIKE™ Prime Expansion Set 45681, available separately from LEGO® Education resellers, include a collection of useful elements supported by the Build HAT.
-NOTE: The HAT works with all 40-pin GPIO Raspberry Pi boards, including Raspberry Pi 4 and Raspberry Pi Zero. With the addition of a ribbon cable or other extension device, it can also be used with Raspberry Pi 400.
+NOTE: The HAT works with all 40-pin GPIO Raspberry Pi boards, including Zero-series devices. With the addition of a ribbon cable or other extension device, it can also be used with Keyboard-series devices.
* Controls up to 4 LEGO® Technic™ motors and sensors included in the SPIKE™ Portfolio
* Easy-to-use https://buildhat.readthedocs.io/[Python library] to control your LEGO® Technic™ devices
* Fits onto any Raspberry Pi computer with a 40-pin GPIO header
-* Onboard xref:../microcontrollers/rp2040.adoc[RP2040] microcontroller manages low-level control of LEGO® Technic™ devices
+* Onboard xref:../microcontrollers/silicon.adoc[RP2040] microcontroller manages low-level control of LEGO® Technic™ devices
* External 8V PSU https://raspberrypi.com/products/build-hat-power-supply[available separately] to power both Build HAT and Raspberry Pi
[NOTE]
====
-The Build HAT can not power the Raspberry Pi 400 as it does not support being powered via the GPIO headers.
+The Build HAT cannot power Keyboard-series devices, since they do not support power supply over the GPIO headers.
====
diff --git a/documentation/asciidoc/accessories/build-hat/net-brick.adoc b/documentation/asciidoc/accessories/build-hat/net-brick.adoc
index 32a14a8c5..f5f42ad8c 100644
--- a/documentation/asciidoc/accessories/build-hat/net-brick.adoc
+++ b/documentation/asciidoc/accessories/build-hat/net-brick.adoc
@@ -1,17 +1,17 @@
-=== Using the Build HAT from .NET
+=== Use the Build HAT from .NET
The Raspberry Pi Built HAT is referred to "Brick" in LEGO® parlance and you can talk directly to it from .NET using the https://datasheets.raspberrypi.com/build-hat/build-hat-serial-protocol.pdf[Build HAT Serial Protocol].
You can create a `brick` object as below,
-[csharp]
+[source,csharp]
----
Brick brick = new("/dev/serial0");
----
but you need to remember to dispose of the `brick` at the end of your code.
-[csharp]
+[source,csharp]
----
brick.Dispose();
----
@@ -20,18 +20,18 @@ WARNING: If you do not call `brick.Dispose()` your program will not terminate.
If you want to avoid calling `brick.Dispose` at the end, then create your brick with the `using` statement:
-[csharp]
+[source,csharp]
----
using Brick brick = new("/dev/serial0");
----
In this case, when reaching the end of the program, your brick will be automatically disposed.
-==== Displaying the information
+==== Display Build HAT information
You can gather the various software versions, the signature, and the input voltage:
-[csharp]
+[source,csharp]
----
var info = brick.BuildHatInformation;
Console.WriteLine($"version: {info.Version}, firmware date: {info.FirmwareDate}, signature:");
@@ -45,7 +45,7 @@ NOTE: The input voltage is read only once at boot time and is not read again aft
The functions `GetSensorType`, `GetSensor` will allow you to retrieve any information on connected sensor.
-[csharp]
+[source,csharp]
----
SensorType sensor = brick.GetSensorType((SensorPort)i);
Console.Write($"Port: {i} {(Brick.IsMotor(sensor) ? "Sensor" : "Motor")} type: {sensor} Connected: ");
@@ -53,7 +53,7 @@ Console.Write($"Port: {i} {(Brick.IsMotor(sensor) ? "Sensor" : "Motor")} type: {
In this example, you can as well use the `IsMotor` static function to check if the connected element is a sensor or a motor.
-[csharp]
+[source,csharp]
----
if (Brick.IsActiveSensor(sensor))
{
@@ -72,9 +72,9 @@ else
Most sensors implements events on their special properties. You can simply subscribe to `PropertyChanged` and `PropertyUpdated`. The changed one will be fired when the value is changing while the updated one when there is a success update to the property. Depending on the modes used, some properties may be updated in the background all the time while some others occasionally.
-You may be interested only when a color is changing or the position of the motor is changing, using it as a tachometer. In this case, the `PropertyChanged` is what you need!
+You may be interested only when a colour is changing or the position of the motor is changing, using it as a tachometer. In this case, the `PropertyChanged` is what you need!
-[csharp]
+[source,csharp]
----
Console.WriteLine("Move motor on Port A to more than position 100 to stop this test.");
brick.WaitForSensorToConnect(SensorPort.PortA);
@@ -102,11 +102,11 @@ void MotorPropertyEvent(object? sender, PropertyChangedEventArgs e)
}
----
-==== Waiting for initialization
+==== Wait for initialization
The brick can take a long time before it initializes. A wait for a sensor to be connected has been implemented.
-[csharp]
+[source,csharp]
----
brick.WaitForSensorToConnect(SensorPort.PortB);
----
diff --git a/documentation/asciidoc/accessories/build-hat/net-installing-software.adoc b/documentation/asciidoc/accessories/build-hat/net-installing-software.adoc
index f0bfe12b3..0c9330e0b 100644
--- a/documentation/asciidoc/accessories/build-hat/net-installing-software.adoc
+++ b/documentation/asciidoc/accessories/build-hat/net-installing-software.adoc
@@ -1,43 +1,43 @@
-== Using the Build HAT from .NET
+== Use the Build HAT from .NET
-=== Installing the .NET Framework
+=== Install the .NET Framework
The .NET framework from Microsoft is not available via `apt` on Raspberry Pi. However, you can follow the https://docs.microsoft.com/en-us/dotnet/iot/deployment[official instructions] from Microsoft to install the .NET framework. Alternatively, there is a simplified https://www.petecodes.co.uk/install-and-use-microsoft-dot-net-5-with-the-raspberry-pi/[third party route] to get the .NET toolchain on to your Raspberry Pi.
WARNING: The installation script is run as `root`. You should read it first and make sure you understand what it is doing. If you are at all unsure you should follow the https://docs.microsoft.com/en-us/dotnet/iot/deployment[official instructions] manually.
-[.bash]
+[source,console]
----
$ wget -O - https://raw.githubusercontent.com/pjgpetecodes/dotnet5pi/master/install.sh | sudo bash
----
After installing the .NET framework you can create your project:
-[.bash]
+[source,console]
----
$ dotnet new console --name buildhat
----
This creates a default program in the `buildhat` subdirectory, and we need to be in that directory in order to continue:
-[.bash]
+[source,console]
----
$ cd buildhat
----
You will now need to install the following nuget packages:
-[.bash]
+
+[source,console]
----
$ dotnet add package System.Device.Gpio --version 2.1.0
$ dotnet add package Iot.Device.Bindings --version 2.1.0
----
-=== Running C# Code
+=== Run C# Code
-You can run the program with the `dotnet run` command. Let's try it now to make sure everything works.
-It should print "Hello World!"
+You can run the program with the `dotnet run` command. Let's try it now to make sure everything works. It should print "Hello World!"
-[.bash]
+[source,console]
----
$ dotnet run
Hello World!
@@ -45,7 +45,8 @@ Hello World!
(When instructed to "run the program" in the instructions that follow, you will simply rerun `dotnet run`)
-=== Editing C# Code
+=== Edit C# Code
+
In the instructions below, you will be editing the file `buildhat/Program.cs`, the C# program which was generated when you ran the above commands.
Any text editor will work to edit C# code, including Geany, the IDE/Text Editor that comes pre-installed. https://code.visualstudio.com/docs/setup/raspberry-pi/[Visual Studio Code] (often called "VS Code") is also a popular alternative.
diff --git a/documentation/asciidoc/accessories/build-hat/net-motors.adoc b/documentation/asciidoc/accessories/build-hat/net-motors.adoc
index 3945ff203..9e9d9ab54 100644
--- a/documentation/asciidoc/accessories/build-hat/net-motors.adoc
+++ b/documentation/asciidoc/accessories/build-hat/net-motors.adoc
@@ -1,10 +1,10 @@
-=== Using Motors from .NET
+=== Use Motors from .NET
There are two types of motors, the *passive* ones and the *active* ones. Active motors will provide detailed position, absolute position and speed while passive motors can only be controlled with speed.
A common set of functions to control the speed of the motors are available. There are 2 important ones: `SetPowerLimit` and `SetBias`:
-[csharp]
+[source,csharp]
----
train.SetPowerLimit(1.0);
train.SetBias(0.2);
@@ -25,7 +25,7 @@ The typical passive motor is a train and older Powered Up motors. The `Speed` pr
Functions to control `Start`, `Stop` and `SetSpeed` are also available. Here is an example of how to use it:
-[csharp]
+[source,csharp]
----
Console.WriteLine("This will run the motor for 20 secondes incrementing the PWM");
train.SetPowerLimit(1.0);
@@ -60,7 +60,7 @@ Active motors have `Speed`, `AbsolutePosition`, `Position` and `TargetSpeed` as
The code snippet shows how to get the motors, start them and read the properties:
-[csharp]
+[source,csharp]
----
brick.WaitForSensorToConnect(SensorPort.PortA);
brick.WaitForSensorToConnect(SensorPort.PortD);
@@ -86,11 +86,11 @@ active.Stop();
active2.Stop();
----
-NOTE: You should not forget to start and stop your motors when needed.
+NOTE: Don't forget to start and stop your motors when needed.
Advance features are available for active motors. You can request to move for seconds, to a specific position, a specific absolute position. Here are couple of examples:
-[csharp]
+[source,csharp]
----
// From the previous example, this will turn the motors back to their initial position:
active.TargetSpeed = 100;
@@ -103,7 +103,7 @@ active2.MoveToPosition(0, true);
Each function allow you to block or not the thread for the time the operation will be performed. Note that for absolute and relative position moves, there is a tolerance of few degrees.
-[csharp]
+[source,csharp]
----
brick.WaitForSensorToConnect(SensorPort.PortA);
var active = (ActiveMotor)brick.GetMotor(SensorPort.PortA);
diff --git a/documentation/asciidoc/accessories/build-hat/net-sensors.adoc b/documentation/asciidoc/accessories/build-hat/net-sensors.adoc
index c8d6d72e8..d6e6284f4 100644
--- a/documentation/asciidoc/accessories/build-hat/net-sensors.adoc
+++ b/documentation/asciidoc/accessories/build-hat/net-sensors.adoc
@@ -1,12 +1,12 @@
-=== Using Sensors from .NET
+=== Use Sensors from .NET
-Like for motors, you have active and passive sensors. Most recent sensors are active. The passive one are lights and simple buttons. Active ones are distance or color sensors, as well as small 3x3 pixel displays.
+Like for motors, you have active and passive sensors. Most recent sensors are active. The passive ones are lights and simple buttons. Active ones are distance or colour sensors, as well as small 3×3 pixel displays.
==== Button/Touch Passive Sensor
The button/touch passive sensor have one specific property `IsPressed`. The property is set to true when the button is pressed. Here is a complete example with events:
-[csharp]
+[source,csharp]
----
brick.WaitForSensorToConnect(SensorPort.PortA);
var button = (ButtonSensor)brick.GetSensor(SensorPort.PortA);
@@ -39,7 +39,7 @@ image::images/passive-light.png[Passive light, width="60%"]
The passive light are the train lights. They can be switched on and you can controlled their brightness.
-[csharp]
+[source,csharp]
----
brick.WaitForSensorToConnect(SensorPort.PortA);
var light = (PassiveLight)brick.GetSensor(SensorPort.PortA);
@@ -55,9 +55,9 @@ light.Off()
==== Active Sensor
-The active sensor class is a generic one that all the active sensor heritate including active motors. They contains a set of properties regarding how they are connected to the Build HAT, the modes, the detailed combi modes, the hardware, software versions and a specific property called `ValueAsString`. The value as string contains the last measurement as a collection of strings. A measurement arrives like `P0C0: +23 -42 0`, the enumeration will contains `P0C0:`, `+23`, `-42` and `0`. This is made so if you are using advance modes and managing yourself the combi modes and commands, you'll be able to get the measurements.
+The active sensor class is a generic one that all the active sensors inherit, including active motors. They contain a set of properties regarding how they are connected to the Build HAT, the modes, the detailed Combi modes, the hardware and software versions, and a specific property called `ValueAsString`. The value as string contains the last measurement as a collection of strings. A measurement arrives like `P0C0: +23 -42 0`; the enumeration will contain `P0C0:`, `+23`, `-42` and `0`. This is made so that if you are using advanced modes and managing the Combi modes and commands yourself, you'll be able to get the measurements.
-All active sensor can run a specific measurement mode or a combi mode. You can setup one through the advance mode using the `SelectModeAndRead` and `SelectCombiModesAndRead` functions with the specific mode(s) you'd like to continuously have. It is important to understand that changing the mode or setting up a new mode will stop the previous mode.
+All active sensors can run a specific measurement mode or a Combi mode. You can set one up through the advanced mode using the `SelectModeAndRead` and `SelectCombiModesAndRead` functions with the specific mode(s) you'd like to continuously have. It is important to understand that changing the mode or setting up a new mode will stop the previous mode.
The modes that can be combined in the Combi mode are listed in the `CombiModes` property. Al the properties of the sensors will be updated automatically when you'll setup one of those modes.
@@ -70,7 +70,7 @@ WeDo Tilt Sensor has a special `Tilt` property. The type is a point with X is th
You can set a continuous measurement for this sensor using the `ContinuousMeasurement` property.
-[csharp]
+[source,csharp]
----
brick.WaitForSensorToConnect(SensorPort.PortA);
var tilt = (WeDoTiltSensor)brick.GetSensor(SensorPort.PortA);
@@ -89,9 +89,9 @@ while(!console.KeyAvailable)
.WeDo Distance sensor, https://www.bricklink.com/v2/catalog/catalogitem.page?S=45304-1&name=WeDo%202.0%20Motion%20Sensor&category=%5BEducational%20&%20Dacta%5D%5BWeDo%5D#T=S&O={%22iconly%22:0}[Image from Bricklink]
image::images/wedo-distance.png[WeDo Distance sensor, width="60%"]
-WeDo Distance Sensor gives you a distance in millimeters with the Distance property.
+WeDo Distance Sensor gives you a distance in millimetres with the Distance property.
-[csharp]
+[source,csharp]
----
brick.WaitForSensorToConnect(SensorPort.PortA);
var distance = (WeDoDistanceSensor)brick.GetSensor(SensorPort.PortA);
@@ -110,7 +110,7 @@ image::images/spike-force.png[spike force sensor, width="60%"]
This force sensor measure the pressure applies on it and if it is pressed. The two properties can be access through `Force` and `IsPressed` properties.
-[csharp]
+[source,csharp]
----
brick.WaitForSensorToConnect(SensorPort.PortA);
var force = (ForceSensor)brick.GetSensor(SensorPort.PortA);
@@ -122,14 +122,14 @@ while(!force.IsPressed)
}
----
-==== SPIKE Essential 3x3 Color Light Matrix
+==== SPIKE Essential 3×3 Colour Light Matrix
-.spike 3x3 matrix, https://www.bricklink.com/v2/catalog/catalogitem.page?P=45608c01&name=Electric,%203%20x%203%20Color%20Light%20Matrix%20-%20SPIKE%20Prime&category=%5BElectric%5D#T=C[Image from Bricklink]
-image::images/3x3matrix.png[spike 3x3 matrix, width="60%"]
+.spike 3×3 matrix, https://www.bricklink.com/v2/catalog/catalogitem.page?P=45608c01&name=Electric,%203%20x%203%20Color%20Light%20Matrix%20-%20SPIKE%20Prime&category=%5BElectric%5D#T=C[Image from Bricklink]
+image::images/3x3matrix.png[spike 3×3 matrix, width="60%"]
-This is a small 3x3 display with 9 different leds that can be controlled individually. The class exposes functions to be able to control the screen. Here is an example using them:
+This is a small 3×3 display with 9 different LEDs that can be controlled individually. The class exposes functions to be able to control the screen. Here is an example using them:
-[csharp]
+[source,csharp]
----
brick.WaitForSensorToConnect(SensorPort.PortA);
var matrix = (ColorLightMatrix)brick.GetSensor(SensorPort.PortA);
@@ -154,23 +154,23 @@ Span col = stackalloc LedColor[9] { LedColor.White, LedColor.White, Le
matrix.DisplayColorPerPixel(brg, col);
----
-==== SPIKE Prime Color Sensor and Color and Distance Sensor
+==== SPIKE Prime Colour Sensor and Colour and Distance Sensor
-SPIKE color sensor:
+SPIKE colour sensor:
-.spike color sensor, https://www.bricklink.com/v2/catalog/catalogitem.page?P=37308c01&name=Electric%20Sensor,%20Color%20-%20Spike%20Prime&category=%5BElectric%5D#T=C&C=11[Image from Bricklink]
+.spike colour sensor, https://www.bricklink.com/v2/catalog/catalogitem.page?P=37308c01&name=Electric%20Sensor,%20Color%20-%20Spike%20Prime&category=%5BElectric%5D#T=C&C=11[Image from Bricklink]
image::images/spike-color.png[spike color sensor, width="60%"]
-Color and distance sensor:
+Colour and distance sensor:
.Color distance sensor, https://www.bricklink.com/v2/catalog/catalogitem.page?P=bb0891c01&name=Electric%20Sensor,%20Color%20and%20Distance%20-%20Boost&category=%5BElectric%5D#T=C&C=1[Image from Bricklink]
-image::images/color-distance.png[Color distance sensor, width="60%"]
+image::images/color-distance.png[Colour distance sensor, width="60%"]
-Those color sensor has multiple properties and functions. You can get the `Color`, the `ReflectedLight` and the `AmbiantLight`.
+Those colour sensors have multiple properties and functions. You can get the `Color`, the `ReflectedLight` and the `AmbiantLight`.
-On top of this, the Color and Distance sensor can measure the `Distance` and has an object `Counter`. It will count automatically the number of objects which will go in and out of the range. This does allow to count objects passing in front of the sensor. The distance is limited from 0 to 10 centimeters.
+On top of this, the Colour and Distance sensor can measure the `Distance` and has an object `Counter`. It will automatically count the number of objects that go in and out of range. This allows you to count objects passing in front of the sensor. The distance is limited from 0 to 10 centimetres.
-[csharp]
+[source,csharp]
----
brick.WaitForSensorToConnect(SensorPort.PortC);
@@ -191,16 +191,16 @@ while (!Console.KeyAvailable)
}
----
-NOTE: For better measurement, it is not recommended to change the measurement mode in a very fast way, the color integration may not be done in a proper way. This example gives you the full spectrum of what you can do with the sensor. Also, this class do not implement a continuous measurement mode. You can setup one through the advance mode using the `SelectModeAndRead` function with the specific mode you'd like to continuously have. It is important to understand that changing the mode or setting up a new mode will stop the previous mode.
+NOTE: For better measurement, it is not recommended to change the measurement mode too quickly; the colour integration may not be done properly. This example gives you the full spectrum of what you can do with the sensor. Also, this class does not implement a continuous measurement mode. You can set one up through the advanced mode using the `SelectModeAndRead` function with the specific mode you'd like to continuously have. It is important to understand that changing the mode or setting up a new mode will stop the previous mode.
==== SPIKE Prime Ultrasonic Distance Sensor
-.spike distance sensor, https://www.bricklink.com/v2/catalog/catalogitem.page?P=37316c01&name=Electric%20Sensor,%20Distance%20-%20Spike%20Prime&category=%5BElectric%5D#T=C&C=11[Image from Bricklink]
-image::images/spike-distance.png[spike distance sensor, width="60%"]
+.Spike distance sensor, https://www.bricklink.com/v2/catalog/catalogitem.page?P=37316c01&name=Electric%20Sensor,%20Distance%20-%20Spike%20Prime&category=%5BElectric%5D#T=C&C=11[Image from Bricklink]
+image::images/spike-distance.png[Spike distance sensor, width="60%"]
-This is a distance sensor and it does implement a `Distance` property that will give the distance in millimeter. A `ContinuousMeasurement` mode is also available on this one.
+This is a distance sensor and it implements a `Distance` property that gives the distance in millimetres. A `ContinuousMeasurement` mode is also available on this one.
-[csharp]
+[source,csharp]
----
brick.WaitForSensorToConnect(SensorPort.PortA);
var distance = (UltrasonicDistanceSensor)brick.GetSensor(SensorPort.PortA);
diff --git a/documentation/asciidoc/accessories/build-hat/preparing-build-hat.adoc b/documentation/asciidoc/accessories/build-hat/preparing-build-hat.adoc
index f535b5971..0e19d8bda 100644
--- a/documentation/asciidoc/accessories/build-hat/preparing-build-hat.adoc
+++ b/documentation/asciidoc/accessories/build-hat/preparing-build-hat.adoc
@@ -1,10 +1,10 @@
-== Preparing your Build HAT
+== Prepare your Build HAT
NOTE: Before starting to work with your Raspberry Pi Build HAT you should xref:../computers/getting-started.adoc#setting-up-your-raspberry-pi[set up] your Raspberry Pi, xref:../computers/getting-started.adoc#installing-the-operating-system[install] the latest version of the operating system using https://www.raspberrypi.com/downloads/[Raspberry Pi Imager].
Attach 9mm spacers to the bottom of the board. Seat the Raspberry Pi Build HAT onto your Raspberry Pi. Make sure you put it on the right way up. Unlike other HATs, all the components are on the bottom, leaving room for a breadboard or LEGO® elements on top.
-image::images/fitting-build-hat.gif[width="80%"]
+video::images/fitting-build-hat.webm[width="80%"]
=== Access the GPIO Pins
@@ -27,21 +27,21 @@ The following pins are used by the Build HAT itself and you should not connect a
|===
-=== Setting up your Raspberry Pi
+=== Set up your Raspberry Pi
-Once the Raspberry Pi has booted, open the Raspberry Pi Configuration tool by clicking on the Raspberry Menu button and then selecting “Preferences” and then “Raspberry Pi Configuration”.
+Once the Raspberry Pi has booted, open the Raspberry Pi Configuration tool by clicking on the Raspberry Menu button and then selecting "Preferences" and then "Raspberry Pi Configuration".
-Click on the “interfaces” tab and adjust the Serial settings as shown below:
+Click on the "interfaces" tab and adjust the Serial settings as shown below:
image::images/setting-up.png[width="50%"]
-==== Using your Raspberry Pi Headless
+==== Use your Raspberry Pi headless
-If you are running your Raspberry Pi headless and using `raspi-config`, select “Interface Options” from the first menu.
+If you are running your Raspberry Pi headless and using `raspi-config`, select "Interface Options" from the first menu.
image::images/raspi-config-1.png[width="70%"]
-Then “P6 Serial Port”.
+Then "P6 Serial Port".
image::images/raspi-config-2.png[width="70%"]
@@ -59,20 +59,20 @@ image::images/raspi-config-5.png[width="70%"]
You will need to reboot at this point if you have made any changes.
-=== Powering the Build HAT
+=== Power the Build HAT
-Connect an external power supply — the https://raspberrypi.com/products/build-hat-power-supply[official Raspberry Pi Build HAT power supply] is recommended — however any reliable +8V±10% power supply capable of supplying 48W via a DC 5521 centre positive barrel connector (5.5mm × 2.1mm × 11mm) will power the Build HAT. You don’t need to connect an additional USB power supply to the Raspberry Pi as well, unless you are using a Raspberry Pi 400.
+Connect an external power supply — the https://raspberrypi.com/products/build-hat-power-supply[official Raspberry Pi Build HAT power supply] is recommended — however any reliable +8V±10% power supply capable of supplying 48W via a DC 5521 centre positive barrel connector (5.5mm × 2.1mm × 11mm) will power the Build HAT. You don't need to connect an additional USB power supply to the Raspberry Pi unless you are using a Keyboard-series device.
[NOTE]
====
-The Build HAT can not power the Raspberry Pi 400 as it does not support being powered via the GPIO headers.
+The Build HAT cannot power Keyboard-series devices, since they do not support power supply over the GPIO headers.
====
-image::images/powering-build-hat.gif[width="80%"]
+video::images/powering-build-hat.webm[width="80%"]
[NOTE]
====
-The LEGO® Technic™ motors are very powerful; so to drive them you’ll need an external 8V power supply. If you want to read from motor encoders and the SPIKE™ force sensor, you can power your Raspberry Pi and Build HAT the usual way, via your Raspberry Pi’s USB power socket. The SPIKE™ colour and distance sensors, like the motors, require an https://raspberrypi.com/products/build-hat-power-supply[external power supply].
+The LEGO® Technic™ motors are very powerful; so to drive them you'll need an external 8V power supply. If you want to read from motor encoders and the SPIKE™ force sensor, you can power your Raspberry Pi and Build HAT the usual way, via your Raspberry Pi's USB power socket. The SPIKE™ colour and distance sensors, like the motors, require an https://raspberrypi.com/products/build-hat-power-supply[external power supply].
====
You have the choice to use Build HAT with Python or .NET.
diff --git a/documentation/asciidoc/accessories/build-hat/py-installing-software.adoc b/documentation/asciidoc/accessories/build-hat/py-installing-software.adoc
index 5279062a3..b9a93f8be 100644
--- a/documentation/asciidoc/accessories/build-hat/py-installing-software.adoc
+++ b/documentation/asciidoc/accessories/build-hat/py-installing-software.adoc
@@ -1,12 +1,19 @@
-== Using the Build HAT from Python
+== Use the Build HAT from Python
-=== Installing the Python Library
+=== Install the Build HAT Python Library
-Install the Build HAT Python library. Open a Terminal window and type,
+To install the Build HAT Python library, open a terminal window and run the following command:
-[source]
+[source,console]
----
-$ pip3 install buildhat
+$ sudo apt install python3-build-hat
+----
+
+Raspberry Pi OS versions prior to _Bookworm_ do not have access to the library with `apt`. Instead, run the following command to install the library using `pip`:
+
+[source,console]
+----
+$ sudo pip3 install buildhat
----
For more information about the Build HAT Python Library see https://buildhat.readthedocs.io/[ReadTheDocs].
diff --git a/documentation/asciidoc/accessories/build-hat/py-motors.adoc b/documentation/asciidoc/accessories/build-hat/py-motors.adoc
index 7c2a1ab3b..7cf498f67 100644
--- a/documentation/asciidoc/accessories/build-hat/py-motors.adoc
+++ b/documentation/asciidoc/accessories/build-hat/py-motors.adoc
@@ -1,19 +1,19 @@
-=== Using Motors from Python
+=== Use Motors from Python
There are xref:build-hat.adoc#device-compatibility[a number of motors] that work with the Build HAT.
-==== Connecting a Motor
+==== Connect a Motor
-Connect a motor to port A on the Build HAT. The LPF2 connectors need to be inserted the correct way up. If the connector doesn’t slide in easily, rotate by 180 degrees and try again.
+Connect a motor to port A on the Build HAT. The LPF2 connectors need to be inserted the correct way up. If the connector doesn't slide in easily, rotate by 180 degrees and try again.
-image::images/connect-motor.gif[width="80%"]
+video::images/connect-motor.webm[width="80%"]
-==== Working with Motors
+==== Work with Motors
Start the https://thonny.org/[Thonny IDE]. Add the program code below:
-[source,python,linenums]
+[source,python]
----
from buildhat import Motor
@@ -22,24 +22,24 @@ motor_a = Motor('A')
motor_a.run_for_seconds(5)
----
-Run the program by clicking the play/run button. If this is the first time you’re running a Build HAT program since the Raspberry Pi has booted, there will be a few seconds pause while the firmware is copied across to the board. You should see the red LED extinguish and the green LED illuminate. Subsequent executions of a Python program will not require this pause.
+Run the program by clicking the play/run button. If this is the first time you're running a Build HAT program since the Raspberry Pi has booted, there will be a few seconds pause while the firmware is copied across to the board. You should see the red LED extinguish and the green LED illuminate. Subsequent executions of a Python program will not require this pause.
-image::images/blinking-light.gif[width="80%"]
+video::images/blinking-light.webm[width="80%"]
Your motor should turn clockwise for 5 seconds.
-image::images/turning-motor.gif[width="80%"]
+video::images/turning-motor.webm[width="80%"]
Change the final line of your program and re-run.
-[source,python,linenums, start=5]
+[source,python]
----
motor_a.run_for_seconds(5, speed=50)
----
The motor should now turn faster. Make another change:
-[source,python,linenums, start=5]
+[source,python]
----
motor_a.run_for_seconds(5, speed=-50)
----
diff --git a/documentation/asciidoc/accessories/build-hat/py-sensors.adoc b/documentation/asciidoc/accessories/build-hat/py-sensors.adoc
index 889a251ce..15571eae8 100644
--- a/documentation/asciidoc/accessories/build-hat/py-sensors.adoc
+++ b/documentation/asciidoc/accessories/build-hat/py-sensors.adoc
@@ -1,16 +1,16 @@
-=== Using Sensors from Python
+=== Use Sensors from Python
There is a xref:build-hat.adoc#device-compatibility[large range of sensors] that work with the Build HAT.
-==== Working with Sensors
+==== Work with Sensors
Connect a Colour sensor to port B on the Build HAT, and a Force sensor to port C.
-NOTE: If you’re not intending to drive a motor, then you don’t need an external power supply and you can use a standard USB power supply for your Raspberry Pi.
+NOTE: If you're not intending to drive a motor, then you don't need an external power supply and you can use a standard USB power supply for your Raspberry Pi.
Create another new program:
-[source,python,linenums]
+[source,python]
----
from signal import pause
from buildhat import ForceSensor, ColorSensor
@@ -30,4 +30,4 @@ button.when_released = handle_released
pause()
----
-Run it and hold a coloured object (LEGO® elements are ideal) in front of the colour sensor and press the Force sensor plunger. The sensor’s LED should switch on and the name of the closest colour should be displayed in the thonny REPL.
+Run it and hold a coloured object (LEGO® elements are ideal) in front of the colour sensor and press the Force sensor plunger. The sensor's LED should switch on and the name of the closest colour should be displayed in the Thonny REPL.
diff --git a/documentation/asciidoc/accessories/bumper.adoc b/documentation/asciidoc/accessories/bumper.adoc
new file mode 100644
index 000000000..01e8de0fb
--- /dev/null
+++ b/documentation/asciidoc/accessories/bumper.adoc
@@ -0,0 +1 @@
+include::bumper/about.adoc[]
diff --git a/documentation/asciidoc/accessories/bumper/about.adoc b/documentation/asciidoc/accessories/bumper/about.adoc
new file mode 100644
index 000000000..ee9f12052
--- /dev/null
+++ b/documentation/asciidoc/accessories/bumper/about.adoc
@@ -0,0 +1,31 @@
+== About
+
+.The Raspberry Pi Bumper for Raspberry Pi 5
+image::images/bumper.jpg[width="80%"]
+
+The Raspberry Pi Bumper for Raspberry Pi 5 is a snap-on silicone cover that protects
+the bottom and edges of the board. When attached, the mounting holes of the Raspberry Pi remain accessible through the bumper.
+
+The Bumper is only compatible with Raspberry Pi 5.
+
+== Assembly instructions
+
+.Assembling the bumper
+image::images/assembly.png[width="80%"]
+
+To attach the Raspberry Pi Bumper to your Raspberry Pi:
+
+. Turn off your Raspberry Pi and disconnect the power cable.
+. Remove the SD card from the SD card slot of your Raspberry Pi.
+. Align the bumper with the board.
+. Press the board gently but firmly into the bumper, taking care to avoid contact between the bumper and any of the board's components.
+. Insert your SD card back into the SD card slot of your Raspberry Pi.
+. Reconnect your Raspberry Pi to power.
+
+To remove the Raspberry Pi Bumper from your Raspberry Pi:
+
+. Turn off your Raspberry Pi and disconnect the power cable.
+. Remove the SD card from the SD card slot of your Raspberry Pi.
+. Gently but firmly peel the bumper away from the board, taking care to avoid contact between the bumper and any of the board's components.
+. Insert your SD card back into the SD card slot of your Raspberry Pi.
+. Reconnect your Raspberry Pi to power.
diff --git a/documentation/asciidoc/accessories/bumper/images/assembly.png b/documentation/asciidoc/accessories/bumper/images/assembly.png
new file mode 100644
index 000000000..bdcfb0328
Binary files /dev/null and b/documentation/asciidoc/accessories/bumper/images/assembly.png differ
diff --git a/documentation/asciidoc/accessories/bumper/images/bumper.jpg b/documentation/asciidoc/accessories/bumper/images/bumper.jpg
new file mode 100644
index 000000000..14682676a
Binary files /dev/null and b/documentation/asciidoc/accessories/bumper/images/bumper.jpg differ
diff --git a/documentation/asciidoc/accessories/camera.adoc b/documentation/asciidoc/accessories/camera.adoc
index c3606e3e8..f5076f9fa 100644
--- a/documentation/asciidoc/accessories/camera.adoc
+++ b/documentation/asciidoc/accessories/camera.adoc
@@ -1,5 +1,9 @@
include::camera/camera_hardware.adoc[]
-include::camera/hqcam_filter_removal.adoc[]
+include::camera/filters.adoc[]
-include::camera/lens.adoc[]
\ No newline at end of file
+include::camera/lens.adoc[]
+
+include::camera/synchronous_cameras.adoc[]
+
+include::camera/external_trigger.adoc[]
diff --git a/documentation/asciidoc/accessories/camera/camera_hardware.adoc b/documentation/asciidoc/accessories/camera/camera_hardware.adoc
index 2260c726d..3b8dafbd5 100644
--- a/documentation/asciidoc/accessories/camera/camera_hardware.adoc
+++ b/documentation/asciidoc/accessories/camera/camera_hardware.adoc
@@ -1,5 +1,5 @@
:figure-caption!:
-== Camera Modules
+== About the Camera Modules
There are now several official Raspberry Pi camera modules. The original 5-megapixel model was https://www.raspberrypi.com/news/camera-board-available-for-sale/[released] in 2013, it was followed by an 8-megapixel https://www.raspberrypi.com/products/camera-module-v2/[Camera Module 2] which was https://www.raspberrypi.com/news/new-8-megapixel-camera-board-sale-25/[released] in 2016. The latest camera model is the 12-megapixel https://raspberrypi.com/products/camera-module-3/[Camera Module 3] which was https://www.raspberrypi.com/news/new-autofocus-camera-modules/[released] in 2023. The original 5MP device is no longer available from Raspberry Pi.
@@ -11,39 +11,62 @@ image::images/cm3.jpg[Camera Module 3 normal and wide angle]
.Camera Module 3 NoIR (left) and Camera Module 3 NoIR Wide (right)
image::images/cm3_noir.jpg[Camera Module 3 NoIR normal and wide angle]
-Aditionally a 12-megapixel https://www.raspberrypi.com/products/raspberry-pi-high-quality-camera/[High Quality Camera] with CS- or M12-mount variants for use with external lenses was https://www.raspberrypi.com/news/new-product-raspberry-pi-high-quality-camera-on-sale-now-at-50/[released in 2020] and https://www.raspberrypi.com/news/new-autofocus-camera-modules/[2023] respectively. There is no infrared version of the HQ Camera, however the xref:camera.adoc#hq-camera-filter-removal[IR Filter can be removed] if required.
+Additionally, a 12-megapixel https://www.raspberrypi.com/products/raspberry-pi-high-quality-camera/[High Quality Camera] with CS- or M12-mount variants for use with external lenses was https://www.raspberrypi.com/news/new-product-raspberry-pi-high-quality-camera-on-sale-now-at-50/[released in 2020] and https://www.raspberrypi.com/news/new-autofocus-camera-modules/[2023] respectively. There is no infrared version of the HQ Camera, however the xref:camera.adoc#filter-removal[IR Filter can be removed] if required.
.HQ Camera, M12-mount (left) and C/CS-mount (right)
image::images/hq.jpg[M12- and C/CS-mount versions of the HQ Camera]
-NOTE: Raspberry Pi Camera Modules are compatible with all Raspberry Pi computers with CSI connectors - that is, all models except Raspberry Pi 400 and the 2016 launch version of Zero.
+The Raspberry Pi AI Camera uses the Sony IMX500 imaging sensor to provide low-latency and high-performance AI capabilities to any camera application. Tight integration with xref:../computers/camera_software.adoc[Raspberry Pi's camera software stack] allows users to deploy their own neural network models with minimal effort.
-=== Installing a Raspberry Pi camera
+image::images/ai-camera-hero.png[The Raspberry Pi AI Camera]
+
+Finally, there is the Global Shutter camera, which was http://raspberrypi.com/news/new-raspberry-pi-global-shutter-camera[released in 2023]. There is no infrared version of the GS Camera, however the xref:camera.adoc#filter-removal[IR Filter can be removed] if required.
+
+.Global Shutter Camera
+image::images/gs-camera.jpg[GS Camera]
+
+NOTE: Raspberry Pi Camera Modules are compatible with all Raspberry Pi computers with CSI connectors.
+
+=== Rolling or Global shutter?
+
+Most digital cameras, including our Camera Modules, use a **rolling shutter**: they scan the image they're capturing line-by-line, then output the results. You may have noticed that this can cause distortion effects in some settings; if you've ever photographed rotating propeller blades, you've probably spotted the image shimmering rather than looking like an object that is rotating. The propeller blades have had enough time to change position in the tiny moment that the camera has taken to swipe across and observe the scene.
+
+A **global shutter**, like the one on our Global Shutter Camera Module, doesn't do this. It captures the light from every pixel in the scene at once, so your photograph of propeller blades will not suffer from the same distortion.
+
+Why is this useful? Fast-moving objects, like those propeller blades, are now easy to capture; we can also synchronise several cameras to take a photo at precisely the same moment in time. There are plenty of benefits here, like minimising distortion when capturing stereo images. (The human brain is confused if any movement that appears in the left eye has not appeared in the right eye yet.) The Raspberry Pi Global Shutter Camera can also operate with shorter exposure times - down to 30µs, given enough light - than a rolling shutter camera, which makes it useful for high-speed photography.
+
+NOTE: The Global Shutter Camera's image sensor has a 6.3mm diagonal active sensing area, which is similar in size to Raspberry Pi's HQ Camera. However, the pixels are larger and can collect more light. Large pixel size and low pixel count are valuable in machine-vision applications; the more pixels a sensor produces, the harder it is to process the image in real time. To get around this, many applications downsize and crop images. This is unnecessary with the Global Shutter Camera and the appropriate lens magnification, where the lower resolution and large pixel size mean an image can be captured natively.
+
+== Install a Raspberry Pi camera
WARNING: Cameras are sensitive to static. Earth yourself prior to handling the PCB. A sink tap or similar should suffice if you don't have an earthing strap.
-==== Connecting the Camera
+=== Connect the Camera
+
+Before connecting any Camera, shut down your Raspberry Pi and disconnect it from power.
+
+The flex cable inserts into the connector labelled CAMERA on the Raspberry Pi, which is located between the Ethernet and HDMI ports. The cable must be inserted with the silver contacts facing the HDMI port. To open the connector, pull the tabs on the top of the connector upwards, then towards the Ethernet port. The flex cable should be inserted firmly into the connector, with care taken not to bend the flex at too acute an angle. To close the connector, push the top part of the connector down and away from the Ethernet port while holding the flex cable in place.
-The flex cable inserts into the connector labelled CAMERA on the Raspberry Pi, which is located between the Ethernet and HDMI ports. The cable must be inserted with the silver contacts facing the HDMI port. To open the connector, pull the tabs on the top of the connector upwards, then towards the Ethernet port. The flex cable should be inserted firmly into the connector, with care taken not to bend the flex at too acute an angle. To close the connector, push the top part of the connector towards the HDMI port and down, while holding the flex cable in place.
+The following video shows how to connect the original camera on the original Raspberry Pi 1:
-We have created a video to illustrate the process of connecting the camera. Although the video shows the original camera on the original Raspberry Pi 1, the principle is the same for all camera boards:
+video::GImeVqHQzsE[youtube,width=80%,height=400px]
-video::GImeVqHQzsE[youtube]
+All Raspberry Pi boards with a camera connector use the same installation method, though the Raspberry Pi 5 and all Raspberry Pi Zero models require a https://www.raspberrypi.com/products/camera-cable/[different camera cable].
-Depending on the model, the camera may come with a small piece of translucent blue plastic film covering the lens. This is only present to protect the lens while it is being mailed to you, and needs to be removed by gently peeling it off.
+Some cameras may come with a small piece of translucent blue plastic film covering the lens. This is only present to protect the lens during shipping. To remove it, gently peel it off.
NOTE: There is additional documentation available around fitting the recommended https://datasheets.raspberrypi.com/hq-camera/cs-mount-lens-guide.pdf[6mm] and https://datasheets.raspberrypi.com/hq-camera/c-mount-lens-guide.pdf[16mm] lens to the HQ Camera.
-=== Preparing the Software
+=== Prepare the Software
-Before proceeding, we recommend ensuring that your kernel, GPU firmware and applications are all up to date. Please follow the instructions on xref:../computers/os.adoc#using-apt[keeping your operating system up to date].
+Before proceeding, we recommend ensuring that your kernel, GPU firmware and applications are all up to date. Please follow the instructions on xref:../computers/os.adoc#update-software[keeping your operating system up to date].
-Then, please follow the relevant setup instructions for the xref:../computers/camera_software.adoc#getting-started[libcamera] software stack, and the https://datasheets.raspberrypi.com/camera/picamera2-manual.pdf[Picamera2 Python library].
+Then, please follow the relevant setup instructions for xref:../computers/camera_software.adoc#rpicam-apps[`rpicam-apps`], and the https://datasheets.raspberrypi.com/camera/picamera2-manual.pdf[Picamera2 Python library].
-=== Hardware Specification
+== Hardware Specification
|===
-| | Camera Module v1 | Camera Module v2 | Camera Module 3 | Camera Module 3 Wide | HQ Camera
+| | Camera Module v1 | Camera Module v2 | Camera Module 3 | Camera Module 3 Wide | HQ Camera | AI Camera | GS Camera
| Net price
| $25
@@ -51,27 +74,35 @@ Then, please follow the relevant setup instructions for the xref:../computers/ca
| $25
| $35
| $50
+| $70
+| $50
| Size
| Around 25 × 24 × 9 mm
| Around 25 × 24 × 9 mm
| Around 25 × 24 × 11.5 mm
| Around 25 × 24 × 12.4 mm
-| 38 x 38 x 18.4mm (excluding lens)
+| 38 × 38 × 18.4mm (excluding lens)
+| 25 × 24 × 11.9mm
+| 38 × 38 × 19.8mm (29.5mm with adaptor and dust cap)
| Weight
| 3g
| 3g
| 4g
| 4g
-|
+| 30.4g
+| 6g
+| 34g (41g with adaptor and dust cap)
| Still resolution
-| 5 Megapixels
-| 8 Megapixels
-| 11.9 Megapixels
-| 11.9 Megapixels
-| 12.3 Megapixels
+| 5 megapixels
+| 8 megapixels
+| 11.9 megapixels
+| 11.9 megapixels
+| 12.3 megapixels
+| 12.3 megapixels
+| 1.58 megapixels
| Video modes
| 1080p30, 720p60 and 640 × 480p60/90
@@ -79,6 +110,8 @@ Then, please follow the relevant setup instructions for the xref:../computers/ca
| 2304 × 1296p56, 2304 × 1296p30 HDR, 1536 × 864p120
| 2304 × 1296p56, 2304 × 1296p30 HDR, 1536 × 864p120
| 2028 × 1080p50, 2028 × 1520p40 and 1332 × 990p120
+| 2028 × 1520p30, 4056 × 3040p10
+| 1456 × 1088p60
| Sensor
| OmniVision OV5647
@@ -86,27 +119,35 @@ Then, please follow the relevant setup instructions for the xref:../computers/ca
| Sony IMX708
| Sony IMX708
| Sony IMX477
+| Sony IMX500
+| Sony IMX296
| Sensor resolution
| 2592 × 1944 pixels
| 3280 × 2464 pixels
-| 4608 x 2592 pixels
-| 4608 x 2592 pixels
-| 4056 x 3040 pixels
+| 4608 × 2592 pixels
+| 4608 × 2592 pixels
+| 4056 × 3040 pixels
+| 4056 × 3040 pixels
+| 1456 × 1088 pixels
| Sensor image area
| 3.76 × 2.74 mm
-| 3.68 x 2.76 mm (4.6 mm diagonal)
-| 6.45 x 3.63mm (7.4mm diagonal)
-| 6.45 x 3.63mm (7.4mm diagonal)
-| 6.287mm x 4.712 mm (7.9mm diagonal)
+| 3.68 × 2.76 mm (4.6 mm diagonal)
+| 6.45 × 3.63mm (7.4mm diagonal)
+| 6.45 × 3.63mm (7.4mm diagonal)
+| 6.287mm × 4.712 mm (7.9mm diagonal)
+| 6.287mm × 4.712 mm (7.9mm diagonal)
+| 6.3mm diagonal
| Pixel size
| 1.4 µm × 1.4 µm
-| 1.12 µm x 1.12 µm
-| 1.4 µm x 1.4 µm
-| 1.4 µm x 1.4 µm
-| 1.55 µm x 1.55 µm
+| 1.12 µm × 1.12 µm
+| 1.4 µm × 1.4 µm
+| 1.4 µm × 1.4 µm
+| 1.55 µm × 1.55 µm
+| 1.55 µm × 1.55 µm
+| 3.45 µm × 3.45 µm
| Optical size
| 1/4"
@@ -114,6 +155,8 @@ Then, please follow the relevant setup instructions for the xref:../computers/ca
| 1/2.43"
| 1/2.43"
| 1/2.3"
+| 1/2.3"
+| 1/2.9"
| Focus
| Fixed
@@ -121,6 +164,8 @@ Then, please follow the relevant setup instructions for the xref:../computers/ca
| Motorized
| Motorized
| Adjustable
+| Adjustable
+| Adjustable
| Depth of field
| Approx 1 m to ∞
@@ -128,6 +173,8 @@ Then, please follow the relevant setup instructions for the xref:../computers/ca
| Approx 10 cm to ∞
| Approx 5 cm to ∞
| N/A
+| Approx 20 cm to ∞
+| N/A
| Focal length
| 3.60 mm +/- 0.01
@@ -135,6 +182,8 @@ Then, please follow the relevant setup instructions for the xref:../computers/ca
| 4.74 mm
| 2.75 mm
| Depends on lens
+| 4.74 mm
+| Depends on lens
| Horizontal Field of View (FoV)
| 53.50 +/- 0.13 degrees
@@ -142,6 +191,8 @@ Then, please follow the relevant setup instructions for the xref:../computers/ca
| 66 degrees
| 102 degrees
| Depends on lens
+| 66 ±3 degrees
+| Depends on lens
| Vertical Field of View (FoV)
| 41.41 +/- 0.11 degrees
@@ -149,6 +200,8 @@ Then, please follow the relevant setup instructions for the xref:../computers/ca
| 41 degrees
| 67 degrees
| Depends on lens
+| 52.3 ±3 degrees
+| Depends on lens
| Focal ratio (F-Stop)
| F2.9
@@ -156,22 +209,39 @@ Then, please follow the relevant setup instructions for the xref:../computers/ca
| F1.8
| F2.2
| Depends on lens
+| F1.79
+| Depends on lens
-| Maximum exposure times (seconds)
-| 6
+| Maximum exposure time (seconds)
+| 3.28
| 11.76
| 112
| 112
| 670.74
+| 112
+| 15.5
| Lens Mount
| N/A
| N/A
| N/A
| N/A
-| CS- or M12-mount
+| C/CS- or M12-mount
+| N/A
+| C/CS
+
+| NoIR version available?
+| Yes
+| Yes
+| Yes
+| Yes
+| No
+| No
+| No
|===
+NOTE: There is https://github.com/raspberrypi/libcamera/issues/43[some evidence] to suggest that the Camera Module 3 may emit RFI at a harmonic of the CSI clock rate. This RFI is in a range that can interfere with GPS L1 frequencies (1575 MHz). Please see the https://github.com/raspberrypi/libcamera/issues/43[thread on GitHub] for details and proposed workarounds.
+
=== Mechanical Drawings
Available mechanical drawings;
@@ -179,18 +249,23 @@ Available mechanical drawings;
* Camera Module 2 https://datasheets.raspberrypi.com/camera/camera-module-2-mechanical-drawing.pdf[PDF]
* Camera Module 3 https://datasheets.raspberrypi.com/camera/camera-module-3-standard-mechanical-drawing.pdf[PDF]
* Camera Module 3 Wide https://datasheets.raspberrypi.com/camera/camera-module-3-wide-mechanical-drawing.pdf[PDF]
+* Camera Module 3 https://datasheets.raspberrypi.com/camera/camera-module-3-step.zip[STEP files]
* HQ Camera Module (CS-mount version) https://datasheets.raspberrypi.com/hq-camera/hq-camera-cs-mechanical-drawing.pdf[PDF]
** The CS-mount https://datasheets.raspberrypi.com/hq-camera/hq-camera-cs-lensmount-drawing.pdf[PDF]
* HQ Camera Module (M12-mount version) https://datasheets.raspberrypi.com/hq-camera/hq-camera-m12-mechanical-drawing.pdf[PDF]
+* GS Camera Module
+https://datasheets.raspberrypi.com/gs-camera/gs-camera-mechanical-drawing.pdf[PDF]
NOTE: Board dimensions and mounting-hole positions for Camera Module 3 are identical to Camera Module 2. However, due to changes in the size and position of the sensor module, it is not mechanically compatible with the camera lid for the Raspberry Pi Zero Case.
=== Schematics
.Schematic of the Raspberry Pi CSI camera connector.
-image:images/RPi-S5-conn.png[camera connector]
+image:images/RPi-S5-conn.png[camera connector, width="65%"]
Other available schematics;
* Camera Module v2 https://datasheets.raspberrypi.com/camera/camera-module-2-schematics.pdf[PDF]
+* Camera Module v3 https://datasheets.raspberrypi.com/camera/camera-module-3-schematics.pdf[PDF]
* HQ Camera Module https://datasheets.raspberrypi.com/hq-camera/hq-camera-schematics.pdf[PDF]
+
diff --git a/documentation/asciidoc/accessories/camera/external_trigger.adoc b/documentation/asciidoc/accessories/camera/external_trigger.adoc
new file mode 100644
index 000000000..642412d54
--- /dev/null
+++ b/documentation/asciidoc/accessories/camera/external_trigger.adoc
@@ -0,0 +1,80 @@
+== External Trigger on the GS Camera
+
+The Global Shutter (GS) camera can be triggered externally by pulsing the external trigger (denoted on the board as XTR) connection on the board. Multiple cameras can be connected to the same pulse, allowing for an alternative way to synchronise two cameras.
+
+The exposure time is equal to the low pulse-width time plus an additional 14.26µs; for example, a low pulse of 10000µs leads to an exposure time of 10014.26µs. Framerate is directly controlled by how often you pulse the pin. A PWM frequency of 30Hz will lead to a framerate of 30 frames per second.
+
+image::images/external_trigger.jpg[alt="Image showing pulse format",width="80%"]
+
+=== Preparation
+
+WARNING: This modification includes removing an SMD soldered part. You should not attempt this modification unless you feel you are competent to complete it. When soldering to the Camera board, please remove the plastic back cover to avoid damaging it.
+
+If your board has transistor Q2 fitted (shown in blue on the image below), then you will need to remove R11 from the board (shown in red). This connects GP1 to XTR and without removing R11, the camera will not operate in external trigger mode.
+The location of the components is displayed below.
+
+image::images/resistor.jpg[alt="Image showing resistor to be removed",width="80%"]
+
+Next, solder a wire to the touchpoints of XTR and GND on the GS Camera board. Note that XTR is a 1.8V input, so you may need a level shifter or potential divider.
+
+We can use a Raspberry Pi Pico to provide the trigger. Connect any Pico GPIO pin (GP28 is used in this example) to XTR via a 1.5kΩ resistor. Also connect a 1.8kΩ resistor between XTR and GND to reduce the high logic level to 1.8V. A wiring diagram is shown below.
+
+image::images/pico_wiring.jpg[alt="Image showing Raspberry Pi Pico wiring",width="50%"]
+
+==== Raspberry Pi Pico MicroPython Code
+
+[source,python]
+----
+from machine import Pin, PWM
+
+from time import sleep
+
+pwm = PWM(Pin(28))
+
+framerate = 30
+shutter = 6000 # In microseconds
+
+frame_length = 1000000 / framerate
+pwm.freq(framerate)
+
+pwm.duty_u16(int((1 - (shutter - 14) / frame_length) * 65535))
+----
+
+The low pulse width is equal to the shutter time, and the frequency of the PWM equals the framerate.
+
+NOTE: In this example, Pin 28 connects to the XTR touchpoint on the GS camera board.
+
+=== Camera driver configuration
+
+This step is only necessary if you have more than one camera with XTR wired in parallel.
+
+Edit `/boot/firmware/config.txt`. Change `camera_auto_detect=1` to `camera_auto_detect=0`.
+
+Append this line:
+[source]
+----
+dtoverlay=imx296,always-on
+----
+When using the CAM0 port on a Raspberry Pi 5, CM4 or CM5, append `,cam0` to that line without a space. If both cameras are on the same Raspberry Pi you will need two dtoverlay lines, only one of them ending with `,cam0`.
+
+If the external trigger will not be started right away, you also need to increase the libcamera timeout xref:camera.adoc#libcamera-configuration[as above].
+
+=== Starting the camera
+
+Enable external triggering:
+
+[source,console]
+----
+$ echo 1 | sudo tee /sys/module/imx296/parameters/trigger_mode
+----
+
+Run the code on the Pico, then set the camera running:
+
+[source,console]
+----
+$ rpicam-hello -t 0 --qt-preview --shutter 3000
+----
+
+Every time the Pico pulses the pin, it should capture a frame. However, if `--gain` and `--awbgains` are not set, some frames will be dropped to allow AGC and AWB algorithms to settle.
+
+NOTE: When running `rpicam-apps`, always specify a fixed shutter duration, to ensure the AGC does not try to adjust the camera's shutter speed. The value is not important, since it is actually controlled by the external trigger pulse.
diff --git a/documentation/asciidoc/accessories/camera/filters.adoc b/documentation/asciidoc/accessories/camera/filters.adoc
new file mode 100644
index 000000000..32ac70e02
--- /dev/null
+++ b/documentation/asciidoc/accessories/camera/filters.adoc
@@ -0,0 +1,71 @@
+== Camera Filters
+
+Some transmission characteristics are available for the Camera Module 3 and the HQ and GS cameras.
+
+NOTE: These graphs are available as https://datasheets.raspberrypi.com/camera/camera-extended-spectral-sensitivity.pdf[a PDF].
+
+=== Camera Module 3
+
+The Camera Module 3 is built around the IMX708, which has the following spectral sensitivity characteristics.
+
+image::images/cm3-filter.png[Camera Module 3 Transmission Graph, width="65%"]
+
+=== HQ Camera
+
+Raspberry Pi HQ Camera without IR-Cut filter.
+
+image::images/hq.png[HQ Camera Transmission Graph without IR-Cut filter,width="65%"]
+
+
+=== GS Camera
+
+Raspberry Pi GS Camera without IR-Cut filter.
+
+image::images/gs.png[GS Camera Transmission Graph without IR-Cut filter,width="65%"]
+
+
+=== HQ and GS Cameras
+
+The HQ and GS Cameras use a Hoya CM500 infrared filter. Its transmission characteristics are as represented in the following graph.
+
+image::images/hoyacm500.png[CM500 Transmission Graph,width="65%"]
+
+== IR Filter
+
+Both the High Quality Camera and Global Shutter Camera contain an IR filter to reduce the camera's sensitivity to infrared light and help outdoor photos look more natural. However, you may remove the filter to:
+
+* Enhance colours in certain types of photography, such as images of plants, water, and the sky
+* Provide night vision in a location that is illuminated with infrared light
+
+=== Filter Removal
+
+WARNING: *This procedure cannot be reversed:* the adhesive that attaches the filter will not survive being lifted and replaced, and while the IR filter is about 1.1mm thick, it may crack when it is removed. *Removing it will void the warranty on the product*.
+
+You can remove the filter from both the HQ and GS cameras. The HQ camera is shown in the demonstration below.
+
+image:images/FILTER_ON_small.jpg[width="65%"]
+
+NOTE: Make sure to work in a clean and dust-free environment, as the sensor will be exposed to the air.
+
+. Unscrew the two 1.5 mm hex lock keys on the underside of the main circuit board. Be careful not to let the washers roll away.
++
+image:images/SCREW_REMOVED_small.jpg[width="65%"]
+. There is a gasket of slightly sticky material between the housing and PCB which will require some force to separate. You may try some ways to weaken the adhesive, such as a little isopropyl alcohol and/or heat (~20-30°C).
+. Once the adhesive is loose, lift up the board and place it down on a very clean surface. Make sure the sensor does not touch the surface.
++
+image:images/FLATLAY_small.jpg[width="65%"]
+. Face the lens upwards and place the mount on a flat surface.
++
+image:images/SOLVENT_small.jpg[width="65%"]
+. To minimise the risk of breaking the filter, use a pen top or similar soft plastic item to push down on the filter only at the very edges where the glass attaches to the aluminium. The glue will break and the filter will detach from the lens mount.
++
+image:images/REMOVE_FILTER_small.jpg[width="65%"]
+. Given that changing lenses will expose the sensor, at this point you could affix a clear filter (for example, OHP plastic) to minimise the chance of dust entering the sensor cavity.
+. Replace the main housing over the circuit board. Be sure to realign the housing with the gasket, which remains on the circuit board.
+. Apply the nylon washer first to prevent damage to the circuit board.
+. Next, fit the steel washer, which prevents damage to the nylon washer. Screw down the two hex lock keys. As long as the washers have been fitted in the correct order, they do not need to be screwed very tightly.
++
+image:images/FILTER_OFF_small.jpg[width="65%"]
+
+NOTE: It is likely to be difficult or impossible to glue the filter back in place and return the device to functioning as a normal optical camera.
+
diff --git a/documentation/asciidoc/accessories/camera/hqcam_filter_removal.adoc b/documentation/asciidoc/accessories/camera/hqcam_filter_removal.adoc
deleted file mode 100644
index 17f5062d3..000000000
--- a/documentation/asciidoc/accessories/camera/hqcam_filter_removal.adoc
+++ /dev/null
@@ -1,38 +0,0 @@
-== HQ Camera Filter Transmission
-
-The HQ Camera uses a Hoya CM500 infrared filter. Its transmission characteristics are as represented in the following graph.
-
-image::images/hoyacm500.png[CM500 Transmission Graph]
-
-== HQ Camera Filter Removal
-
-The High Quality Camera contains an IR filter, which is used to reduce the camera's sensitivity to infrared light. This ensures that outdoor photos look more natural. However, some nature photography can be enhanced with the removal of this filter; the colours of sky, plants, and water can be affected by its removal. The camera can also be used without the filter for night vision in a location that is illuminated with infrared light.
-
-WARNING: *This procedure cannot be reversed:* the adhesive that attaches the filter will not survive being lifted and replaced, and while the IR filter is about 1.1mm thick, it may crack when it is removed. *Removing it will void the warranty on the product*. Nevertheless, removing the filter will be desirable to some users.
-
-To remove the filter:
-
-* Work in a clean and dust-free environment, as the sensor will be exposed to the air.
-
-image:images/rpi_hq_cam_sensor.jpg[camera sensor, width="70%"]
-
-* Unscrew the two 1.5 mm hex lock keys on the underside of the main circuit board. Be careful not to let the washers roll away. There is a gasket of slightly sticky material between the housing and PCB which will require some force to separate.
-
-image:images/rpi_hq_cam_gasket.jpg[camera gasket, width="70%"]
-
-* Lift up the board and place it down on a very clean surface. Make sure the sensor does not touch the surface.
-* Before completing the next step, read through all of the steps and decide whether you are willing to void your warranty. *Do not proceed* unless you are sure that you are willing to void your warranty.
-* Turn the lens around so that it is "looking" upwards and place it on a table.
-* You may try some ways to weaken the adhesive, such as a little isopropyl alcohol and/or heat (~20-30 C). Using a pen top or similar soft plastic item, push down on the filter only at the very edges where the glass attaches to the aluminium - to minimise the risk of breaking the filter. The glue will break and the filter will detach from the lens mount.
-
-image:images/rpi_hq_cam_ir_filter.jpg[camera ir filter, width="70%"]
-
-* Given that changing lenses will expose the sensor, at this point you could affix a clear filter (for example, OHP plastic) to minimize the chance of dust entering the sensor cavity.
-
-image:images/rpi_hq_cam_clear_filter.jpg[camera protective filter, width="70%"]
-
-* Replace the main housing over the circuit board. Be sure to realign the housing with the gasket, which remains on the circuit board.
-* The nylon washer prevents damage to the circuit board; apply this washer first. Next, fit the steel washer, which prevents damage to the nylon washer.
-* Screw down the two hex lock keys. As long as the washers have been fitted in the correct order, they do not need to be screwed very tightly.
-* Note that it is likely to be difficult or impossible to glue the filter back in place and return the device to functioning as a normal optical camera.
-
diff --git a/documentation/asciidoc/accessories/camera/images/FILTER_OFF.jpg b/documentation/asciidoc/accessories/camera/images/FILTER_OFF.jpg
new file mode 100644
index 000000000..918eb217f
Binary files /dev/null and b/documentation/asciidoc/accessories/camera/images/FILTER_OFF.jpg differ
diff --git a/documentation/asciidoc/accessories/camera/images/FILTER_OFF_small.jpg b/documentation/asciidoc/accessories/camera/images/FILTER_OFF_small.jpg
new file mode 100644
index 000000000..a0ab753ff
Binary files /dev/null and b/documentation/asciidoc/accessories/camera/images/FILTER_OFF_small.jpg differ
diff --git a/documentation/asciidoc/accessories/camera/images/FILTER_ON.jpg b/documentation/asciidoc/accessories/camera/images/FILTER_ON.jpg
new file mode 100644
index 000000000..47abc24c7
Binary files /dev/null and b/documentation/asciidoc/accessories/camera/images/FILTER_ON.jpg differ
diff --git a/documentation/asciidoc/accessories/camera/images/FILTER_ON_small.jpg b/documentation/asciidoc/accessories/camera/images/FILTER_ON_small.jpg
new file mode 100644
index 000000000..4de5eefe3
Binary files /dev/null and b/documentation/asciidoc/accessories/camera/images/FILTER_ON_small.jpg differ
diff --git a/documentation/asciidoc/accessories/camera/images/FLATLAY.jpg b/documentation/asciidoc/accessories/camera/images/FLATLAY.jpg
new file mode 100644
index 000000000..6fad88e33
Binary files /dev/null and b/documentation/asciidoc/accessories/camera/images/FLATLAY.jpg differ
diff --git a/documentation/asciidoc/accessories/camera/images/FLATLAY_small.jpg b/documentation/asciidoc/accessories/camera/images/FLATLAY_small.jpg
new file mode 100644
index 000000000..2cb6d17c2
Binary files /dev/null and b/documentation/asciidoc/accessories/camera/images/FLATLAY_small.jpg differ
diff --git a/documentation/asciidoc/accessories/camera/images/REMOVE_FILTER.jpg b/documentation/asciidoc/accessories/camera/images/REMOVE_FILTER.jpg
new file mode 100644
index 000000000..845fda298
Binary files /dev/null and b/documentation/asciidoc/accessories/camera/images/REMOVE_FILTER.jpg differ
diff --git a/documentation/asciidoc/accessories/camera/images/REMOVE_FILTER_small.jpg b/documentation/asciidoc/accessories/camera/images/REMOVE_FILTER_small.jpg
new file mode 100644
index 000000000..46d2e73fc
Binary files /dev/null and b/documentation/asciidoc/accessories/camera/images/REMOVE_FILTER_small.jpg differ
diff --git a/documentation/asciidoc/accessories/camera/images/SCREW_REMOVED.jpg b/documentation/asciidoc/accessories/camera/images/SCREW_REMOVED.jpg
new file mode 100644
index 000000000..02cb97774
Binary files /dev/null and b/documentation/asciidoc/accessories/camera/images/SCREW_REMOVED.jpg differ
diff --git a/documentation/asciidoc/accessories/camera/images/SCREW_REMOVED_small.jpg b/documentation/asciidoc/accessories/camera/images/SCREW_REMOVED_small.jpg
new file mode 100644
index 000000000..0c9cb169c
Binary files /dev/null and b/documentation/asciidoc/accessories/camera/images/SCREW_REMOVED_small.jpg differ
diff --git a/documentation/asciidoc/accessories/camera/images/SOLVENT.jpg b/documentation/asciidoc/accessories/camera/images/SOLVENT.jpg
new file mode 100644
index 000000000..e7c4e965a
Binary files /dev/null and b/documentation/asciidoc/accessories/camera/images/SOLVENT.jpg differ
diff --git a/documentation/asciidoc/accessories/camera/images/SOLVENT_small.jpg b/documentation/asciidoc/accessories/camera/images/SOLVENT_small.jpg
new file mode 100644
index 000000000..589f1a8c7
Binary files /dev/null and b/documentation/asciidoc/accessories/camera/images/SOLVENT_small.jpg differ
diff --git a/documentation/asciidoc/accessories/camera/images/ai-camera-hero.png b/documentation/asciidoc/accessories/camera/images/ai-camera-hero.png
new file mode 100644
index 000000000..a0186287c
Binary files /dev/null and b/documentation/asciidoc/accessories/camera/images/ai-camera-hero.png differ
diff --git a/documentation/asciidoc/accessories/camera/images/cm3-filter.png b/documentation/asciidoc/accessories/camera/images/cm3-filter.png
new file mode 100644
index 000000000..669f8c86e
Binary files /dev/null and b/documentation/asciidoc/accessories/camera/images/cm3-filter.png differ
diff --git a/documentation/asciidoc/accessories/camera/images/external_trigger.jpg b/documentation/asciidoc/accessories/camera/images/external_trigger.jpg
new file mode 100644
index 000000000..dd6f4d27b
Binary files /dev/null and b/documentation/asciidoc/accessories/camera/images/external_trigger.jpg differ
diff --git a/documentation/asciidoc/accessories/camera/images/gs-camera.jpg b/documentation/asciidoc/accessories/camera/images/gs-camera.jpg
new file mode 100644
index 000000000..f03c35928
Binary files /dev/null and b/documentation/asciidoc/accessories/camera/images/gs-camera.jpg differ
diff --git a/documentation/asciidoc/accessories/camera/images/gs.png b/documentation/asciidoc/accessories/camera/images/gs.png
new file mode 100644
index 000000000..e75662f40
Binary files /dev/null and b/documentation/asciidoc/accessories/camera/images/gs.png differ
diff --git a/documentation/asciidoc/accessories/camera/images/hq.png b/documentation/asciidoc/accessories/camera/images/hq.png
new file mode 100644
index 000000000..42cd3b7c1
Binary files /dev/null and b/documentation/asciidoc/accessories/camera/images/hq.png differ
diff --git a/documentation/asciidoc/accessories/camera/images/m12-lens.jpg b/documentation/asciidoc/accessories/camera/images/m12-lens.jpg
index dae4e2599..875f0297a 100644
Binary files a/documentation/asciidoc/accessories/camera/images/m12-lens.jpg and b/documentation/asciidoc/accessories/camera/images/m12-lens.jpg differ
diff --git a/documentation/asciidoc/accessories/camera/images/pico_wiring.jpg b/documentation/asciidoc/accessories/camera/images/pico_wiring.jpg
new file mode 100644
index 000000000..c26df7a84
Binary files /dev/null and b/documentation/asciidoc/accessories/camera/images/pico_wiring.jpg differ
diff --git a/documentation/asciidoc/accessories/camera/images/resistor.jpg b/documentation/asciidoc/accessories/camera/images/resistor.jpg
new file mode 100644
index 000000000..7d9fc1077
Binary files /dev/null and b/documentation/asciidoc/accessories/camera/images/resistor.jpg differ
diff --git a/documentation/asciidoc/accessories/camera/images/rpi_hq_cam_clear_filter.jpg b/documentation/asciidoc/accessories/camera/images/rpi_hq_cam_clear_filter.jpg
index 00daf6b07..dc401f9ad 100644
Binary files a/documentation/asciidoc/accessories/camera/images/rpi_hq_cam_clear_filter.jpg and b/documentation/asciidoc/accessories/camera/images/rpi_hq_cam_clear_filter.jpg differ
diff --git a/documentation/asciidoc/accessories/camera/images/rpi_hq_cam_gasket.jpg b/documentation/asciidoc/accessories/camera/images/rpi_hq_cam_gasket.jpg
index b4cd564a4..572ca3167 100644
Binary files a/documentation/asciidoc/accessories/camera/images/rpi_hq_cam_gasket.jpg and b/documentation/asciidoc/accessories/camera/images/rpi_hq_cam_gasket.jpg differ
diff --git a/documentation/asciidoc/accessories/camera/images/rpi_hq_cam_ir_filter.jpg b/documentation/asciidoc/accessories/camera/images/rpi_hq_cam_ir_filter.jpg
index 8b786511b..ee09e5b4c 100644
Binary files a/documentation/asciidoc/accessories/camera/images/rpi_hq_cam_ir_filter.jpg and b/documentation/asciidoc/accessories/camera/images/rpi_hq_cam_ir_filter.jpg differ
diff --git a/documentation/asciidoc/accessories/camera/images/rpi_hq_cam_sensor.jpg b/documentation/asciidoc/accessories/camera/images/rpi_hq_cam_sensor.jpg
index d9792c2f3..40bb170bb 100644
Binary files a/documentation/asciidoc/accessories/camera/images/rpi_hq_cam_sensor.jpg and b/documentation/asciidoc/accessories/camera/images/rpi_hq_cam_sensor.jpg differ
diff --git a/documentation/asciidoc/accessories/camera/images/synchronous_camera_wiring.fzz b/documentation/asciidoc/accessories/camera/images/synchronous_camera_wiring.fzz
new file mode 100644
index 000000000..d17305956
Binary files /dev/null and b/documentation/asciidoc/accessories/camera/images/synchronous_camera_wiring.fzz differ
diff --git a/documentation/asciidoc/accessories/camera/images/synchronous_camera_wiring.jpg b/documentation/asciidoc/accessories/camera/images/synchronous_camera_wiring.jpg
new file mode 100644
index 000000000..995a59882
Binary files /dev/null and b/documentation/asciidoc/accessories/camera/images/synchronous_camera_wiring.jpg differ
diff --git a/documentation/asciidoc/accessories/camera/lens.adoc b/documentation/asciidoc/accessories/camera/lens.adoc
index 343404fdd..ad461444f 100644
--- a/documentation/asciidoc/accessories/camera/lens.adoc
+++ b/documentation/asciidoc/accessories/camera/lens.adoc
@@ -1,6 +1,8 @@
== Recommended Lenses
-The following lenses are recommended for use with our HQ cameras.
+The following lenses are recommended for use with our HQ and GS cameras.
+
+NOTE: While the HQ Camera is available in both C/CS- and M12-mount versions, the GS Camera is available only with a C/CS-mount.
=== C/CS Lenses
@@ -12,14 +14,11 @@ We recommend two lenses, a 6mm wide angle lens and a 16mm telephoto lens. These
2+| Resolution | 10MP | 3MP
2+| Image format | 1" | 1/2"
-2+| Aperture | F1.4 to 1.6 | F1.2
+2+| Aperture | F1.4 to F16 | F1.2
2+| Mount | C | CS
-.4+| Field Angle
-| 1" | 44.6°× 33.6°
-.4+| 63°
-| 2/3" | 30.0°× 23.2°
-| 1/1.8" | 24.7°× 18.6°
-| 1/2" | 21.8°× 16.4°
+.2+| Field of View H°×V° (D°)
+| HQ | 22.2°×16.7° (27.8°)| 55°×45° (71°)
+| GS | 17.8°×13.4° (22.3°) | 45°×34° (56°)
2+| Back focal length | 17.53mm | 7.53mm
2+| M.O.D. | 0.2m | 0.2m
2+| Dimensions | φ39.00×50.00mm | φ30×34mm
@@ -39,5 +38,5 @@ We recommend three lenses manufactured by https://www.gaojiaoptotech.com/[Gaojia
2+| Image format | 1/1.7" | 1/2" | 1/2.3"
2+| Aperture | F1.8 | F2.4 | F2.5
2+| Mount 3+| M12
-2+| Field of View (D/H/V) | 72.64°/57.12°/42.44° | 18.3°/14.7°/11.1° | 184.6°/140°/102.6°
+2+| HQ Field of View H°×V° (D°) | 49°×36° (62°) | 14.4°×10.9° (17.9°) | 140°×102.6° (184.6°)
|===
diff --git a/documentation/asciidoc/accessories/camera/synchronous_cameras.adoc b/documentation/asciidoc/accessories/camera/synchronous_cameras.adoc
new file mode 100644
index 000000000..9561864ff
--- /dev/null
+++ b/documentation/asciidoc/accessories/camera/synchronous_cameras.adoc
@@ -0,0 +1,108 @@
+== Synchronous Captures
+
+The High Quality (HQ) Camera supports synchronous captures.
+One camera (the "source") can be configured to generate a pulse on its XVS (Vertical Sync) pin when a frame capture is initiated.
+Other ("sink") cameras can listen for this pulse, and capture a frame at the same time as the source camera.
+
+This method is largely superseded by xref:../computers/camera_software.adoc#software-camera-synchronisation[software camera synchronisation] which can operate over long distances without additional wires and has sub-millisecond accuracy. But when cameras are physically close, wired synchronisation may be used.
+
+NOTE: Global Shutter (GS) Cameras can also be operated in a synchronous mode. However, the source camera will record one extra frame. Instead, for GS Cameras we recommend using an xref:camera.adoc#external-trigger-on-the-gs-camera[external trigger source]. You cannot synchronise a GS Camera and an HQ Camera.
+
+=== Connecting the cameras
+
+Solder a wire to the XVS test point of each camera, and connect them together.
+
+Solder a wire to the GND test point of each camera, and connect them together.
+
+*For GS Cameras only,* you will also need to connect the XHS (Horizontal Sync) test point of each camera together. On any GS Camera that you wish to act as a sink, bridge the two halves of the MAS pad with solder.
+
+NOTE: An earlier version of this document recommended an external pull-up for XVS. This is no longer recommended. Instead, ensure you have the latest version of Raspberry Pi OS and set the `always-on` property for all connected cameras.
+
+=== Driver configuration
+
+You will need to configure the camera drivers to keep their 1.8V power supplies on when not streaming, and optionally to select the source and sink roles.
+
+==== For the HQ Camera
+
+Edit `/boot/firmware/config.txt`. Change `camera_auto_detect=1` to `camera_auto_detect=0`.
+
+Append this line for a source camera:
+[source]
+----
+dtoverlay=imx477,always-on,sync-source
+----
+
+Or for a sink:
+[source]
+----
+dtoverlay=imx477,always-on,sync-sink
+----
+
+When using the CAM0 port on a Raspberry Pi 5, CM4 or CM5, append `,cam0` to that line without a space. If two cameras are on the same Raspberry Pi you will need two dtoverlay lines, only one of them ending with `,cam0`.
+
+Alternatively, if you wish to swap the cameras' roles at runtime (and they are not both connected to the same Raspberry Pi), omit `,sync-source` or `,sync-sink` above. Instead you can set a module parameter before starting each camera:
+
+For the Raspberry Pi with the source camera:
+[source,console]
+----
+$ echo 1 | sudo tee /sys/module/imx477/parameters/trigger_mode
+----
+
+For the Raspberry Pi with the sink camera:
+[source,console]
+----
+$ echo 2 | sudo tee /sys/module/imx477/parameters/trigger_mode
+----
+You will need to do this every time the system is booted.
+
+==== For the GS Camera
+
+Edit `/boot/firmware/config.txt`. Change `camera_auto_detect=1` to `camera_auto_detect=0`.
+
+For either a source or a sink, append this line:
+[source]
+----
+dtoverlay=imx296,always-on
+----
+When using the CAM0 port on a Raspberry Pi 5, CM4 or CM5, append `,cam0` to that line without a space. If two cameras are on the same Raspberry Pi you will need two dtoverlay lines, only one of them ending with `,cam0`.
+
+On the GS Camera, the sink role is enabled by the MAS pin and cannot be configured by software ("trigger_mode" and "sync-sink" relate to the xref:camera.adoc#external-trigger-on-the-gs-camera[external trigger method], and should _not_ be set for this method).
+
+=== Libcamera configuration
+
+If the cameras are not all started within 1 second, the `rpicam` applications can time out. To prevent this, you must edit a configuration file on any Raspberry Pi(s) with sink cameras.
+
+On Raspberry Pi 5 or CM5:
+[source,console]
+----
+$ cp /usr/share/libcamera/pipeline/rpi/pisp/example.yaml timeout.yaml
+----
+
+On other Raspberry Pi models:
+[source,console]
+----
+$ cp /usr/share/libcamera/pipeline/rpi/vc4/rpi_apps.yaml timeout.yaml
+----
+
+Now edit the copy. In both cases, delete the `#` (comment) from the `"camera_timeout_value_ms":` line, and change the number to `60000` (60 seconds).
+
+=== Starting the cameras
+
+Run the following commands to start the sink:
+
+[source,console]
+----
+$ export LIBCAMERA_RPI_CONFIG_FILE=timeout.yaml
+$ rpicam-vid --frames 300 --qt-preview -o sink.h264
+----
+
+Wait a few seconds, then run the following command to start the source:
+
+[source,console]
+----
+$ rpicam-vid --frames 300 --qt-preview -o source.h264
+----
+Frames should be synchronised. Use `--frames` to ensure the same number of frames are captured, and that the recordings are exactly the same length.
+Running the sink first ensures that no frames are missed.
+
+NOTE: When using the GS camera in synchronous mode, the sink will not record exactly the same number of frames as the source. **The source records one extra frame before the sink starts recording**. Because of this, you need to specify that the sink records one less frame with the `--frames` option.
diff --git a/documentation/asciidoc/accessories/display.adoc b/documentation/asciidoc/accessories/display.adoc
index 818837521..abfac0c01 100644
--- a/documentation/asciidoc/accessories/display.adoc
+++ b/documentation/asciidoc/accessories/display.adoc
@@ -1,6 +1,3 @@
include::display/display_intro.adoc[]
include::display/legacy.adoc[]
-
-include::display/troubleshooting.adoc[]
-
diff --git a/documentation/asciidoc/accessories/display/display_intro.adoc b/documentation/asciidoc/accessories/display/display_intro.adoc
index a676fe100..d61ea0398 100644
--- a/documentation/asciidoc/accessories/display/display_intro.adoc
+++ b/documentation/asciidoc/accessories/display/display_intro.adoc
@@ -1,139 +1,165 @@
== Raspberry Pi Touch Display
-The Raspberry Pi Touch Display is an LCD display which connects to the Raspberry Pi through the DSI connector. In some situations, it allows for the use of both the HDMI and LCD displays at the same time (this requires software support).
+The https://www.raspberrypi.com/products/raspberry-pi-touch-display/[Raspberry Pi Touch Display] is an LCD display that connects to a Raspberry Pi using a DSI connector and GPIO connector.
-=== Board Support
+.The Raspberry Pi 7-inch Touch Display
+image::images/display.png[The Raspberry Pi 7-inch Touch Display, width="70%"]
-The DSI display is designed to work with all models of Raspberry Pi, however early models that do not have mounting holes (the Raspberry Pi 1 Model A and B) will require additional mounting hardware to fit the HAT-dimensioned stand-offs on the display PCB.
+The Touch Display is compatible with all models of Raspberry Pi, except the Zero series and Keyboard series, which lack a DSI connector. The earliest Raspberry Pi models lack appropriate mounting holes, requiring additional mounting hardware to fit the stand-offs on the display PCB.
-=== Physical Installation
+The display has the following key features:
-The following image shows how to attach the Raspberry Pi to the back of the Touch Display (if required), and how to connect both the data (ribbon cable) and power (red/black wires) from the Raspberry Pi to the display. If you are not attaching the Raspberry Pi to the back of the display, take extra care when attaching the ribbon cable to ensure it is the correct way round. The black and red power wires should be attached to the GND and 5v pins respectively.
+* 800×480px RGB LCD display
+* 24-bit colour
+* Industrial quality: 140 degree viewing angle horizontal, 120 degree viewing angle vertical
+* 10-point multi-touch touchscreen
+* PWM backlight control and power control over I2C interface
+* Metal-framed back with mounting points for Raspberry Pi display conversion board and Raspberry Pi
+* Backlight lifetime: 20000 hours
+* Operating temperature: -20 to +70 degrees centigrade
+* Storage temperature: -30 to +80 degrees centigrade
+* Contrast ratio: 500
+* Average brightness: 250 cd/m^2^
+* Viewing angle (degrees):
+ ** Top - 50
+ ** Bottom - 70
+ ** Left - 70
+ ** Right - 70
+* Power requirements: 200mA at 5V typical, at maximum brightness.
+* Outer dimensions: 192.96 × 110.76mm
+* Viewable area: 154.08 × 85.92mm
-image::images/GPIO_power-500x333.jpg[DSI Display Connections]
-The other three pins should be left disconnected, unless connecting the display to an original Raspberry Pi 1 Model A or B. See the section on xref:display.adoc#legacy-support[legacy support] for more information on connecting the display to an original Raspberry Pi.
+=== Mount the Touch Display
-NOTE: An original Raspberry Pi can be easily identified from other models, it is the only model with a 26-pin rather than 40-pin GPIO header connector.
+You can mount a Raspberry Pi to the back of the Touch Display using its stand-offs and then connect the appropriate cables. You can also mount the Touch Display in a separate chassis if you have one available. The connections remain the same, though you may need longer cables depending on the chassis.
-=== Screen Orientation
+.A Raspberry Pi connected to the Touch Display
+image::images/GPIO_power-500x333.jpg[Image of Raspberry Pi connected to the Touch Display, width="70%"]
-LCD displays have an optimum viewing angle, and depending on how the screen is mounted it may be necessary to change the orientation of the display to give the best results. By default, the Raspberry Pi Touch Display and Raspberry Pi are set up to work best when viewed from slightly above, for example on a desktop. If viewing from below, you can physically rotate the display, and then tell the system software to compensate by running the screen upside down.
+Connect one end of the Flat Flexible Cable (FFC) to the `RPI-DISPLAY` port on the Touch Display PCB. The silver or gold contacts should face away from the display. Then connect the other end of the FFC to the `DISPLAY` port on the Raspberry Pi. The contacts on this end should face inward, towards the Raspberry Pi.
-==== KMS and FKMS Mode
+If the FFC is not fully inserted or positioned correctly, you will experience issues with the display. You should always double check this connection when troubleshooting, especially if you don't see anything on your display, or the display shows only a single colour.
-KMS and FKMS modes are used by default on the Raspberry Pi 4B. KMS and FKMS use the DRM/MESA libraries to provide graphics and 3D acceleration.
+NOTE: A https://datasheets.raspberrypi.com/display/7-inch-display-mechanical-drawing.pdf[mechanical drawing] of the Touch Display is available for download.
-To set screen orientation when running the graphical desktop, select the `Screen Configuration` option from the `Preferences` menu. Right click on the DSI display rectangle in the layout editor, select Orientation then the required option.
+=== Power the Touch Display
-To set screen orientation when in console mode, you will need to edit the kernel command line to pass the required orientation to the system.
+We recommend using the Raspberry Pi's GPIO to provide power to the Touch Display. Alternatively, you can power the display directly with a separate micro USB power supply.
-[,bash]
-----
-sudo nano /boot/cmdline.txt
-----
+==== Power from a Raspberry Pi
-To rotate by 90 degrees clockwise, add the following to the cmdline, making sure everything is on the same line, do not add any carriage returns. Possible rotation values are 0, 90, 180 and 270.
+To power the Touch Display using a Raspberry Pi, you need to connect two jumper wires between the 5V and `GND` pins on xref:../computers/raspberry-pi.adoc#gpio[Raspberry Pi's GPIO] and the 5V and `GND` pins on the display, as shown in the following illustration.
-----
-video=DSI-1:800x480@60,rotate=90
-----
+.The location of the display's 5V and `GND` pins
+image::images/display_plugs.png[Illustration of display pins, width="40%"]
-NOTE: In console mode it is not possible to rotate the DSI display separately to the HDMI display, so if you have both attached they must both be set to the same value.
+Before you begin, make sure the Raspberry Pi is powered off and not connected to any power source. Connect one end of the black jumper wire to pin six (`GND`) on the Raspberry Pi and one end of the red jumper wire to pin four (5V). If pin six isn't available, you can use any other open `GND` pin to connect the black wire. If pin four isn't available, you can use any other 5V pin to connect the red wire, such as pin two.
-==== Legacy Graphics Mode
+.The location of the Raspberry Pi headers
+image::images/pi_plugs.png[Illustration of Raspberry Pi headers, width="40%"]
-Legacy graphics mode is used by default on all Raspberry Pi models prior to the Raspberry Pi 4B, and can also be used on the Raspberry Pi 4B if required, by disabling KMS and FKMS modes by commenting out the KMS or FKMS line in `config.txt`.
+Next, connect the other end of the black wire to the `GND` pin on the display and the other end of the red wire to the 5V pin on the display. Once all the connections are made, you should see the Touch Display turn on the next time you turn on your Raspberry Pi.
-NOTE: Legacy mode on the Raspberry Pi 4B has no 3D acceleration so it should only be used if you have a specific reason for needing it.
+Use the other three pins on the Touch Display to connect the display to an original Raspberry Pi 1 Model A or B. Refer to our documentation on xref:display.adoc#legacy-support[legacy support] for more information.
-To flip the display, add the following line to the file `/boot/config.txt`:
+NOTE: To identify an original Raspberry Pi, check the GPIO header connector. Only the original model has a 26-pin GPIO header connector; subsequent models have 40 pins.
-`lcd_rotate=2`
+==== Power from a micro USB supply
-This will vertically flip the LCD and the touch screen, compensating for the physical orientation of the display.
+If you don't want to use a Raspberry Pi to provide power to the Touch Display, you can use a micro USB power supply instead. We recommend using the https://www.raspberrypi.com/products/micro-usb-power-supply/[Raspberry Pi 12.5W power supply] to make sure the display runs as intended.
-You can also rotate the display by adding the following to the `config.txt` file.
+Do not connect the GPIO pins on your Raspberry Pi to the display if you choose to use micro USB for power. The only connection between the two boards should be the Flat Flexible Cable.
-* `display_lcd_rotate=x`, where `x` can be one of the following:
+WARNING: When using a micro USB cable to power the display, mount it inside a chassis that blocks access to the display's PCB during usage.
-|===
-| display_lcd_rotate | result
+=== Use an on-screen keyboard
-| 0
-| no rotation
+Raspberry Pi OS _Bookworm_ and later include the Squeekboard on-screen keyboard by default. When a touch display is attached, the on-screen keyboard should automatically show when it is possible to enter text and automatically hide when it is not possible to enter text.
-| 1
-| rotate 90 degrees clockwise
+For applications which do not support text entry detection, use the keyboard icon at the right end of the taskbar to manually show and hide the keyboard.
-| 2
-| rotate 180 degrees clockwise
+You can also permanently show or hide the on-screen keyboard in the Display tab of Raspberry Pi Configuration or the `Display` section of `raspi-config`.
-| 3
-| rotate 270 degrees clockwise
+TIP: In Raspberry Pi OS releases prior to _Bookworm_, use `matchbox-keyboard` instead. If you use the wayfire desktop compositor, use `wvkbd` instead.
-| 0x10000
-| horizontal flip
+=== Change screen orientation
-| 0x20000
-| vertical flip
-|===
+If you want to physically rotate the display, or mount it in a specific position, select **Screen Configuration** from the **Preferences** menu. Right-click on the touch display rectangle (likely DSI-1) in the layout editor, select **Orientation**, then pick the best option to fit your needs.
+
+image::images/display-rotation.png[Screenshot of orientation options in screen configuration, width="80%"]
+
+==== Rotate screen without a desktop
+
+To set the screen orientation on a device that lacks a desktop environment, edit the `/boot/firmware/cmdline.txt` configuration file to pass an orientation to the system. Add the following line to `cmdline.txt`:
+
+[source,ini]
+----
+video=DSI-1:800x480@60,rotate=<rotation>
+----
+
+Replace the `<rotation>` placeholder with one of the following values, which correspond to the degree of rotation relative to the default on your display:
+
+* `0`
+* `90`
+* `180`
+* `270`
-NOTE: The 90 and 270 degree rotation options require additional memory on the GPU, so these will not work with the 16MB GPU split.
+For example, a rotation value of `90` rotates the display 90 degrees to the right. `180` rotates the display 180 degrees, or upside-down.
-=== Touchscreen Orientation
+NOTE: It is not possible to rotate the DSI display separately from the HDMI display with `cmdline.txt`. When you use DSI and HDMI simultaneously, they share the same rotation value.
-Additionally, you have the option to change the rotation of the touchscreen independently of the display itself by adding a `dtoverlay` instruction in `config.txt`, for example:
+==== Rotate touch input
-`dtoverlay=rpi-ft5406,touchscreen-swapped-x-y=1,touchscreen-inverted-x=1`
+WARNING: Rotating touch input via device tree can cause conflicts with your input library. Whenever possible, configure touch event rotation in your input library or desktop.
-The options for the touchscreen are:
+Rotation of touch input is independent of the orientation of the display itself. To change this you need to manually add a `dtoverlay` instruction in xref:../computers/config_txt.adoc[`/boot/firmware/config.txt`]. Add the following line at the end of `config.txt`:
+
+[source,ini]
+----
+dtoverlay=vc4-kms-dsi-7inch,invx,invy
+----
+
+Then, disable automatic display detection by removing the following line from `config.txt`, if it exists:
+
+[source,ini]
+----
+display_auto_detect=1
+----
+
+==== Touch Display device tree option reference
+
+The `vc4-kms-dsi-7inch` overlay supports the following options:
|===
| DT parameter | Action
-| touchscreen-size-x
+| `sizex`
| Sets X resolution (default 800)
-| touchscreen-size-y
-| Sets Y resolution (default 600)
+| `sizey`
+| Sets Y resolution (default 480)
-| touchscreen-inverted-x
+| `invx`
| Invert X coordinates
-| touchscreen-inverted-y
+| `invy`
| Invert Y coordinates
-| touchscreen-swapped-x-y
+| `swapxy`
| Swap X and Y coordinates
-|===
-=== Troubleshooting
-
-Read our troubleshooting steps, tips, and tricks here: xref:display.adoc#troubleshooting-the-display[Raspberry Pi Touch Display troubleshooting].
+| `disable_touch`
+| Disables the touch overlay totally
+|===
-=== Specifications
+To specify these options, add them, separated by commas, to your `dtoverlay` line in `/boot/firmware/config.txt`. Boolean values default to true when present, but you can set them to false using the suffix "=0". Integer values require a value, e.g. `sizey=240`. For instance, to set the X resolution to 400 pixels and invert both X and Y coordinates, use the following line:
-* 800×480 RGB LCD display
-* 24-bit colour
-* Industrial quality: 140-degree viewing angle horizontal, 130-degree viewing angle vertical
-* 10-point multi-touch touchscreen
-* PWM backlight control and power control over I2C interface
-* Metal-framed back with mounting points for Raspberry Pi display conversion board and Raspberry Pi
-* Backlight lifetime: 20000 hours
-* Operating temperature: -20 to +70 degrees centigrade
-* Storage temperature: -30 to +80 degrees centigrade
-* Contrast ratio: 500
-* Average brightness: 250 cd/m^2^
-* Viewing angle (degrees):
- ** Top - 50
- ** Bottom - 70
- ** Left - 70
- ** Right - 70
-* Power requirements: 200mA at 5V typical, at maximum brightness.
+[source,ini]
+----
+dtoverlay=vc4-kms-dsi-7inch,sizex=400,invx,invy
+----
-==== Mechanical Specification
+=== Installation on Compute Module-based devices
-* Outer dimensions: 192.96 × 110.76mm
-* Viewable area: 154.08 × 85.92mm
-* https://datasheets.raspberrypi.com/display/7-inch-display-mechanical-drawing.pdf[Download mechanical drawing (PDF)]
+All Raspberry Pi SBCs auto-detect the official Touch Displays as the circuitry connected to the DSI connector on the Raspberry Pi board is fixed; this autodetection ensures the correct Device Tree entries are passed to the kernel. However, Compute Modules are intended for industrial applications where the integrator can use any and all GPIOs and interfaces for whatever purposes they require. Autodetection is therefore not feasible, and hence is disabled on Compute Module devices. This means that the Device Tree fragments required to set up the display need to be loaded via some other mechanism, which can be either a `dtoverlay` entry in `config.txt` as described above, a custom base DT file, or, if present, a HAT EEPROM.
\ No newline at end of file
diff --git a/documentation/asciidoc/accessories/display/images/display-rotation.png b/documentation/asciidoc/accessories/display/images/display-rotation.png
new file mode 100755
index 000000000..86eb3a10b
Binary files /dev/null and b/documentation/asciidoc/accessories/display/images/display-rotation.png differ
diff --git a/documentation/asciidoc/accessories/display/images/display.png b/documentation/asciidoc/accessories/display/images/display.png
new file mode 100644
index 000000000..dd7ae3361
Binary files /dev/null and b/documentation/asciidoc/accessories/display/images/display.png differ
diff --git a/documentation/asciidoc/accessories/display/images/display_plugs.png b/documentation/asciidoc/accessories/display/images/display_plugs.png
new file mode 100644
index 000000000..4e1fd5da9
Binary files /dev/null and b/documentation/asciidoc/accessories/display/images/display_plugs.png differ
diff --git a/documentation/asciidoc/accessories/display/images/pi_plugs.png b/documentation/asciidoc/accessories/display/images/pi_plugs.png
new file mode 100644
index 000000000..44f607d74
Binary files /dev/null and b/documentation/asciidoc/accessories/display/images/pi_plugs.png differ
diff --git a/documentation/asciidoc/accessories/display/legacy.adoc b/documentation/asciidoc/accessories/display/legacy.adoc
index 6d4d9f974..eab11d275 100644
--- a/documentation/asciidoc/accessories/display/legacy.adoc
+++ b/documentation/asciidoc/accessories/display/legacy.adoc
@@ -1,17 +1,14 @@
== Legacy Support
-NOTE: These instructions are for the original Raspberry Pi 1 Model A and B boards only. You can identify an original board as it is the only model with a 26-pin GPIO header, all other models have the now-standard 40-pin connector.
+WARNING: These instructions are for the original Raspberry Pi Model A and Model B boards only. To identify an original Raspberry Pi, check the GPIO header connector. Only the original model has a 26-pin GPIO header connector; subsequent models have 40 pins.
-The DSI connector on the Raspberry Pi 1 Model A and B boards does not have the I2C connections required to talk to the touchscreen controller and DSI controller. You can work around this by using the additional set of jumper cables provided with the display kit to wire up the I2C bus on the GPIO pins to the display controller board.
+The DSI connector on both the Raspberry Pi 1 Model A and B boards does not have the I2C connections required to talk to the touchscreen controller and DSI controller. To work around this, use the additional set of jumper cables provided with the display kit. Connect SCL/SDA on the GPIO header to the horizontal pins marked SCL/SDA on the display board. Power the Model A/B via the GPIO pins using the jumper cables.
-Using the jumper cables, connect SCL/SDA on the GPIO header to the horizontal pins marked SCL/SDA on the display board. We also recommend that you power the Model A/B via the GPIO pins using the jumper cables.
+DSI display autodetection is disabled by default on these boards. To enable detection, add the following line to the xref:../computers/config_txt.adoc#what-is-config-txt[`/boot/firmware/config.txt`] file:
-For the GPIO header pinout, see http://pinout.xyz/[this diagram].
+[source,ini]
+----
+ignore_lcd=0
+----
-DSI display autodetection is disabled by default on these boards. To enable detection, add the following line to `/boot/config.txt`:
-
-`ignore_lcd=0`
-
-Power the setup via the `PWR IN` micro-USB connector on the display board. Do not power the setup via the Raspberry Pi's micro-USB port: the input polyfuse's maximum current rating will be exceeded as the display consumes approximately 400mA.
-
-NOTE: With the display connected to the GPIO I2C pins, the GPU will assume control of the respective I2C bus. The host operating system should not access this I2C bus, as simultaneous use of the bus by both the GPU and Linux will result in sporadic crashes.
+Power the setup via the `PWR IN` micro-USB connector on the display board. Do not power the setup via the Raspberry Pi's micro-USB port. This will exceed the input polyfuse's maximum current rating, since the display consumes approximately 400mA.
diff --git a/documentation/asciidoc/accessories/display/troubleshooting.adoc b/documentation/asciidoc/accessories/display/troubleshooting.adoc
deleted file mode 100644
index d743b3ec5..000000000
--- a/documentation/asciidoc/accessories/display/troubleshooting.adoc
+++ /dev/null
@@ -1,206 +0,0 @@
-== Troubleshooting the Display
-
-[discrete]
-=== Have you got a good power supply?
-
-Having intermittent problems, or seeing a little rainbow square in the top right corner? It is likely that you need a better power supply.
-
-We recommend our official 2.5A adapter because we know it works, but any good 2.5A supply should work.
-
-[discrete]
-=== Have you updated Raspberry Pi OS?
-
-If not, many problems will be solved by making sure your software is up-to date.
-
-You can undo any previous use of `rpi-update` and get your Raspberry Pi back to the latest stable software by connecting
-to a network and running:
-
-[,bash]
-----
-sudo apt update
-sudo apt install --reinstall libraspberrypi0 libraspberrypi-{bin,dev,doc} raspberrypi-bootloader
-sudo reboot
-----
-
-[discrete]
-=== My touchscreen doesn't work, or works intermittently
-
-* Make sure you've updated Raspberry Pi OS (see above for steps)
-* Check the smaller ribbon cable is seated properly
-
-If you want to make sure your Raspberry Pi has detected your touchscreen, try running:
-
-[,bash]
-----
-dmesg | grep -i ft5406
-----
-
-You should see a couple of lines that look like this:
-
-[,text]
-----
-[ 5.224267] rpi-ft5406 rpi_ft5406: Probing device
-[ 5.225960] input: FT5406 memory based driver as /devices/virtual/input/input3
-----
-
-A detected touchscreen will also cause the `fbheight` and `fbwidth` parameters in `/proc/cmdline` to equal 480 and 800 respectively (the resolution of the screen). You can verify this by running:
-
-----
-cat /proc/cmdline | grep bcm2708_fb
-----
-
-[discrete]
-=== My screen is upside-down!
-
-Depending on your display stand, you might find that the LCD display defaults to being upside-down. You can fix this by rotating it with `/boot/config.txt`.
-
-[,bash]
-----
-sudo nano /boot/config.txt
-----
-
-Then add:
-
-[,bash]
-----
-lcd_rotate=2
-----
-
-Hit `CTRL+X` and `y` to save. And finally:
-
-----
-sudo reboot
-----
-
-[discrete]
-=== My display fades out to weird patterns when I shutdown/reboot my Raspberry Pi
-
-Don't panic! This is perfectly normal.
-
-[discrete]
-=== My display is black
-
-* Make sure you've updated Raspberry Pi OS (see above for steps)
-* Check the ribbon cable between your Raspberry Pi and the LCD is properly seated
-* Make sure you have a SD card properly inserted into your Raspberry Pi
-
-[discrete]
-=== My display is white
-
-* Check the larger ribbon cable between the display and driver board is properly seated
-
-[discrete]
-=== Raspberry Pi OS says my screen is 752x448. Surely that's wrong?
-
-Yes, the screen should be 800x480. This is a result of overscan being enabled.
-
-Disable it by running raspi-config:
-
-[,bash]
-----
-sudo raspi-config
-----
-
-And then navigating to *Advanced Options* > *Overscan* and picking *Disable*.
-
-[discrete]
-=== My touchscreen isn't aligned correctly: my taps are slightly out
-
-This is probably also a side-effect of overscan being enabled, try the solution above.
-
-[discrete]
-=== Some windows are cut off at the bottom of the screen so I can't use them
-
-If some windows in X are cut off at the side/bottom of the screen, this is unfortunately a side-effect of developers assuming a minimum screen resolution of 1024x768 pixels.
-
-You can usually reveal hidden buttons and fields by;
-
-* right clicking on the edge or top of the window,
-* picking "move"
-* using the up arrow key to nudge the window up off the top of the screen
-
-If you don't have a mouse, see the right click fix below.
-
-=== Tips and Tricks
-
-==== How do I use multiple monitors?
-
-At the moment you can't use HDMI and the LCD together in the X desktop, but you can send the output of certain applications to one screen or the other.
-
-Omxplayer is one example. It has been modified to enable secondary display output.
-
-To start displaying a video onto the LCD display (assuming it is the default display) just type:
-
-[,bash]
-----
-omxplayer video.mkv
-----
-
-To start a second video onto the HDMI type:
-
-[,bash]
-----
-omxplayer --display=5 video.mkv
-----
-
-NOTE: You may need to increase the amount of memory allocated to the GPU to 128MB if the videos are 1080P. Adjust the gpu_mem value in `config.txt` for this. The Raspberry Pi headline figures are 1080P30 decode, so if you are using two 1080P clips it may not play correctly depending on the complexity of the videos.
-
-Display numbers are:
-
-* LCD: 4
-* TV/HDMI: 5
-* Auto select non-default display: 6
-
-==== How do I enable right click?
-
-You can emulate a right click with a setting change. Just:
-
-[,bash]
-----
-sudo nano /etc/X11/xorg.conf
-----
-
-Paste in:
-
-----
-Section "InputClass"
- Identifier "calibration"
- Driver "evdev"
- MatchProduct "FT5406 memory based driver"
-
- Option "EmulateThirdButton" "1"
- Option "EmulateThirdButtonTimeout" "750"
- Option "EmulateThirdButtonMoveThreshold" "30"
-EndSection
-----
-
-Hit `CTRL+X` and `y` to save. Then:
-
-[,bash]
-----
-sudo reboot
-----
-
-Once enabled, right click works by pressing and holding the touchscreen and will be activated after a short delay.
-
-==== How do I get an on-screen keyboard?
-
-===== Florence Virtual Keyboard
-
-Install with:
-
-[,bash]
-----
-sudo apt install florence
-----
-
-===== Matchbox Virtual Keyboard
-
-Install like so:
-
-[,bash]
-----
-sudo apt install matchbox-keyboard
-----
-
-And then find in *Accessories* > *Keyboard*.
diff --git a/documentation/asciidoc/accessories/keyboard-and-mouse/connecting-things.adoc b/documentation/asciidoc/accessories/keyboard-and-mouse/connecting-things.adoc
index 8078b92ac..a23011f5c 100644
--- a/documentation/asciidoc/accessories/keyboard-and-mouse/connecting-things.adoc
+++ b/documentation/asciidoc/accessories/keyboard-and-mouse/connecting-things.adoc
@@ -1,6 +1,6 @@
== Connecting it all Together
-This is the configuration we recommend for using your Raspberry Pi, official keyboard and hub, and official mouse together. The hub on the keyboard ensures easy access to USB drives, and the mouse’s cable is tidy, while being long enough to allow you to use the mouse left- or right-handed.
+This is the configuration we recommend for using your Raspberry Pi, official keyboard and hub, and official mouse together. The hub on the keyboard ensures easy access to USB drives, and the mouse's cable is tidy, while being long enough to allow you to use the mouse left- or right-handed.
image::images/everything.png[width="80%"]
diff --git a/documentation/asciidoc/accessories/keyboard-and-mouse/getting-started-keyboard.adoc b/documentation/asciidoc/accessories/keyboard-and-mouse/getting-started-keyboard.adoc
index fc690f669..364973807 100644
--- a/documentation/asciidoc/accessories/keyboard-and-mouse/getting-started-keyboard.adoc
+++ b/documentation/asciidoc/accessories/keyboard-and-mouse/getting-started-keyboard.adoc
@@ -2,7 +2,7 @@
Our official keyboard includes three host USB ports for connecting external devices, such as USB mice, USB drives, and other USB- controlled devices.
-The product’s micro USB port is for connection to the Raspberry Pi. Via the USB hub built into the keyboard, the Raspberry Pi controls, and provides power to, the three USB Type A ports.
+The product's micro USB port is for connection to the Raspberry Pi. Via the USB hub built into the keyboard, the Raspberry Pi controls, and provides power to, the three USB Type A ports.
image::images/back-of-keyboard.png[width="80%"]
diff --git a/documentation/asciidoc/accessories/m2-hat-plus.adoc b/documentation/asciidoc/accessories/m2-hat-plus.adoc
new file mode 100644
index 000000000..b9501e937
--- /dev/null
+++ b/documentation/asciidoc/accessories/m2-hat-plus.adoc
@@ -0,0 +1 @@
+include::m2-hat-plus/about.adoc[]
diff --git a/documentation/asciidoc/accessories/m2-hat-plus/about.adoc b/documentation/asciidoc/accessories/m2-hat-plus/about.adoc
new file mode 100644
index 000000000..a3b033a28
--- /dev/null
+++ b/documentation/asciidoc/accessories/m2-hat-plus/about.adoc
@@ -0,0 +1,141 @@
+[[m2-hat-plus]]
+== About
+
+.The Raspberry Pi M.2 HAT+
+image::images/m2-hat-plus.jpg[width="80%"]
+
+The Raspberry Pi M.2 HAT+ M Key enables you to connect M.2 peripherals such as NVMe drives and other PCIe accessories to Raspberry Pi 5's PCIe interface.
+
+The M.2 HAT+ adapter board converts between the PCIe connector on Raspberry Pi 5 and a single M.2 M key edge connector. You can connect any device that uses the 2230 or 2242 form factors. The M.2 HAT+ can supply up to 3A of power.
+
+The M.2 HAT+ uses Raspberry Pi's https://datasheets.raspberrypi.com/hat/hat-plus-specification.pdf[HAT+ specification], which allows Raspberry Pi OS to automatically detect the HAT+ and any connected devices.
+
+The included threaded spacers provide ample room to fit the Raspberry Pi Active Cooler beneath an M.2 HAT+.
+
+The M.2 HAT+ is _only_ compatible with the https://www.raspberrypi.com/products/raspberry-pi-5-case/[Raspberry Pi Case for Raspberry Pi 5] _if you remove the lid and the included fan_.
+
+== Features
+
+* Single-lane PCIe 2.0 interface (500 MB/s peak transfer rate)
+* Supports devices that use the M.2 M key edge connector
+* Supports devices with the 2230 or 2242 form factor
+* Supplies up to 3A to connected M.2 devices
+* Power and activity LEDs
+* Conforms to the https://datasheets.raspberrypi.com/hat/hat-plus-specification.pdf[Raspberry Pi HAT+ specification]
+* Includes:
+** ribbon cable
+** 16mm GPIO stacking header
+** 4 threaded spacers
+** 8 screws
+** 1 knurled double-flanged drive attachment screw to secure and support the M.2 peripheral
+
+[[m2-hat-plus-installation]]
+== Install
+
+To use the Raspberry Pi M.2 HAT+, you will need:
+
+* a Raspberry Pi 5
+
+Each M.2 HAT+ comes with a ribbon cable, GPIO stacking header, and mounting hardware. Complete the following instructions to install your M.2 HAT+:
+
+. First, ensure that your Raspberry Pi runs the latest software. Run the following command to update:
++
+[source,console]
+----
+$ sudo apt update && sudo apt full-upgrade
+----
+
+. Next, xref:../computers/raspberry-pi.adoc#update-the-bootloader-configuration[ensure that your Raspberry Pi firmware is up-to-date]. Run the following command to see what firmware you're running:
++
+[source,console]
+----
+$ sudo rpi-eeprom-update
+----
++
+If you see December 6, 2023 or a later date, proceed to the next step. If you see a date earlier than December 6, 2023, run the following command to open the Raspberry Pi Configuration CLI:
++
+[source,console]
+----
+$ sudo raspi-config
+----
++
+Under `Advanced Options` > `Bootloader Version`, choose `Latest`. Then, exit `raspi-config` with `Finish` or the *Escape* key.
++
+Run the following command to update your firmware to the latest version:
++
+[source,console]
+----
+$ sudo rpi-eeprom-update -a
+----
++
+Then, reboot with `sudo reboot`.
+
+. Disconnect the Raspberry Pi from power before beginning installation.
+
+
+. The M.2 HAT+ is compatible with the Raspberry Pi 5 Active Cooler. If you have an Active Cooler, install it before installing the M.2 HAT+.
++
+--
+image::images/m2-hat-plus-installation-01.png[width="60%"]
+--
+. Install the spacers using four of the provided screws. Firmly press the GPIO stacking header on top of the Raspberry Pi GPIO pins; orientation does not matter as long as all pins fit into place. Disconnect the ribbon cable from the M.2 HAT+, and insert the other end into the PCIe port of your Raspberry Pi. Lift the ribbon cable holder from both sides, then insert the cable with the copper contact points facing inward, towards the USB ports. With the ribbon cable fully and evenly inserted into the PCIe port, push the cable holder down from both sides to secure the ribbon cable firmly in place.
++
+--
+image::images/m2-hat-plus-installation-02.png[width="60%"]
+--
+. Set the M.2 HAT+ on top of the spacers, and use the four remaining screws to secure it in place.
++
+--
+image::images/m2-hat-plus-installation-03.png[width="60%"]
+--
+. Insert the ribbon cable into the slot on the M.2 HAT+. Lift the ribbon cable holder from both sides, then insert the cable with the copper contact points facing up. With the ribbon cable fully and evenly inserted into the port, push the cable holder down from both sides to secure the ribbon cable firmly in place.
++
+--
+image::images/m2-hat-plus-installation-04.png[width="60%"]
+--
+. Remove the drive attachment screw by turning the screw counter-clockwise. Insert your M.2 SSD into the M.2 key edge connector, sliding the drive into the slot at a slight upward angle. Do not force the drive into the slot: it should slide in gently.
++
+--
+image::images/m2-hat-plus-installation-05.png[width="60%"]
+--
+. Push the notch on the drive attachment screw into the slot at the end of your M.2 drive. Push the drive flat against the M.2 HAT+, and insert the SSD attachment screw by turning the screw clockwise until the SSD feels secure. Do not over-tighten the screw.
++
+--
+image::images/m2-hat-plus-installation-06.png[width="60%"]
+--
+. Congratulations, you have successfully installed the M.2 HAT+. Connect your Raspberry Pi to power; Raspberry Pi OS will automatically detect the M.2 HAT+. If you use Raspberry Pi Desktop, you should see an icon representing the drive on your desktop. If you don't use a desktop, you can find the drive at `/dev/nvme0n1`. To make your drive automatically available for file access, consider xref:../computers/configuration.adoc#automatically-mount-a-storage-device[configuring automatic mounting].
++
+--
+image::images/m2-hat-plus-installation-07.png[width="60%"]
+--
+
+WARNING: Always disconnect your Raspberry Pi from power before connecting or disconnecting a device from the M.2 slot.
+
+== Boot from NVMe
+
+To boot from an NVMe drive attached to the M.2 HAT+, complete the following steps:
+
+. xref:../computers/getting-started.adoc#raspberry-pi-imager[Format your NVMe drive using Raspberry Pi Imager]. You can do this from your Raspberry Pi if you already have an SD card with a Raspberry Pi OS image.
+. Boot your Raspberry Pi into Raspberry Pi OS using an SD card or USB drive to alter the boot order in the persistent on-board EEPROM configuration.
+. In a terminal on your Raspberry Pi, run `sudo raspi-config` to open the Raspberry Pi Configuration CLI.
+. Under `Advanced Options` > `Boot Order`, choose `NVMe/USB boot`. Then, exit `raspi-config` with `Finish` or the *Escape* key.
+. Reboot your Raspberry Pi with `sudo reboot`.
+
+For more information, see xref:../computers/raspberry-pi.adoc#nvme-ssd-boot[NVMe boot].
+
+== Enable PCIe Gen 3
+
+WARNING: The Raspberry Pi 5 is not certified for Gen 3.0 speeds. PCIe Gen 3.0 connections may be unstable.
+
+To enable PCIe Gen 3 speeds, follow the instructions at xref:../computers/raspberry-pi.adoc#pcie-gen-3-0[enable PCIe Gen 3.0].
+
+== Schematics
+
+.Schematics for the Raspberry Pi M.2 HAT+
+image::images/m2-hat-plus-schematics.png[width="80%"]
+
+Schematics are also available as a https://datasheets.raspberrypi.com/m2-hat-plus/raspberry-pi-m2-hat-plus-schematics.pdf[PDF].
+
+== Product brief
+
+For more information about the M.2 HAT+, including mechanical specifications and operating environment limitations, see the https://datasheets.raspberrypi.com/m2-hat-plus/raspberry-pi-m2-hat-plus-product-brief.pdf[product brief].
diff --git a/documentation/asciidoc/accessories/m2-hat-plus/images/m2-hat-plus-installation-01.png b/documentation/asciidoc/accessories/m2-hat-plus/images/m2-hat-plus-installation-01.png
new file mode 100644
index 000000000..89eda454c
Binary files /dev/null and b/documentation/asciidoc/accessories/m2-hat-plus/images/m2-hat-plus-installation-01.png differ
diff --git a/documentation/asciidoc/accessories/m2-hat-plus/images/m2-hat-plus-installation-02.png b/documentation/asciidoc/accessories/m2-hat-plus/images/m2-hat-plus-installation-02.png
new file mode 100644
index 000000000..b11d07a45
Binary files /dev/null and b/documentation/asciidoc/accessories/m2-hat-plus/images/m2-hat-plus-installation-02.png differ
diff --git a/documentation/asciidoc/accessories/m2-hat-plus/images/m2-hat-plus-installation-03.png b/documentation/asciidoc/accessories/m2-hat-plus/images/m2-hat-plus-installation-03.png
new file mode 100644
index 000000000..c11a504ee
Binary files /dev/null and b/documentation/asciidoc/accessories/m2-hat-plus/images/m2-hat-plus-installation-03.png differ
diff --git a/documentation/asciidoc/accessories/m2-hat-plus/images/m2-hat-plus-installation-04.png b/documentation/asciidoc/accessories/m2-hat-plus/images/m2-hat-plus-installation-04.png
new file mode 100644
index 000000000..ae6e321dc
Binary files /dev/null and b/documentation/asciidoc/accessories/m2-hat-plus/images/m2-hat-plus-installation-04.png differ
diff --git a/documentation/asciidoc/accessories/m2-hat-plus/images/m2-hat-plus-installation-05.png b/documentation/asciidoc/accessories/m2-hat-plus/images/m2-hat-plus-installation-05.png
new file mode 100644
index 000000000..0a93df849
Binary files /dev/null and b/documentation/asciidoc/accessories/m2-hat-plus/images/m2-hat-plus-installation-05.png differ
diff --git a/documentation/asciidoc/accessories/m2-hat-plus/images/m2-hat-plus-installation-06.png b/documentation/asciidoc/accessories/m2-hat-plus/images/m2-hat-plus-installation-06.png
new file mode 100644
index 000000000..209ec6cbc
Binary files /dev/null and b/documentation/asciidoc/accessories/m2-hat-plus/images/m2-hat-plus-installation-06.png differ
diff --git a/documentation/asciidoc/accessories/m2-hat-plus/images/m2-hat-plus-installation-07.png b/documentation/asciidoc/accessories/m2-hat-plus/images/m2-hat-plus-installation-07.png
new file mode 100644
index 000000000..238b75df8
Binary files /dev/null and b/documentation/asciidoc/accessories/m2-hat-plus/images/m2-hat-plus-installation-07.png differ
diff --git a/documentation/asciidoc/accessories/m2-hat-plus/images/m2-hat-plus-schematics.png b/documentation/asciidoc/accessories/m2-hat-plus/images/m2-hat-plus-schematics.png
new file mode 100644
index 000000000..5d0688fbd
Binary files /dev/null and b/documentation/asciidoc/accessories/m2-hat-plus/images/m2-hat-plus-schematics.png differ
diff --git a/documentation/asciidoc/accessories/m2-hat-plus/images/m2-hat-plus.jpg b/documentation/asciidoc/accessories/m2-hat-plus/images/m2-hat-plus.jpg
new file mode 100644
index 000000000..30a05a30b
Binary files /dev/null and b/documentation/asciidoc/accessories/m2-hat-plus/images/m2-hat-plus.jpg differ
diff --git a/documentation/asciidoc/accessories/monitor.adoc b/documentation/asciidoc/accessories/monitor.adoc
new file mode 100644
index 000000000..b100eb439
--- /dev/null
+++ b/documentation/asciidoc/accessories/monitor.adoc
@@ -0,0 +1 @@
+include::monitor/monitor_intro.adoc[]
diff --git a/documentation/asciidoc/accessories/monitor/images/drill-hole-template.pdf b/documentation/asciidoc/accessories/monitor/images/drill-hole-template.pdf
new file mode 100644
index 000000000..1d77318e3
Binary files /dev/null and b/documentation/asciidoc/accessories/monitor/images/drill-hole-template.pdf differ
diff --git a/documentation/asciidoc/accessories/monitor/images/drill-hole-template.png b/documentation/asciidoc/accessories/monitor/images/drill-hole-template.png
new file mode 100644
index 000000000..a1553774a
Binary files /dev/null and b/documentation/asciidoc/accessories/monitor/images/drill-hole-template.png differ
diff --git a/documentation/asciidoc/accessories/monitor/images/mechanical-drawing.pdf b/documentation/asciidoc/accessories/monitor/images/mechanical-drawing.pdf
new file mode 100644
index 000000000..d74544841
Binary files /dev/null and b/documentation/asciidoc/accessories/monitor/images/mechanical-drawing.pdf differ
diff --git a/documentation/asciidoc/accessories/monitor/images/mechanical-drawing.png b/documentation/asciidoc/accessories/monitor/images/mechanical-drawing.png
new file mode 100644
index 000000000..41faee30b
Binary files /dev/null and b/documentation/asciidoc/accessories/monitor/images/mechanical-drawing.png differ
diff --git a/documentation/asciidoc/accessories/monitor/images/monitor-hero.png b/documentation/asciidoc/accessories/monitor/images/monitor-hero.png
new file mode 100644
index 000000000..dbaa5f56d
Binary files /dev/null and b/documentation/asciidoc/accessories/monitor/images/monitor-hero.png differ
diff --git a/documentation/asciidoc/accessories/monitor/images/no-hdmi.png b/documentation/asciidoc/accessories/monitor/images/no-hdmi.png
new file mode 100644
index 000000000..408ad418b
Binary files /dev/null and b/documentation/asciidoc/accessories/monitor/images/no-hdmi.png differ
diff --git a/documentation/asciidoc/accessories/monitor/images/no-valid-hdmi-signal-standby.png b/documentation/asciidoc/accessories/monitor/images/no-valid-hdmi-signal-standby.png
new file mode 100644
index 000000000..2c0312118
Binary files /dev/null and b/documentation/asciidoc/accessories/monitor/images/no-valid-hdmi-signal-standby.png differ
diff --git a/documentation/asciidoc/accessories/monitor/images/not-supported-resolution.png b/documentation/asciidoc/accessories/monitor/images/not-supported-resolution.png
new file mode 100644
index 000000000..533421738
Binary files /dev/null and b/documentation/asciidoc/accessories/monitor/images/not-supported-resolution.png differ
diff --git a/documentation/asciidoc/accessories/monitor/images/power-saving-mode.png b/documentation/asciidoc/accessories/monitor/images/power-saving-mode.png
new file mode 100644
index 000000000..106694ee1
Binary files /dev/null and b/documentation/asciidoc/accessories/monitor/images/power-saving-mode.png differ
diff --git a/documentation/asciidoc/accessories/monitor/monitor_intro.adoc b/documentation/asciidoc/accessories/monitor/monitor_intro.adoc
new file mode 100644
index 000000000..ae747671a
--- /dev/null
+++ b/documentation/asciidoc/accessories/monitor/monitor_intro.adoc
@@ -0,0 +1,119 @@
+== Raspberry Pi Monitor
+
+The https://www.raspberrypi.com/products/raspberry-pi-monitor/[Raspberry Pi Monitor] is a 15.6" 1920 × 1080p IPS LCD display that connects to a computer using an HDMI cable. The Monitor also requires a USB-C power source. For full brightness and volume range, this must be a USB-PD source capable of at least 1.5A of current.
+
+.The Raspberry Pi Monitor
+image::images/monitor-hero.png[The Raspberry Pi Monitor, width="100%"]
+
+The Monitor is compatible with all models of Raspberry Pi that support HDMI output.
+
+=== Controls
+
+The back of the Monitor includes the following controls:
+
+* a button that enters and exits Standby mode (indicated by the ⏻ (power) symbol)
+* buttons that increase and decrease display brightness (indicated by the 🔆 (sun) symbol)
+* buttons that increase and decrease speaker volume (indicated by the 🔈 (speaker) symbol)
+
+=== On-screen display messages
+
+The on-screen display (OSD) may show the following messages:
+
+[cols="1a,6"]
+|===
+| Message | Description
+
+| image::images/no-hdmi.png[No HDMI signal detected]
+| No HDMI signal detected.
+
+| image::images/no-valid-hdmi-signal-standby.png[Standby mode]
+| The monitor will soon enter standby mode to conserve power.
+
+| image::images/not-supported-resolution.png[Unsupported resolution]
+| The output display resolution of the connected device is not supported.
+
+| image::images/power-saving-mode.png[Power saving mode]
+| The monitor is operating in Power Saving mode, with reduced brightness and volume, because the monitor is not connected to a power supply capable of delivering 1.5A of current or greater.
+|===
+
+Additionally, the OSD shows information about display brightness changes using the 🔆 (sun) symbol, and speaker volume level changes using the 🔈 (speaker) symbol. Both brightness and volume use a scale that ranges from 0 to 100.
+
+TIP: If you attempt to exit Standby mode when the display cannot detect an HDMI signal, the red LED beneath the Standby button will briefly light, but the display will remain in Standby mode.
+
+=== Position the Monitor
+
+Use the following approaches to position the Monitor:
+
+* Angle the Monitor on the integrated stand.
+* Mount the Monitor on an arm or stand using the four VESA mount holes on the back of the red rear plastic housing.
++
+IMPORTANT: Use spacers to ensure adequate space for display and power cable egress.
+* Flip the integrated stand fully upwards, towards the top of the monitor. Use the drill hole template to create two mounting points spaced 55mm apart. Hang the Monitor using the slots on the back of the integrated stand.
++
+.Drill hole template
+image::images/drill-hole-template.png[Drill hole template, width="40%"]
+
+=== Power the Monitor
+
+The Raspberry Pi Monitor draws power from a 5V https://en.wikipedia.org/wiki/USB_hardware#USB_Power_Delivery[USB Power Delivery] (USB-PD) power source. Many USB-C power supplies, including the official power supplies for the Raspberry Pi 4 and Raspberry Pi 5, support this standard.
+
+When using a power source that provides at least 1.5A of current over USB-PD, the Monitor operates in **Full Power mode**. In Full Power mode, you can use the full range (0%-100%) of display brightness and speaker volume.
+
+When using a power source that does _not_ supply at least 1.5A of current over USB-PD (including all USB-A power sources), the Monitor operates in **Power Saving mode**. Power Saving mode limits the maximum display brightness and the maximum speaker volume to ensure reliable operation. In Power Saving mode, you can use a limited range (0%-50%) of display brightness and a limited range (0%-60%) of speaker volume. When powered from a Raspberry Pi, the Monitor operates in Power Saving mode, since Raspberry Pi devices cannot provide 1.5A of current over a USB-A connection.
+
+To switch from Power Saving mode to Full Power mode, press and hold the *increase brightness* button for 3 seconds.
+
+[TIP]
+====
+If the Monitor flashes on and off, your USB power supply is not capable of providing sufficient current to power the monitor. This can happen if you power the Monitor from a Raspberry Pi 5 or Pi 500 which is itself powered by a 5V/3A power supply. Try the following fixes to stop the Monitor from flashing on and off:
+
+* reduce the display brightness and volume (you may have to connect your monitor to another power supply to access the settings)
+* switch to a different power source or cable
+
+====
+
+=== Specification
+
+Diagonal: 15.6"
+
+Resolution: 1920 × 1080
+
+Type: IPS LCD
+
+Colour gamut: 45%
+
+Contrast: 800:1
+
+Brightness: 250cd/m^2^
+
+Screen coating: Anti-glare 3H hardness
+
+Display area: 344 × 193mm
+
+Dimensions: 237 × 360 × 20mm
+
+Weight: 850g
+
+Supported resolutions:
+
+* 1920 × 1080p @ 50/60Hz
+* 1280 × 720p @ 50/60Hz
+* 720 × 576p @ 50/60Hz
+* 720 × 480p @ 50/60Hz
+* 640 × 480p @ 50/60Hz
+
+Input: HDMI 1.4; supports DDC-CI
+
+Power input: USB-C; requires 1.5A over USB-PD at 5V for full brightness and volume range
+
+Power consumption: 4.5-6.5W during use; < 0.1W at idle
+
+Speakers: 2 × 1.2W (stereo)
+
+Ports: 3.5mm audio jack
+
+
+=== Mechanical drawing
+
+.Mechanical Drawing
+image::images/mechanical-drawing.png[Mechanical drawing, width="80%"]
diff --git a/documentation/asciidoc/accessories/sd-cards.adoc b/documentation/asciidoc/accessories/sd-cards.adoc
new file mode 100644
index 000000000..ffdb0161a
--- /dev/null
+++ b/documentation/asciidoc/accessories/sd-cards.adoc
@@ -0,0 +1 @@
+include::sd-cards/about.adoc[]
diff --git a/documentation/asciidoc/accessories/sd-cards/about.adoc b/documentation/asciidoc/accessories/sd-cards/about.adoc
new file mode 100644
index 000000000..1d8f41170
--- /dev/null
+++ b/documentation/asciidoc/accessories/sd-cards/about.adoc
@@ -0,0 +1,37 @@
+== About
+
+.A Raspberry Pi SD Card inserted into a Raspberry Pi 5
+image::images/sd-hero.jpg[width="80%"]
+
+SD card quality is a critical factor in determining the overall user experience for a Raspberry Pi. Slow bus speeds and lack of command queueing can reduce the performance of even the most powerful Raspberry Pi models.
+
+Raspberry Pi's official microSD cards support DDR50 and SDR104 bus speeds. Additionally, Raspberry Pi SD cards support the command queueing (CQ) extension, which permits some pipelining of random read operations, ensuring optimal performance.
+
+You can even buy Raspberry Pi SD cards pre-programmed with the latest version of Raspberry Pi OS.
+
+Raspberry Pi SD cards are available in the following sizes:
+
+* 32GB
+* 64GB
+* 128GB
+
+== Specifications
+
+.A 128GB Raspberry Pi SD Card
+image::images/sd-cards.png[width="80%"]
+
+Raspberry Pi SD cards use the SD6.1 SD specification.
+
+Raspberry Pi SD cards use the microSDHC/microSDXC form factor.
+
+Raspberry Pi SD cards have the following Speed Class ratings: C10, U3, V30, A2.
+
+The following table describes the read and write speeds of Raspberry Pi SD cards using 4KB of random data:
+
+|===
+| Raspberry Pi Model | Interface | Read Speed | Write Speed
+
+| 4 | DDR50 | 3,200 IOPS | 1,200 IOPS
+| 5 | SDR104 | 5,000 IOPS | 2,000 IOPS
+|===
+
diff --git a/documentation/asciidoc/accessories/sd-cards/images/sd-cards.png b/documentation/asciidoc/accessories/sd-cards/images/sd-cards.png
new file mode 100644
index 000000000..9651ba959
Binary files /dev/null and b/documentation/asciidoc/accessories/sd-cards/images/sd-cards.png differ
diff --git a/documentation/asciidoc/accessories/sd-cards/images/sd-hero.jpg b/documentation/asciidoc/accessories/sd-cards/images/sd-hero.jpg
new file mode 100644
index 000000000..759745039
Binary files /dev/null and b/documentation/asciidoc/accessories/sd-cards/images/sd-hero.jpg differ
diff --git a/documentation/asciidoc/accessories/sense-hat.adoc b/documentation/asciidoc/accessories/sense-hat.adoc
index 1f0294676..c0db67f2b 100644
--- a/documentation/asciidoc/accessories/sense-hat.adoc
+++ b/documentation/asciidoc/accessories/sense-hat.adoc
@@ -1,6 +1,5 @@
-
include::sense-hat/intro.adoc[]
-include::sense-hat/software.adoc[]
-
include::sense-hat/hardware.adoc[]
+
+include::sense-hat/software.adoc[]
diff --git a/documentation/asciidoc/accessories/sense-hat/hardware.adoc b/documentation/asciidoc/accessories/sense-hat/hardware.adoc
index cf5c6272e..735ce713a 100644
--- a/documentation/asciidoc/accessories/sense-hat/hardware.adoc
+++ b/documentation/asciidoc/accessories/sense-hat/hardware.adoc
@@ -1,4 +1,4 @@
-== Sense HAT hardware
+== Features
The Sense HAT has an 8×8 RGB LED matrix and a five-button joystick, and includes the following sensors:
@@ -10,149 +10,16 @@ The Sense HAT has an 8×8 RGB LED matrix and a five-button joystick, and include
* Humidity
* Colour and brightness
-Schematics and mechanical drawings for the Sense HAT are available for download.
+Schematics and mechanical drawings for the Sense HAT and the Sense HAT V2 are available for download.
-* https://datasheets.raspberrypi.com/sense-hat/sense-hat-schematics.pdf[Sense HAT schematics].
+* https://datasheets.raspberrypi.com/sense-hat/sense-hat-schematics.pdf[Sense HAT V1 schematics].
+* https://datasheets.raspberrypi.com/sense-hat/sense-hat-v2-schematics.pdf[Sense HAT V2 schematics].
* https://datasheets.raspberrypi.com/sense-hat/sense-hat-mechanical-drawing.pdf[Sense HAT mechanical drawings].
=== LED matrix
-The LED matrix is an RGB565 https://www.kernel.org/doc/Documentation/fb/framebuffer.txt[framebuffer] with the id "RPi-Sense FB". The appropriate device node can be written to as a standard file or mmap-ed. The included 'snake' example shows how to access the framebuffer.
+The LED matrix is an RGB565 https://www.kernel.org/doc/Documentation/fb/framebuffer.txt[framebuffer] with the id `RPi-Sense FB`. The appropriate device node can be written to as a standard file or mmap-ed. The included snake example shows how to access the framebuffer.
=== Joystick
-The joystick comes up as an input event device named "Raspberry Pi Sense HAT Joystick", mapped to the arrow keys and `Enter`. It should be supported by any library which is capable of handling inputs, or directly through the https://www.kernel.org/doc/Documentation/input/input.txt[evdev interface]. Suitable libraries include SDL, http://www.pygame.org/docs/[pygame] and https://python-evdev.readthedocs.org/en/latest/[python-evdev]. The included 'snake' example shows how to access the joystick directly.
-
-== Hardware calibration
-
-Install the necessary software and run the calibration program as follows:
-
-[,bash]
-----
-$ sudo apt update
-$ sudo apt install octave -y
-$ cd
-$ cp /usr/share/librtimulib-utils/RTEllipsoidFit ./ -a
-$ cd RTEllipsoidFit
-$ RTIMULibCal
-----
-
-You will then see this menu:
-
-----
-Options are:
-
- m - calibrate magnetometer with min/max
- e - calibrate magnetometer with ellipsoid (do min/max first)
- a - calibrate accelerometers
- x - exit
-
-Enter option:
-----
-
-Press lowercase `m`. The following message will then show. Press any key to start.
-
-----
- Magnetometer min/max calibration
- --------------------------------
- Waggle the IMU chip around, ensuring that all six axes
- (+x, -x, +y, -y and +z, -z) go through their extrema.
- When all extrema have been achieved, enter 's' to save, 'r' to reset
- or 'x' to abort and discard the data.
-
- Press any key to start...
-----
-
-After it starts, you will see something similar to this scrolling up the screen:
-
-----
- Min x: 51.60 min y: 69.39 min z: 65.91
- Max x: 53.15 max y: 70.97 max z: 67.97
-----
-
-Focus on the two lines at the very bottom of the screen, as these are the most recently posted measurements from the program.
-
-Now, pick up the Raspberry Pi and Sense HAT and move it around in every possible way you can think of. It helps if you unplug all non-essential cables to avoid clutter.
-
-Try and get a complete circle in each of the pitch, roll and yaw axes. Take care not to accidentally eject the SD card while doing this. Spend a few minutes moving the Sense HAT, and stop when you find that the numbers are not changing anymore.
-
-Now press lowercase `s` then lowercase `x` to exit the program. If you run the `ls` command now, you'll see a new `RTIMULib.ini` file has been created.
-
-In addition to those steps, you can also do the ellipsoid fit by performing the steps above, but pressing `e` instead of `m`.
-
-When you're done, copy the resulting `RTIMULib.ini` to /etc/ and remove the local copy in `~/.config/sense_hat/`:
-
-[,bash]
-----
-$ rm ~/.config/sense_hat/RTIMULib.ini
-$ sudo cp RTIMULib.ini /etc
-----
-
-== Reading and writing EEPROM data
-
-Enable I2C0 and I2C1 by adding the following line to `/boot/config.txt`:
-
-----
- dtparam=i2c_vc=on
- dtparam=i2c_arm=on
-----
-
-Enter the following command to reboot:
-
-[,bash]
-----
- sudo systemctl reboot
-----
-
-Download and build the flash tool:
-
-[,bash]
-----
-$ git clone https://github.com/raspberrypi/hats.git
-$ cd hats/eepromutils
-$ make
-----
-
-NOTE: These steps may not work on Raspberry Pi 2 Model B Rev 1.0 and Raspberry Pi 3 Model B boards. The firmware will take control of I2C0, causing the ID pins to be configured as inputs.
-
-=== Reading
-
-EEPROM data can be read with the following command:
-
-[,bash]
-----
-$ sudo ./eepflash.sh -f=sense_read.eep -t=24c32 -r
-----
-
-=== Writing
-
-Download EEPROM settings and build the `.eep` binary:
-
-[,bash]
-----
-$ wget https://github.com/raspberrypi/rpi-sense/raw/master/eeprom/eeprom_settings.txt -O sense_eeprom.txt
- ./eepmake sense_eeprom.txt sense.eep /boot/overlays/rpi-sense-overlay.dtb
-----
-
-Disable write protection:
-
-[,bash]
-----
-$ i2cset -y -f 1 0x46 0xf3 1
-----
-
-Write the EEPROM data:
-
-[,bash]
-----
-$ sudo ./eepflash.sh -f=sense.eep -t=24c32 -w
-----
-
-Re-enable write protection:
-
-[,bash]
-----
- i2cset -y -f 1 0x46 0xf3 0
-----
-
-WARNING: This operation will not damage your Raspberry Pi or Sense Hat, but if an error occurs, the HAT may no longer be automatically detected. The steps above are provided for debugging purposes only.
+The joystick comes up as an input event device named `Raspberry Pi Sense HAT Joystick`, mapped to the arrow keys and **Enter**. It should be supported by any library which is capable of handling inputs, or directly through the https://www.kernel.org/doc/Documentation/input/input.txt[evdev interface]. Suitable libraries include SDL, http://www.pygame.org/docs/[pygame] and https://python-evdev.readthedocs.org/en/latest/[python-evdev]. The included `snake` example shows how to access the joystick directly.
diff --git a/documentation/asciidoc/accessories/sense-hat/images/Sense-HAT.jpg b/documentation/asciidoc/accessories/sense-hat/images/Sense-HAT.jpg
index ef74aa37a..e1eebd815 100644
Binary files a/documentation/asciidoc/accessories/sense-hat/images/Sense-HAT.jpg and b/documentation/asciidoc/accessories/sense-hat/images/Sense-HAT.jpg differ
diff --git a/documentation/asciidoc/accessories/sense-hat/intro.adoc b/documentation/asciidoc/accessories/sense-hat/intro.adoc
index ebf2c15c5..01f8a2425 100644
--- a/documentation/asciidoc/accessories/sense-hat/intro.adoc
+++ b/documentation/asciidoc/accessories/sense-hat/intro.adoc
@@ -1,9 +1,9 @@
-== Introducing the Sense HAT
+== About
-The https://www.raspberrypi.com/products/sense-hat/[Raspberry Pi Sense HAT] is an add-on board that gives your Raspberry Pi an array of sensing capabilities. The on-board sensors allow you to monitor pressure, humidity, temperature, colour, orientation, and movement. The bright 8×8 RGB LED matrix allows you to visualise data from the sensors, and the five-button joystick lets users interact with your projects.
+The https://www.raspberrypi.com/products/sense-hat/[Raspberry Pi Sense HAT] is an add-on board that gives your Raspberry Pi an array of sensing capabilities. The on-board sensors allow you to monitor pressure, humidity, temperature, colour, orientation, and movement. The 8×8 RGB LED matrix allows you to visualise data from the sensors. The five-button joystick lets users interact with your projects.
image::images/Sense-HAT.jpg[width="70%"]
-The Sense HAT was originally developed for use on the International Space Station, as part of the educational https://astro-pi.org/[Astro Pi] programme run by the https://raspberrypi.org[Raspberry Pi Foundation] in partnership with the https://www.esa.int/[European Space Agency]. It is well suited to many projects that require position, motion, orientation, or environmental sensing. The Sense HAT is powered by the Raspberry Pi computer to which it is connected.
+The Sense HAT was originally developed for use on the International Space Station as part of the educational https://astro-pi.org/[Astro Pi] programme run by the https://raspberrypi.org[Raspberry Pi Foundation] in partnership with the https://www.esa.int/[European Space Agency]. It can help with any project that requires position, motion, orientation, or environmental sensing.
-An officially supported xref:sense-hat.adoc#using-the-sense-hat-with-python[Python library] provides access to all of the on-board sensors, the LED matrix, and the joystick. The Sense HAT is compatible with any Raspberry Pi computer with a 40-pin GPIO header.
+An officially supported xref:sense-hat.adoc#use-the-sense-hat-with-python[Python library] provides access to the on-board sensors, LED matrix, and joystick. The Sense HAT is compatible with any Raspberry Pi device with a 40-pin GPIO header.
diff --git a/documentation/asciidoc/accessories/sense-hat/software.adoc b/documentation/asciidoc/accessories/sense-hat/software.adoc
index ead3bc605..33261939a 100644
--- a/documentation/asciidoc/accessories/sense-hat/software.adoc
+++ b/documentation/asciidoc/accessories/sense-hat/software.adoc
@@ -1,51 +1,191 @@
-== Installation
+== Install
-In order to work correctly, the Sense HAT requires an up-to-date kernel, I2C to be enabled, and a few libraries to get started.
+In order to work correctly, the Sense HAT requires:
-Ensure your APT package list is up-to-date:
+* an up-to-date kernel
+* https://en.wikipedia.org/wiki/I%C2%B2C[I2C] enabled on your Raspberry Pi
+* a few dependencies
-[,bash]
+Complete the following steps to get your Raspberry Pi device ready to connect to the Sense HAT:
+
+. First, ensure that your Raspberry Pi runs the latest software. Run the following command to update:
++
+[source,console]
----
- sudo apt update
+$ sudo apt update && sudo apt full-upgrade
----
-Next, install the sense-hat package, which will ensure the kernel is up to date, enable I2C, and install the necessary libraries and programs:
+. Next, install the `sense-hat` package, which will ensure the kernel is up to date, enable I2C, and install the necessary dependencies:
++
+[source,console]
+----
+$ sudo apt install sense-hat
+----
-[,bash]
+. Finally, reboot your Raspberry Pi to enable I2C and load the new kernel, if it changed:
++
+[source,console]
----
- sudo apt install sense-hat
+$ sudo reboot
----
-Finally, a reboot may be required if I2C was disabled or the kernel was not up-to-date prior to the install:
+== Calibrate
+
+Install the necessary software and run the calibration program as follows:
-[,bash]
+[source,console]
----
- sudo reboot
+$ sudo apt update
+$ sudo apt install octave -y
+$ cd
+$ cp /usr/share/librtimulib-utils/RTEllipsoidFit ./ -a
+$ cd RTEllipsoidFit
+$ RTIMULibCal
----
-== Getting started
+The calibration program displays the following menu:
-[.float-group]
---
-image::images/experiment-with-the-sense-hat.png[role="related thumb right",link=https://github.com/raspberrypipress/released-pdfs/raw/main/experiment-with-the-sense-hat.pdf]
-After installation, example code can be found under `/usr/src/sense-hat/examples`.
+----
+Options are:
+
+ m - calibrate magnetometer with min/max
+ e - calibrate magnetometer with ellipsoid (do min/max first)
+ a - calibrate accelerometers
+ x - exit
+
+Enter option:
+----
+
+Press lowercase `m`. The following message will then show. Press any key to start.
+
+----
+Magnetometer min/max calibration
+-------------------------------
+Waggle the IMU chip around, ensuring that all six axes
+(+x, -x, +y, -y and +z, -z) go through their extrema.
+When all extrema have been achieved, enter 's' to save, 'r' to reset
+or 'x' to abort and discard the data.
+
+Press any key to start...
+----
+
+After it starts, you should see output similar to the following scrolling up the screen:
+
+----
+Min x: 51.60 min y: 69.39 min z: 65.91
+Max x: 53.15 max y: 70.97 max z: 67.97
+----
+
+Focus on the two lines at the very bottom of the screen, as these are the most recently posted measurements from the program.
+
+Now, pick up the Raspberry Pi and Sense HAT and move it around in every possible way you can think of. It helps if you unplug all non-essential cables to avoid clutter.
+
+Try and get a complete circle in each of the pitch, roll and yaw axes. Take care not to accidentally eject the SD card while doing this. Spend a few minutes moving the Sense HAT, and stop when you find that the numbers are not changing any more.
+
+Now press lowercase `s` then lowercase `x` to exit the program. If you run the `ls` command now, you'll see a new `RTIMULib.ini` file has been created.
+
+In addition to those steps, you can also do the ellipsoid fit by performing the steps above, but pressing `e` instead of `m`.
+
+When you're done, copy the resulting `RTIMULib.ini` to `/etc/` and remove the local copy in `~/.config/sense_hat/`:
+[source,console]
+----
+$ rm ~/.config/sense_hat/RTIMULib.ini
+$ sudo cp RTIMULib.ini /etc
+----
-You can find more information on how to use the Sense HAT in the Raspberry Pi Press book https://github.com/raspberrypipress/released-pdfs/raw/main/experiment-with-the-sense-hat.pdf[Experiment with the Sense HAT]. Written by The Raspberry Pi Foundation's Education Team, it is part of the MagPi Essentials series published by Raspberry Pi Press. The book covers the background of the Astro Pi project, and walks you through how to make use of all the Sense HAT features using the xref:sense-hat.adoc#using-the-sense-hat-with-python[Python library].
+== Getting started
-You can download this book as a PDF file for free, it has been released under a Creative Commons https://creativecommons.org/licenses/by-nc-sa/3.0/[Attribution-NonCommercial-ShareAlike] 3.0 Unported (CC BY NC-SA) licence.
---
+After installation, example code can be found under `/usr/src/sense-hat/examples`.
-=== Using the Sense HAT with Python
+=== Use the Sense HAT with Python
`sense-hat` is the officially supported library for the Sense HAT; it provides access to all of the on-board sensors and the LED matrix.
-Complete documentation for the library can be found at https://pythonhosted.org/sense-hat/[pythonhosted.org/sense-hat].
+Complete documentation for the library can be found at https://sense-hat.readthedocs.io/en/latest/[sense-hat.readthedocs.io].
-=== Using the Sense HAT with {cpp}
+=== Use the Sense HAT with C++
https://github.com/RPi-Distro/RTIMULib[RTIMULib] is a {cpp} and Python library that makes it easy to use 9-dof and 10-dof IMUs with embedded Linux systems. A pre-calibrated settings file is provided in `/etc/RTIMULib.ini`, which is also copied and used by `sense-hat`. The included examples look for `RTIMULib.ini` in the current working directory, so you may wish to copy the file there to get more accurate data.
The RTIMULibDrive11 example comes pre-compiled to help ensure everything works as intended. It can be launched by running `RTIMULibDrive11` and closed by pressing `Ctrl C`.
NOTE: The C/{cpp} examples can be compiled by running `make` in the appropriate directory.
+
+== Troubleshooting
+
+=== Read and write EEPROM data
+
+These steps are provided for debugging purposes only.
+
+NOTE: On Raspberry Pi 2 Model B Rev 1.0 and Raspberry Pi 3 Model B boards, these steps may not work. The firmware will take control of I2C0, causing the ID pins to be configured as inputs.
+
+Before you can read and write EEPROM data to and from the Sense HAT, you must complete the following steps:
+
+. Enable I2C0 and I2C1 by adding the following line to the xref:../computers/config_txt.adoc#what-is-config-txt[`/boot/firmware/config.txt`] file:
++
+[source,ini]
+----
+dtparam=i2c_vc=on
+dtparam=i2c_arm=on
+----
+
+. Run the following command to reboot:
++
+[source,console]
+----
+$ sudo reboot
+----
+
+. Download and build the flash tool:
++
+[source,console]
+----
+$ git clone https://github.com/raspberrypi/hats.git
+$ cd hats/eepromutils
+$ make
+----
+
+==== Read
+
+To read EEPROM data, run the following command:
+
+[source,console]
+----
+$ sudo ./eepflash.sh -f=sense_read.eep -t=24c32 -r
+----
+
+==== Write
+
+NOTE: This operation will not damage your Raspberry Pi or Sense HAT, but if an error occurs, your Raspberry Pi may fail to automatically detect the HAT.
+
+
+. First, download EEPROM settings and build the `.eep` binary:
++
+[source,console]
+----
+$ wget https://github.com/raspberrypi/rpi-sense/raw/master/eeprom/eeprom_settings.txt -O sense_eeprom.txt
+$ ./eepmake sense_eeprom.txt sense.eep /boot/firmware/overlays/rpi-sense-overlay.dtb
+----
+
+. Next, disable write protection:
++
+[source,console]
+----
+$ i2cset -y -f 1 0x46 0xf3 1
+----
+
+. Write the EEPROM data:
++
+[source,console]
+----
+$ sudo ./eepflash.sh -f=sense.eep -t=24c32 -w
+----
+
+. Finally, re-enable write protection:
++
+[source,console]
+----
+$ i2cset -y -f 1 0x46 0xf3 0
+----
+
diff --git a/documentation/asciidoc/accessories/ssd-kit.adoc b/documentation/asciidoc/accessories/ssd-kit.adoc
new file mode 100644
index 000000000..2533220b5
--- /dev/null
+++ b/documentation/asciidoc/accessories/ssd-kit.adoc
@@ -0,0 +1 @@
+include::ssd-kit/about.adoc[]
diff --git a/documentation/asciidoc/accessories/ssd-kit/about.adoc b/documentation/asciidoc/accessories/ssd-kit/about.adoc
new file mode 100644
index 000000000..390aef6d3
--- /dev/null
+++ b/documentation/asciidoc/accessories/ssd-kit/about.adoc
@@ -0,0 +1,13 @@
+== About
+
+.A 512GB Raspberry Pi SSD Kit
+image::images/ssd-kit.png[width="80%"]
+
+The Raspberry Pi SSD Kit bundles a xref:../accessories/m2-hat-plus.adoc[Raspberry Pi M.2 HAT+] with a xref:../accessories/ssds.adoc[Raspberry Pi SSD].
+
+The Raspberry Pi SSD Kit includes a 16mm stacking header, spacers, and
+screws to enable fitting on Raspberry Pi 5 alongside a Raspberry Pi Active Cooler.
+
+== Install
+
+To install the Raspberry Pi SSD Kit, follow the xref:../accessories/m2-hat-plus.adoc#m2-hat-plus-installation[installation instructions for the Raspberry Pi M.2 HAT+].
diff --git a/documentation/asciidoc/accessories/ssd-kit/images/ssd-kit.png b/documentation/asciidoc/accessories/ssd-kit/images/ssd-kit.png
new file mode 100644
index 000000000..9381c5ca1
Binary files /dev/null and b/documentation/asciidoc/accessories/ssd-kit/images/ssd-kit.png differ
diff --git a/documentation/asciidoc/accessories/ssds.adoc b/documentation/asciidoc/accessories/ssds.adoc
new file mode 100644
index 000000000..3934f0db6
--- /dev/null
+++ b/documentation/asciidoc/accessories/ssds.adoc
@@ -0,0 +1 @@
+include::ssds/about.adoc[]
diff --git a/documentation/asciidoc/accessories/ssds/about.adoc b/documentation/asciidoc/accessories/ssds/about.adoc
new file mode 100644
index 000000000..abccf00e9
--- /dev/null
+++ b/documentation/asciidoc/accessories/ssds/about.adoc
@@ -0,0 +1,32 @@
+== About
+
+.A 512GB Raspberry Pi SSD
+image::images/ssd.png[width="80%"]
+
+SSD quality is a critical factor in determining the overall user experience for a Raspberry Pi.
+Raspberry Pi provides official SSDs that are tested to ensure compatibility with Raspberry Pi models and peripherals.
+
+Raspberry Pi SSDs are available in the following sizes:
+
+* 256GB
+* 512GB
+
+To use an SSD with your Raspberry Pi, you need a Raspberry Pi 5-compatible M.2 adapter, such as the xref:../accessories/m2-hat-plus.adoc[Raspberry Pi M.2 HAT+].
+
+== Specifications
+
+Raspberry Pi SSDs are PCIe Gen 3-compliant.
+
+Raspberry Pi SSDs use the NVMe 1.4 register interface and command set.
+
+Raspberry Pi SSDs use the M.2 2230 form factor.
+
+The following table describes the read and write speeds of Raspberry Pi SSDs using 4KB of random data:
+
+[cols="1,2,2"]
+|===
+| Size | Read Speed | Write Speed
+
+| 256GB | 40,000 IOPS | 70,000 IOPS
+| 512GB | 50,000 IOPS | 90,000 IOPS
+|===
diff --git a/documentation/asciidoc/accessories/ssds/images/ssd.png b/documentation/asciidoc/accessories/ssds/images/ssd.png
new file mode 100644
index 000000000..25bbdc3a7
Binary files /dev/null and b/documentation/asciidoc/accessories/ssds/images/ssd.png differ
diff --git a/documentation/asciidoc/accessories/touch-display-2.adoc b/documentation/asciidoc/accessories/touch-display-2.adoc
new file mode 100644
index 000000000..982c35d56
--- /dev/null
+++ b/documentation/asciidoc/accessories/touch-display-2.adoc
@@ -0,0 +1 @@
+include::touch-display-2/about.adoc[]
diff --git a/documentation/asciidoc/accessories/touch-display-2/about.adoc b/documentation/asciidoc/accessories/touch-display-2/about.adoc
new file mode 100644
index 000000000..19c2dedef
--- /dev/null
+++ b/documentation/asciidoc/accessories/touch-display-2/about.adoc
@@ -0,0 +1,221 @@
+The https://www.raspberrypi.com/products/touch-display-2/[Raspberry Pi Touch Display 2] is a portrait orientation touchscreen LCD (with rotation options) designed for interactive projects like tablets, entertainment systems, and information dashboards.
+
+.The Raspberry Pi Touch Display 2
+image::images/touch-display-2-hero.jpg[width="80%"]
+
+== Specifications
+
+This section describes the physical characteristics and capabilities of Touch Display 2, including dimensions, features, and hardware.
+
+=== Dimensions
+
+The Touch Display 2 is available in two sizes: 5-inch and 7-inch (measured diagonally). Aside from the physical size, these two displays have identical features and functionality. The following table summarises the dimensions of these two displays:
+
+[cols="1,1,1,1,1"]
+|===
+|
+|*Depth*
+|*Outline dimensions*
+|*Viewing area*
+|*Active area*
+
+|*5-inch display*
+|16 mm
+|143.5 x 91.5 mm
+|111.5 x 63 mm
+|110.5 x 62 mm
+
+|*7-inch display*
+|15 mm
+|189.5 x 120 mm
+|155.5 x 88 mm
+|154.5 x 87 mm
+|===
+
+=== Features
+Touch Display 2 (both 5-inch and 7-inch) includes the following features:
+
+* **720 x 1280 pixel resolution.** High-definition output.
+* **24-bit RGB display.** Capable of showing over 16 million colours.
+* **Multitouch.** Supports up to five simultaneous touch points.
+* **Mouse-equivalence.** Supports full desktop control without a physical mouse, for example, selecting, dragging, scrolling, and long-pressing for menus.
+* **On-screen keyboard.** Supports a visual keyboard in place of a physical keyboard.
+* **Integrated power.** Powered directly by the host Raspberry Pi, requiring no separate power supply.
+
+=== Hardware
+
+The Touch Display 2 box contains the following parts:
+
+- A Touch Display 2
+- Eight M2.5 screws
+- A 15-way to 15-way FFC
+- A 22-way to 15-way FFC for Raspberry Pi 5
+- A GPIO power cable
+
+The following image shows these items from top to bottom, left to right.
+
+.Parts included in the Touch Display 2 box
+image::images/touch-display-2-whats-in-the-booooox.jpg["Parts included in the Touch Display 2 box", width="80%"]
+
+=== Connectors
+
+The Touch Display 2 connects to a Raspberry Pi using:
+
+- A **DSI connector** for video and touch data.
+- The **GPIO header** for power.
+
+To make the DSI connection, use a **Flat Flexible Cable (FFC)** included with your display. The type of FFC you need depends on your Raspberry Pi model:
+
+- For **Raspberry Pi 5**, use the **22-way to 15-way FFC**.
+- For all other Raspberry Pi models, use the **15-way to 15-way FFC**.
+
+The Touch Display 2 is compatible with all models of Raspberry Pi from Raspberry Pi 1B+ onwards, except the Zero series and Keyboard series, which lack a DSI connector.
+
+== Connect to Raspberry Pi
+
+After determining the correct FFC for your Raspberry Pi model, you can connect your Touch Display 2 to your Raspberry Pi. After completing the following steps, you can reconnect your Raspberry Pi to power. It can take up to one minute for Raspberry Pi OS to start displaying output to the Touch Display 2 screen.
+
+.A Raspberry Pi 5 connected and mounted to the Touch Display 2
+image::images/touch-display-2-installation-diagram.png["A Raspberry Pi 5 connected and mounted to the Touch Display 2", width="80%"]
+
+IMPORTANT: Disconnect your Raspberry Pi from power before completing the following steps.
+
+=== Step 1. Connect FFC to Touch Display 2
+
+. Slide the retaining clip outwards from both sides of the FFC connector on the Touch Display 2.
+. Insert one 15-way end of your FFC into the Touch Display 2 FFC connector, with the metal contacts facing upwards, away from the Touch Display 2.
+ - If you're connecting to a Raspberry Pi 5, and therefore using the **22-way to 15-way FFC**, the 22-way end is the smaller end of the cable. Insert the larger end of the cable into the Touch Display 2 FFC connector.
+ - If you're using the **15-way to 15-way FFC**, insert either end of the cable into the Touch Display 2 FFC connector.
+. Hold the FFC firmly in place and simultaneously push the retaining clip back in to the Touch Display 2 FFC connector from both sides.
+
+=== Step 2. Connect FFC to Raspberry Pi
+
+. Slide the retaining clip upwards from both sides of the DSI connector of your Raspberry Pi.
+ - This port should be marked with some variation of the term **DISPLAY**, **CAM/DISP**, or **DISP**.
+ - If your Raspberry Pi has multiple DSI connectors, we recommend using the port labelled **1**.
+. Insert the other end of your FFC into the Raspberry Pi DSI connector, with the metal contacts facing the Ethernet and USB-A ports.
+. Hold the FFC firmly in place and simultaneously push the retaining clip back down on the FFC connector of the Raspberry Pi to secure the cable.
+
+=== Step 3. Connect the GPIO power cable
+
+. Plug the smaller end of the GPIO power cable into the **J1** port on the Touch Display 2.
+. Connect the three-pin end of the GPIO power cable to your xref:../computers/raspberry-pi.adoc#gpio[Raspberry Pi's GPIO].
+
+This connects the red cable (5 V power) to pin 2 and the black cable (ground) to pin 6. Viewed from above, with the Ethernet and USB-A ports facing down, these pins are located in the top-right corner of the board, with pin 2 in the top right-most position.
+
+.The GPIO connection to the Touch Display 2
+image::images/touch-display-2-gpio-connection.png[The GPIO connection to the Touch Display 2, width="40%"]
+
+WARNING: Connecting the power cable incorrectly might cause damage to the display.
+
+=== Step 4. Mount your Raspberry Pi to the Touch Display 2 (optional)
+
+Optionally, use the included M2.5 screws to mount your Raspberry Pi to the back of your Touch Display 2.
+
+. Align the four corner stand-offs of your Raspberry Pi with the four mounting points that surround the FFC connector and J1 port on the back of the Touch Display 2.
+. Insert the M2.5 screws (included) into the four corner stand-offs and tighten until your Raspberry Pi is secure.
+
+Take care not to pinch the FFC.
+
+== Use an on-screen keyboard
+
+Raspberry Pi OS **Bookworm** and later already includes the **Squeekboard on-screen keyboard**. With a Touch Display 2 attached, the keyboard automatically appears when you can enter text, and automatically disappears when you can't.
+
+For applications that don't support text entry detection, you can manually show or hide the keyboard using the keyboard icon at the right side of the taskbar. You can also permanently show or hide the on-screen keyboard using the Raspberry Pi graphical interface or the command line.
+
+- **Raspberry Pi desktop interface:** From the Raspberry Pi menu, go to **Preferences > Raspberry Pi Configuration > Display** and choose your on-screen keyboard setting.
+- **Command line:** Open a terminal and enter `sudo raspi-config`. Navigate to the **Display** section of `raspi-config` and then choose your keyboard setting.
+
+== Change screen orientation
+
+You can change the orientation behaviour of the Touch Display 2, both with a desktop and without a desktop. This is useful if you want to physically rotate the screen or mount it in a landscape position.
+
+You have four rotation options:
+
+- **0** maintains the default display position, which is a portrait orientation.
+- **90** rotates the display 90 degrees to the right (clockwise), making it a landscape orientation.
+- **180** rotates the display 180 degrees to the right (clockwise), which flips the display upside down.
+- **270** rotates the display 270 degrees to the right (clockwise), which is the same as rotating the display 90 degrees to the left (counterclockwise), making it a landscape orientation.
+
+=== With a desktop
+If you have the Raspberry Pi OS desktop running, you can rotate the display through the **Screen Configuration** tool:
+
+. Go to **Preferences > Screen Configuration**. This opens the layout editor where you can see your connected displays.
+. Right-click the rectangle in the layout editor that represents your Touch Display 2 (likely labelled `DSI-1`).
+. Select **Orientation**.
+. Choose a rotation: *0°*, *90°*, *180°*, or *270°*. This rotates the display by the specified number of degrees to the right.
+
+=== Without a desktop
+
+To rotate the display without a desktop, edit the `/boot/firmware/cmdline.txt` file, which contains parameters that Raspberry Pi OS reads when it boots. Add the following to the end of `cmdline.txt`, replacing `<degrees>` with the number of degrees to rotate by (`0`, `90`, `180`, or `270`):
+
+[source,ini]
+----
+video=DSI-1:720x1280@60,rotate=<degrees>
+----
+
+NOTE: You can't rotate the DSI display separately from the HDMI display with `cmdline.txt`. When you use DSI and HDMI simultaneously, they share the same rotation value.
+
+== Customise touchscreen settings
+
+You can use the Device Tree overlay to tell Raspberry Pi OS how to configure the Touch Display 2 at boot.
+
+- For the 5-inch display, the overlay is called `vc4-kms-dsi-ili9881-5inch`.
+- For the 7-inch display, the overlay is called `vc4-kms-dsi-ili9881-7inch`.
+
+You can modify the Device Tree overlay in the boot configuration file (`/boot/firmware/config.txt`).
+
+Open `/boot/firmware/config.txt` and then add the required Device Tree parameters to the `dtoverlay` line, separated by commas.
+
+- Booleans (`invx`, `invy`, `swapxy`, and `disable_touch`) default to true if present, but you can set them to false using the suffix `=0`.
+- Integers (`sizex` and `sizey`) require a number, for example, `sizey=240`.
+
+See the table below for details.
+
+=== Device Tree options
+
+|===
+| Parameter | Action
+
+| `sizex`
+| Sets the touch horizontal resolution (default 720)
+
+| `sizey`
+| Sets the touch vertical resolution (default 1280)
+
+| `invx`
+| Inverts the touch X-axis (left/right)
+
+| `invy`
+| Inverts the touch Y-axis (up/down)
+
+| `swapxy`
+| Swaps the touch X and Y axes (rotate 90° logically)
+
+| `disable_touch`
+| Disables the touchscreen functionality
+|===
+
+=== Example
+
+In the following example, `invx` flips the X axis and `invy` flips the Y axis for a 7-inch Touch Display 2:
+
+[source,ini]
+----
+dtoverlay=vc4-kms-dsi-ili9881-7inch,invx,invy
+----
+
+
+== Connect to a Compute Module
+
+Unlike Raspberry Pi single board computers (SBC), which automatically detect the official Raspberry Pi Touch displays, Raspberry Pi Compute Modules don't automatically detect connected devices; you must tell them what display is attached.
+
+This is because the connections between the SoC and DSI connectors on a Raspberry Pi are fixed and the system knows what hardware is connected; auto-detection ensures that the correct Device Tree settings are passed to the Linux kernel, so the display works without additional configuration.
+
+Compute Modules, intended for industrial and custom applications, expose all GPIOs and interfaces. This provides greater flexibility for connecting hardware, but means that a Compute Module can't automatically detect devices like the Touch Display 2. This means that, for Compute Modules, the Device Tree fragments, which tell the kernel how to interact with the display, must be manually specified. You can do this in three ways:
+
+- By adding an overlay entry in `config.txt`. This is the simplest option. For configuration instructions, see the xref:../computers/compute-module.adoc#attaching-the-touch-display-2-lcd-panel[Compute Module hardware documentation].
+- Using a custom base device tree file. This is an advanced method not covered in this online documentation.
+- Using a HAT EEPROM (if present).
+
+
diff --git a/documentation/asciidoc/accessories/touch-display-2/images/touch-display-2-gpio-connection.png b/documentation/asciidoc/accessories/touch-display-2/images/touch-display-2-gpio-connection.png
new file mode 100644
index 000000000..41e59bc42
Binary files /dev/null and b/documentation/asciidoc/accessories/touch-display-2/images/touch-display-2-gpio-connection.png differ
diff --git a/documentation/asciidoc/accessories/touch-display-2/images/touch-display-2-hero.jpg b/documentation/asciidoc/accessories/touch-display-2/images/touch-display-2-hero.jpg
new file mode 100644
index 000000000..45779c6e2
Binary files /dev/null and b/documentation/asciidoc/accessories/touch-display-2/images/touch-display-2-hero.jpg differ
diff --git a/documentation/asciidoc/accessories/touch-display-2/images/touch-display-2-installation-diagram.png b/documentation/asciidoc/accessories/touch-display-2/images/touch-display-2-installation-diagram.png
new file mode 100644
index 000000000..f3167f5e6
Binary files /dev/null and b/documentation/asciidoc/accessories/touch-display-2/images/touch-display-2-installation-diagram.png differ
diff --git a/documentation/asciidoc/accessories/touch-display-2/images/touch-display-2-whats-in-the-booooox.jpg b/documentation/asciidoc/accessories/touch-display-2/images/touch-display-2-whats-in-the-booooox.jpg
new file mode 100644
index 000000000..e28fd789c
Binary files /dev/null and b/documentation/asciidoc/accessories/touch-display-2/images/touch-display-2-whats-in-the-booooox.jpg differ
diff --git a/documentation/asciidoc/accessories/tv-hat.adoc b/documentation/asciidoc/accessories/tv-hat.adoc
index d8d9232fe..be04ece4c 100644
--- a/documentation/asciidoc/accessories/tv-hat.adoc
+++ b/documentation/asciidoc/accessories/tv-hat.adoc
@@ -1,61 +1 @@
-== Getting Started
-
-The TV HAT has an on-board DVB-T2 tuner that allows you to receive and decode digital television streams on your Raspberry Pi. Then you can watch these streams on the Raspberry Pi or on any computer connected to the same network as the Raspberry Pi.
-
-The software we recommend to decode the streams (known as multiplexes, or muxes for short) and view content is called TVHeadend, and instructions for setting it up are below. The TV HAT can decode one mux at a time, and each mux can contain several channels to choose from. Content can either be viewed on the Raspberry Pi to which the TV-HAT is connected, or sent to another device on the same network.
-
-*You will need:*
-
-* A TV aerial
-* A Raspberry Pi TV HAT with its stand-offs, screws, and aerial adaptor
-* A Raspberry Pi that is connected to the internet (plus a mouse, keyboard, and display, if
-you are not accessing the Raspberry Pi remotely)
-* Optional: another computer connected to the same network
-
-== Setup Instructions
-
-*On your Raspberry Pi:*
-
-* Connect the aerial adaptor to the TV HAT:
- ** With the adaptor pointing away from the USB ports, press the HAT gently down over the Raspberry Pi's GPIO pins
- ** Place the spacers at two or three of the corners of the HAT, and tighten the screws through the mounting
-holes to hold them in place.
-* Connect the TV HAT's aerial adaptor to the cable from your TV aerial.
-* Set up the Raspberry Pi with the newest version of the Raspberry Pi OS operating system, which you can download from our https://www.raspberrypi.com/software/operating-systems/#raspberry-pi-os-32-bit[downloads page].
-* Start up your Raspberry Pi, open a terminal window, and run the following two commands to install the `tvheadend` software:
-+
-----
-sudo apt update
-sudo apt install tvheadend
-----
-
-* During the `tvheadend` installation, you will be asked to choose an administrator account name and password. You'll need these later, so make sure to pick something you can remember.
-
-*In a web browser on a different computer:*
-
-* Type the following into the address bar: `+http://raspberrypi.local:9981/extjs.html+`
-* This should connect to `tvheadend` running on the Raspberry Pi.
- ** If the address above doesn't work, you'll need to find out the IP address of the Raspberry Pi. Open a terminal window on your Raspberry Pi, and run the command `hostname -I`
- ** You'll see the IP address in one or two formats: a string of four numbers separated by dots, then, if you are on a IPv6 network, a space, then a long string of numbers and letters separated by colons.
- ** Note down everything before the space (the four numbers and dots), and type this into the address bar instead of the `raspberrypi.local` part of the address.
-* Once you have connected to `tvheadend` via the browser, you will be prompted to sign in. Use the account name and password you chose when you installed `tvheadend` on the Raspberry Pi. A setup wizard should appear.
-* First, set the language you want `tvheadend` to use (*English (GB)* worked for us; we have not yet tested other languages).
-* Next, set up network, user, and administrator access. If you don't have specific preferences, leave *Allowed network* blank, and enter an asterisk (*) in the *username* and *password* fields. This will let anyone connected to your local network access `tvheadend`.
-* You should see a window titled *Network settings*. Under *Network 2*, you should see `Tuner: Sony CDX2880 #0 : DVB-T #0`. For *Network type*, choose `DVB-T Network`.
-* The next window is *Assign predefined muxes to networks*; here, you select the TV stream to receive and decode. Under Network 1, for predefined muxes, select your local TV transmitter.
- ** Your local transmitter can be found using the https://www.freeview.co.uk/help[Freeview website]. Enter your postcode to see which transmitter should give you a good signal.
-* When you click *Save & Next*, the software will start scanning for the selected mux, and will show a progress bar. After about two minutes, you should see something like:
-+
-----
-Found muxes: 8
-Found services: 172
-----
-
-* In the next window, titled *Service mapping*, tick all three boxes: *Map all services*, *Create provider tags*, and *Create network tags*.
-* Next you should see a list of TV channels you can watch, along with the programmes they're currently showing.
-* To watch a TV channel in the browser, click the little TV icon to the left of the channel listing, just to the right of the *i* icon. This brings up an in-browser media player. Depending on the decoding facilities built into your browser and the type of stream being played, you may find that playback can be jerky. In these cases, we recommend using a local media player as the playback application.
-* To watch a TV channel in a local media player, e.g. VLC https://www.videolan.org/vlc[www.videolan.org/vlc], you'll need to download it as a stream. Click the `i` icon to the left of a channel listing to bring up the information panel for that channel. Here you can see a stream file that you can download.
-
-`tvheadend` is supported by numerous apps, such as TvhClient for iOS, which will play TV from the Raspberry Pi. OMXPlayer, supplied with Raspberry Pi OS, also supports viewing TV streams from `tvheadend`. Kodi, available in the Raspberry Pi OS repos, provides excellent facilities for playing live TV, along with previously recorded channels and timed series recording.
-
-To discuss other features or uses of the TV HAT, please visit our https://forums.raspberrypi.com/[forums].
+include::tv-hat/about-tv-hat.adoc[]
diff --git a/documentation/asciidoc/accessories/tv-hat/about-tv-hat.adoc b/documentation/asciidoc/accessories/tv-hat/about-tv-hat.adoc
new file mode 100644
index 000000000..e1cb7efa6
--- /dev/null
+++ b/documentation/asciidoc/accessories/tv-hat/about-tv-hat.adoc
@@ -0,0 +1,77 @@
+[[tv-hat]]
+== About
+
+.The Raspberry Pi TV HAT
+image::images/tv-hat.jpg[width="80%"]
+
+The Raspberry Pi TV HAT allows you to receive digital terrestrial TV broadcast systems, using an onboard DVB-T and DVB-T2 tuner, on a Raspberry Pi. With the board you can receive and view TV on a Raspberry Pi, or create a TV server that allows you to stream received TV over a network to other devices. The TV HAT can be used with any 40-pin Raspberry Pi board as a server for other devices on the network. Performance when receiving and viewing TV on the Pi itself can vary, and we recommend using a Raspberry Pi 2 or later for this purpose.
+
+Key features:
+
+* Sony CXD2880 TV tuner
+* Supported TV standards: DVB-T2, DVB-T
+* Reception frequency: VHF III, UHF IV, UHF V
+* Channel bandwidth:
+** DVB-T2: 1.7MHz, 5MHz, 6MHz, 7MHz, 8MHz
+** DVB-T: 5MHz, 6MHz, 7MHz, 8MHz
+
+== About DVB-T
+
+WARNING: The TV HAT does not support ATSC, the digital TV standard used in North America.
+
+Digital Video Broadcasting – Terrestrial (DVB-T) is the DVB European-based consortium standard for the broadcast transmission of digital terrestrial television. There are other digital TV standards used elsewhere in the world, e.g. ATSC which is used in North America. However the TV HAT only supports the DVB-T and DVB-T2 standards.
+
+.DTT system implemented or adopted (Source: DVB/EBU/BNE DTT Deployment Database, March 2023)
+image::images/dvbt-map.png[width="80%"]
+
+[[tv-hat-installation]]
+== Install
+
+Follow our xref:../computers/getting-started.adoc[getting started] documentation and set up the Raspberry Pi with the newest version of Raspberry Pi OS.
+
+Connect the aerial adaptor to the TV HAT and with the adaptor pointing away from the USB ports, press the HAT gently down over the Raspberry Pi's GPIO pins, and place the spacers at two or three of the corners of the HAT, and tighten the screws through the mounting holes to hold them in place. Then connect the TV HAT's aerial adaptor to the cable from your TV aerial.
+
+The software we recommend to decode the streams (known as multiplexes, or muxes for short) and view content is called TVHeadend. The TV HAT can decode one mux at a time, and each mux can contain several channels to choose from. Content can either be viewed on the Raspberry Pi to which the TV-HAT is connected, or sent to another device on the same network.
+
+Boot your Raspberry Pi, open a terminal window, and run the following two commands to install the `tvheadend` software:
+
+[source,console]
+----
+$ sudo apt update
+$ sudo apt install tvheadend
+----
+
+During the `tvheadend` installation, you will be asked to choose an administrator account name and password. You'll need these later, so make sure to pick something you can remember.
+
+On another computer on your network, open up a web browser and type the following into the address bar: `http://raspberrypi.local:9981/extjs.html`
+
+This should connect to `tvheadend` running on the Raspberry Pi. Once you have connected to `tvheadend` via the browser, you will be prompted to sign in using the account name and password you chose when you installed `tvheadend` on the Raspberry Pi.
+
+A setup wizard should appear.
+
+You will first be asked to set the language you want `tvheadend` to use, and then to set up network, user, and administrator access. If you don't have specific preferences, leave *Allowed network* blank, and enter an asterisk (*) in the *username* and *password* fields. This will let anyone connected to your local network access `tvheadend`.
+
+You should see a window titled *Network settings*. Under *Network 2*, you should see `Tuner: Sony CDX2880 #0 : DVB-T #0`. For *Network type*, choose `DVB-T Network`. The next window is *Assign predefined muxes to networks*; here, you select the TV stream to receive and decode. Under Network 1, for predefined muxes, select your local TV transmitter.
+
+NOTE: Your local transmitter can be found using the https://www.freeview.co.uk/help[Freeview website]. Enter your postcode to see which transmitter should give you a good signal.
+
+When you click *Save & Next*, the software will start scanning for the selected mux, and will show a progress bar. After about two minutes, you should see something like:
+
+[source,console]
+----
+Found muxes: 8
+Found services: 172
+----
+
+In the next window, titled *Service mapping*, tick all three boxes: *Map all services*, *Create provider tags*, and *Create network tags*. You should see a list of TV channels you can watch, along with the programmes they're currently showing.
+
+To watch a TV channel in the browser, click the little TV icon to the left of the channel listing, just to the right of the *i* icon. This brings up an in-browser media player. Depending on the decoding facilities built into your browser and the type of stream being played, you may find that playback can be jerky. In these cases, we recommend using a local media player as the playback application.
+
+To watch a TV channel in a local media player, e.g. https://www.videolan.org/vlc[VLC], you'll need to download it as a stream. Click the `i` icon to the left of a channel listing to bring up the information panel for that channel. Here you can see a stream file that you can download.
+
+NOTE: `tvheadend` is supported by numerous apps, such as TvhClient for iOS, which will play TV from the Raspberry Pi.
+
+== Mechanical Drawing
+
+.The Raspberry Pi TV HAT
+image::images/mechanical.png[]
diff --git a/documentation/asciidoc/accessories/tv-hat/images/dvbt-map.png b/documentation/asciidoc/accessories/tv-hat/images/dvbt-map.png
new file mode 100644
index 000000000..f38d3f895
Binary files /dev/null and b/documentation/asciidoc/accessories/tv-hat/images/dvbt-map.png differ
diff --git a/documentation/asciidoc/accessories/tv-hat/images/mechanical.png b/documentation/asciidoc/accessories/tv-hat/images/mechanical.png
new file mode 100644
index 000000000..1f4aac206
Binary files /dev/null and b/documentation/asciidoc/accessories/tv-hat/images/mechanical.png differ
diff --git a/documentation/asciidoc/accessories/tv-hat/images/tv-hat.jpg b/documentation/asciidoc/accessories/tv-hat/images/tv-hat.jpg
new file mode 100644
index 000000000..978db8980
Binary files /dev/null and b/documentation/asciidoc/accessories/tv-hat/images/tv-hat.jpg differ
diff --git a/documentation/asciidoc/accessories/usb-3-hub.adoc b/documentation/asciidoc/accessories/usb-3-hub.adoc
new file mode 100644
index 000000000..44c1bec1a
--- /dev/null
+++ b/documentation/asciidoc/accessories/usb-3-hub.adoc
@@ -0,0 +1 @@
+include::usb-3-hub/about.adoc[]
diff --git a/documentation/asciidoc/accessories/usb-3-hub/about.adoc b/documentation/asciidoc/accessories/usb-3-hub/about.adoc
new file mode 100644
index 000000000..c67d1f770
--- /dev/null
+++ b/documentation/asciidoc/accessories/usb-3-hub/about.adoc
@@ -0,0 +1,17 @@
+== About
+
+The https://www.raspberrypi.com/products/usb-3-hub/[Raspberry Pi USB 3 Hub] provides extra connectivity for your devices, extending one USB-A port into four. An optional external USB-C power input supports high-power peripherals. You can use the USB 3 Hub to power low-power peripherals, such as most mice and keyboards, using no external power.
+
+.The Raspberry Pi USB 3.0 Hub
+image::images/usb-3-hub-hero.png[width="80%"]
+
+== Specification
+
+* 1× upstream USB 3.0 Type-A male connector on 8cm captive cable
+* 4× downstream USB 3.0 Type-A ports
+* Data transfer speeds up to 5Gbps
+* Power transfer up to 900 mA (4.5 W); optional external USB-C power input provides up to 5V @ 3A for high-power downstream peripherals
+* Compatible with USB 3.0 and USB 2.0 Type-A host ports
+
+.Physical specification
+image::images/usb-3-hub-physical-specification.png[]
diff --git a/documentation/asciidoc/accessories/usb-3-hub/images/usb-3-hub-hero.png b/documentation/asciidoc/accessories/usb-3-hub/images/usb-3-hub-hero.png
new file mode 100644
index 000000000..7f3bc2b9a
Binary files /dev/null and b/documentation/asciidoc/accessories/usb-3-hub/images/usb-3-hub-hero.png differ
diff --git a/documentation/asciidoc/accessories/usb-3-hub/images/usb-3-hub-physical-specification.png b/documentation/asciidoc/accessories/usb-3-hub/images/usb-3-hub-physical-specification.png
new file mode 100644
index 000000000..7b469d14c
Binary files /dev/null and b/documentation/asciidoc/accessories/usb-3-hub/images/usb-3-hub-physical-specification.png differ
diff --git a/documentation/asciidoc/computers/ai.adoc b/documentation/asciidoc/computers/ai.adoc
new file mode 100644
index 000000000..af8f6182d
--- /dev/null
+++ b/documentation/asciidoc/computers/ai.adoc
@@ -0,0 +1,2 @@
+include::ai/getting-started.adoc[]
+
diff --git a/documentation/asciidoc/computers/ai/getting-started.adoc b/documentation/asciidoc/computers/ai/getting-started.adoc
new file mode 100644
index 000000000..3a9b7263c
--- /dev/null
+++ b/documentation/asciidoc/computers/ai/getting-started.adoc
@@ -0,0 +1,219 @@
+== Getting Started
+
+This guide will help you set up a Hailo NPU with your Raspberry Pi 5. This will enable you to run `rpicam-apps` camera demos using an AI neural network accelerator.
+
+=== Prerequisites
+
+For this guide, you will need the following:
+
+* a Raspberry Pi 5
+* one of the following NPUs:
+** a xref:../accessories/ai-kit.adoc[Raspberry Pi AI Kit], which includes:
+*** an M.2 HAT+
+*** a pre-installed Hailo-8L AI module
+** a xref:../accessories/ai-hat-plus.adoc[Raspberry Pi AI HAT+]
+* a 64-bit Raspberry Pi OS Bookworm install
+* any official Raspberry Pi camera (e.g. Camera Module 3 or High Quality Camera)
+
+=== Hardware setup
+
+. Attach the camera to your Raspberry Pi 5 board following the instructions at xref:../accessories/camera.adoc#install-a-raspberry-pi-camera[Install a Raspberry Pi Camera]. You can skip reconnecting your Raspberry Pi to power, because you'll need to disconnect your Raspberry Pi from power for the next step.
+
+. Depending on your NPU, follow the installation instructions for the xref:../accessories/ai-kit.adoc#ai-kit-installation[AI Kit] or xref:../accessories/ai-hat-plus.adoc#ai-hat-plus-installation[AI HAT+], to get your hardware connected to your Raspberry Pi 5.
+
+. Follow the instructions to xref:raspberry-pi.adoc#pcie-gen-3-0[enable PCIe Gen 3.0]. This step is optional, but _highly recommended_ to achieve the best performance with your NPU.
+
+. Install the dependencies required to use the NPU. Run the following command from a terminal window:
++
+[source,console]
+----
+$ sudo apt install hailo-all
+----
++
+This installs the following dependencies:
++
+* Hailo kernel device driver and firmware
+* HailoRT middleware software
+* Hailo Tappas core post-processing libraries
+* The `rpicam-apps` Hailo post-processing software demo stages
+
+. Finally, reboot your Raspberry Pi with `sudo reboot` for these settings to take effect.
+
+. To ensure everything is running correctly, run the following command:
++
+[source,console]
+----
+$ hailortcli fw-control identify
+----
++
+If you see output similar to the following, you've successfully installed the NPU and its software dependencies:
++
+----
+Executing on device: 0000:01:00.0
+Identifying board
+Control Protocol Version: 2
+Firmware Version: 4.17.0 (release,app,extended context switch buffer)
+Logger Version: 0
+Board Name: Hailo-8
+Device Architecture: HAILO8L
+Serial Number: HLDDLBB234500054
+Part Number: HM21LB1C2LAE
+Product Name: HAILO-8L AI ACC M.2 B+M KEY MODULE EXT TMP
+----
++
+NOTE: AI HAT+ devices may show `<N/A>` for `Serial Number`, `Part Number` and `Product Name`. This is expected, and does not impact functionality.
++
+Additionally, you can run `dmesg | grep -i hailo` to check the kernel logs, which should yield output similar to the following:
++
+----
+[ 3.049657] hailo: Init module. driver version 4.17.0
+[ 3.051983] hailo 0000:01:00.0: Probing on: 1e60:2864...
+[ 3.051989] hailo 0000:01:00.0: Probing: Allocate memory for device extension, 11600
+[ 3.052006] hailo 0000:01:00.0: enabling device (0000 -> 0002)
+[ 3.052011] hailo 0000:01:00.0: Probing: Device enabled
+[ 3.052028] hailo 0000:01:00.0: Probing: mapped bar 0 - 000000000d8baaf1 16384
+[ 3.052034] hailo 0000:01:00.0: Probing: mapped bar 2 - 000000009eeaa33c 4096
+[ 3.052039] hailo 0000:01:00.0: Probing: mapped bar 4 - 00000000b9b3d17d 16384
+[ 3.052044] hailo 0000:01:00.0: Probing: Force setting max_desc_page_size to 4096 (recommended value is 16384)
+[ 3.052052] hailo 0000:01:00.0: Probing: Enabled 64 bit dma
+[ 3.052055] hailo 0000:01:00.0: Probing: Using userspace allocated vdma buffers
+[ 3.052059] hailo 0000:01:00.0: Disabling ASPM L0s
+[ 3.052070] hailo 0000:01:00.0: Successfully disabled ASPM L0s
+[ 3.221043] hailo 0000:01:00.0: Firmware was loaded successfully
+[ 3.231845] hailo 0000:01:00.0: Probing: Added board 1e60-2864, /dev/hailo0
+----
+
+. To ensure the camera is operating correctly, run the following command:
++
+[source,console]
+----
+$ rpicam-hello -t 10s
+----
++
+This starts the camera and shows a preview window for ten seconds. Once you have verified everything is installed correctly, it's time to run some demos.
+
+=== Demos
+
+The `rpicam-apps` suite of camera applications implements a xref:camera_software.adoc#post-processing-with-rpicam-apps[post-processing framework]. This section contains a few demo post-processing stages that highlight some of the capabilities of the NPU.
+
+The following demos use xref:camera_software.adoc#rpicam-hello[`rpicam-hello`], which by default displays a preview window. However, you can use other `rpicam-apps` instead, including xref:camera_software.adoc#rpicam-vid[`rpicam-vid`] and xref:camera_software.adoc#rpicam-still[`rpicam-still`]. You may need to add or modify some command line options to make the demo commands compatible with alternative applications.
+
+To begin, run the following command to install the latest `rpicam-apps` software package:
+
+[source,console]
+----
+$ sudo apt update && sudo apt install rpicam-apps
+----
+
+==== Object Detection
+
+This demo displays bounding boxes around objects detected by a neural network. To disable the viewfinder, use the xref:camera_software.adoc#nopreview[`-n`] flag. To return purely textual output describing the objects detected, add the `-v 2` option. Run the following command to try the demo on your Raspberry Pi:
+
+[source,console]
+----
+$ rpicam-hello -t 0 --post-process-file /usr/share/rpi-camera-assets/hailo_yolov6_inference.json
+----
+
+Alternatively, you can try another model with different trade-offs in performance and efficiency.
+
+To run the demo with the Yolov8 model, run the following command:
+
+[source,console]
+----
+$ rpicam-hello -t 0 --post-process-file /usr/share/rpi-camera-assets/hailo_yolov8_inference.json
+----
+
+To run the demo with the YoloX model, run the following command:
+
+[source,console]
+----
+$ rpicam-hello -t 0 --post-process-file /usr/share/rpi-camera-assets/hailo_yolox_inference.json
+----
+
+To run the demo with the Yolov5 Person and Face model, run the following command:
+
+[source,console]
+----
+$ rpicam-hello -t 0 --post-process-file /usr/share/rpi-camera-assets/hailo_yolov5_personface.json
+----
+
+==== Image Segmentation
+
+This demo performs object detection and segments the object by drawing a colour mask on the viewfinder image. Run the following command to try the demo on your Raspberry Pi:
+
+[source,console]
+----
+$ rpicam-hello -t 0 --post-process-file /usr/share/rpi-camera-assets/hailo_yolov5_segmentation.json --framerate 20
+----
+
+==== Pose Estimation
+
+This demo performs 17-point human pose estimation, drawing lines connecting the detected points. Run the following command to try the demo on your Raspberry Pi:
+
+[source,console]
+----
+$ rpicam-hello -t 0 --post-process-file /usr/share/rpi-camera-assets/hailo_yolov8_pose.json
+----
+
+=== Alternative Package Versions
+
+The AI Kit and AI HAT+ do not function if there is a version mismatch between the Hailo software packages and device drivers. In addition, Hailo's neural network tooling may require a particular version for generated model files. If you require a specific version, complete the following steps to install the proper versions of all of the dependencies:
+
+. If you have previously used `apt-mark` to hold any of the relevant packages, you may need to unhold them:
++
+[source,console]
+----
+$ sudo apt-mark unhold hailo-tappas-core hailort hailo-dkms python3-hailort
+----
+
+. Install the required version of the software packages:
+
+[tabs]
+======
+4.19::
+To install version 4.19 of Hailo's neural network tooling, run the following commands:
++
+[source,console]
+----
+$ sudo apt install hailo-tappas-core=3.30.0-1 hailort=4.19.0-3 hailo-dkms=4.19.0-1 python3-hailort=4.19.0-2
+----
++
+[source,console]
+----
+$ sudo apt-mark hold hailo-tappas-core hailort hailo-dkms python3-hailort
+----
+
+4.18::
+To install version 4.18 of Hailo's neural network tooling, run the following commands:
++
+[source,console]
+----
+$ sudo apt install hailo-tappas-core=3.29.1 hailort=4.18.0 hailo-dkms=4.18.0-2
+----
++
+[source,console]
+----
+$ sudo apt-mark hold hailo-tappas-core hailort hailo-dkms
+----
+
+4.17::
+To install version 4.17 of Hailo's neural network tooling, run the following commands:
++
+[source,console]
+----
+$ sudo apt install hailo-tappas-core=3.28.2 hailort=4.17.0 hailo-dkms=4.17.0-1
+----
++
+[source,console]
+----
+$ sudo apt-mark hold hailo-tappas-core hailort hailo-dkms
+----
+======
+
+=== Further Resources
+
+Hailo has also created a set of demos that you can run on a Raspberry Pi 5, available in the https://github.com/hailo-ai/hailo-rpi5-examples[hailo-ai/hailo-rpi5-examples GitHub repository].
+
+You can find Hailo's extensive model zoo, which contains a large number of neural networks, in the https://github.com/hailo-ai/hailo_model_zoo/tree/master/docs/public_models/HAILO8L[hailo-ai/hailo_model_zoo GitHub repository].
+
+Check out the https://community.hailo.ai/[Hailo community forums and developer zone] for further discussions on the Hailo hardware and tooling.
diff --git a/documentation/asciidoc/computers/camera/camera_usage.adoc b/documentation/asciidoc/computers/camera/camera_usage.adoc
index 57f6b294c..722f37c82 100644
--- a/documentation/asciidoc/computers/camera/camera_usage.adoc
+++ b/documentation/asciidoc/computers/camera/camera_usage.adoc
@@ -1,13 +1,19 @@
-== Introducing the Raspberry Pi Cameras
+This documentation describes how to use supported camera modules with our software tools. All Raspberry Pi cameras can record high-resolution photographs and full HD 1080p video (or better) with our software tools.
-There are now several official Raspberry Pi camera modules. The original 5-megapixel model was https://www.raspberrypi.com/news/camera-board-available-for-sale/[released] in 2013, it was followed by an 8-megapixel https://www.raspberrypi.com/products/camera-module-v2/[Camera Module 2] which was https://www.raspberrypi.com/news/new-8-megapixel-camera-board-sale-25/[released] in 2016. The latest camera model is the 12-megapixel https://raspberrypi.com/products/camera-module-3/[Camera Module 3] which was https://www.raspberrypi.com/news/new-autofocus-camera-modules/[released] in 2023. The original 5MP device is no longer available from Raspberry Pi.
+Raspberry Pi produces several official camera modules, including:
-Aditionally a 12-megapixel https://www.raspberrypi.com/products/raspberry-pi-high-quality-camera/[High Quality Camera] with CS- or M12-mount variants for use with external lenses was https://www.raspberrypi.com/news/new-product-raspberry-pi-high-quality-camera-on-sale-now-at-50/[released in 2020] and https://www.raspberrypi.com/news/new-autofocus-camera-modules/[2023] respectively. There is no infrared version of the HQ Camera.
+* the original 5-megapixel Camera Module 1 (discontinued)
+* the 8-megapixel https://www.raspberrypi.com/products/camera-module-v2/[Camera Module 2], with or without an infrared filter
+* the 12-megapixel https://raspberrypi.com/products/camera-module-3/[Camera Module 3], with both standard and wide lenses, with or without an infrared filter
+* the 12-megapixel https://www.raspberrypi.com/products/raspberry-pi-high-quality-camera/[High Quality Camera] with CS and M12 mount variants for use with external lenses
+* the 1.6-megapixel https://www.raspberrypi.com/products/raspberry-pi-global-shutter-camera/[Global Shutter Camera] for fast motion photography
+* the 12-megapixel https://www.raspberrypi.com/products/ai-camera/[AI Camera], which uses the Sony IMX500 imaging sensor to provide low-latency, high-performance AI capabilities to any camera application
-All of these cameras come in visible light and infrared versions, while the Camera Module 3 also comes as a standard or wide FoV model for a total of four different variants.
-
-Further details on the camera modules can be found in the xref:../accessories/camera.adoc#camera-modules[camera hardware] page.
+For more information about camera hardware, see the xref:../accessories/camera.adoc#about-the-camera-modules[camera hardware documentation].
-All Raspberry Pi cameras are capable of taking high-resolution photographs, along with full HD 1080p video, and can be fully controlled programmatically. This documentation describes how to use the camera in various scenarios, and how to use the various software tools.
+First, xref:../accessories/camera.adoc#install-a-raspberry-pi-camera[install your camera module]. Then, follow the guides in this section to put your camera module to use.
-Once you've xref:../accessories/camera.adoc#installing-a-raspberry-pi-camera[installed your camera module], there are various ways the cameras can be used. The simplest option is to use one of the provided camera applications, such as `libcamera-still` or `libcamera-vid`.
+[WARNING]
+====
+This guide no longer covers the _legacy camera stack_ which was available in Bullseye and earlier Raspberry Pi OS releases. The legacy camera stack, using applications like `raspivid`, `raspistill` and the original `Picamera` (_not_ `Picamera2`) Python library, has been deprecated for many years, and is now unsupported. If you are using the legacy camera stack, it will only have support for the Camera Module 1, Camera Module 2 and the High Quality Camera, and will never support any newer camera modules. Nothing in this document is applicable to the legacy camera stack.
+====
diff --git a/documentation/asciidoc/computers/camera/csi-2-usage.adoc b/documentation/asciidoc/computers/camera/csi-2-usage.adoc
index 8791dcc18..f3515ae94 100644
--- a/documentation/asciidoc/computers/camera/csi-2-usage.adoc
+++ b/documentation/asciidoc/computers/camera/csi-2-usage.adoc
@@ -1,32 +1,18 @@
-== Camera Serial Interface 2 (CSI2) "Unicam"
+== Unicam
-The SoC's used on the Raspberry Pi range all have two camera interfaces that support either CSI-2 D-PHY 1.1 or CCP2 (Compact Camera Port 2) sources. This interface is known by the codename "Unicam". The first instance of Unicam supports 2 CSI-2 data lanes, whilst the second supports 4. Each lane can run at up to 1Gbit/s (DDR, so the max link frequency is 500MHz).
+Raspberry Pi SoCs all have two camera interfaces that support either CSI-2 D-PHY 1.1 or Compact Camera Port 2 (CCP2) sources. This interface is known by the codename Unicam. The first instance of Unicam supports two CSI-2 data lanes, while the second supports four. Each lane can run at up to 1Gbit/s (DDR, so the max link frequency is 500MHz).
-However, the normal variants of the Raspberry Pi only expose the second instance, and route out _only_ 2 of the data lanes to the camera connector. The Compute Module range route out all lanes from both peripherals.
+Compute Modules and Raspberry Pi 5 route out all lanes from both peripherals. Other models prior to Raspberry Pi 5 only expose the second instance, routing out only two of the data lanes to the camera connector.
-=== Software Interfaces
+=== Software interfaces
-There are 3 independent software interfaces available for communicating with the Unicam peripheral:
-
-==== Firmware
-
-NOTE: This interface is available only when using the legacy camera stack.
-
-The closed source GPU firmware has drivers for Unicam and three camera sensors plus a bridge chip. They are the Raspberry Pi Camera v1.3 (Omnivision OV5647), Raspberry Pi Camera v2.1 (Sony IMX219), Raspberry Pi HQ camera (Sony IMX477), and an unsupported driver for the Toshiba TC358743 HDMI\->CSI2 bridge chip. There is no support for more recent cameras, such as the Camera Module 3 (Sony IMX708).
-
-This driver integrates the source driver, Unicam, ISP, and tuner control into a full camera stack delivering processed output images. It can be used via MMAL, OpenMAX IL and V4L2 using the bcm2835-v4l2 kernel module. Only Raspberry Pi cameras are supported via this interface.
-
-==== MMAL rawcam component
-
-NOTE: This interface is available only when using the legacy camera stack.
-
-This was an interim option before the V4L2 driver was available. The MMAL component `vc.ril.rawcam` allows receiving of the raw CSI2 data in the same way as the V4L2 driver, but all source configuration has to be done by userland over whatever interface the source requires. The raspiraw application is available on https://github.com/raspberrypi/raspiraw[github]. It uses this component and the standard I2C register sets for OV5647, IMX219, and ADV7282M to support streaming.
+The V4L2 software interface is the only means of communicating with the Unicam peripheral. There used to also be firmware and MMAL rawcam component interfaces, but these are no longer supported.
==== V4L2
NOTE: The V4L2 interface for Unicam is available only when using `libcamera`.
-There is a fully open source kernel driver available for the Unicam block; this is a kernel module called bcm2835-unicam. This interfaces to V4L2 subdevice drivers for the source to deliver the raw frames. This bcm2835-unicam driver controls the sensor, and configures the CSI-2 receiver so that the peripheral will write the raw frames (after Debayer) to SDRAM for V4L2 to deliver to applications. Except for this ability to unpack the CSI-2 Bayer formats to 16bits/pixel, there is no image processing between the image source (e.g. camera sensor) and bcm2835-unicam placing the image data in SDRAM.
+There is a fully open-source kernel driver available for the Unicam block; this kernel module, called `bcm2835-unicam`, interfaces with V4L2 subdevice drivers to deliver raw frames. This `bcm2835-unicam` driver controls the sensor and configures the Camera Serial Interface 2 (CSI-2) receiver. Peripherals write raw frames (after Debayer) to SDRAM for V4L2 to deliver to applications. There is no image processing between the camera sensor capturing the image and the `bcm2835-unicam` driver placing the image data in SDRAM except for Bayer unpacking to 16bits/pixel.
----
|------------------------|
@@ -47,7 +33,7 @@ ccp2 | |
|-----------------|
----
-Mainline Linux has a range of existing drivers. The Raspberry Pi kernel tree has some additional drivers and device tree overlays to configure them that have all been tested and confirmed to work. They include:
+Mainline Linux contains a range of existing drivers. The Raspberry Pi kernel tree has some additional drivers and Device Tree overlays to configure them:
|===
| Device | Type | Notes
@@ -68,6 +54,10 @@ Mainline Linux has a range of existing drivers. The Raspberry Pi kernel tree has
| 12MP Camera
| Raspberry Pi Camera Module 3
+| Sony IMX296
+| 1.6MP Camera
+| Raspberry Pi Global Shutter Camera Module
+
| Toshiba TC358743
| HDMI to CSI-2 bridge
|
@@ -81,62 +71,62 @@ Mainline Linux has a range of existing drivers. The Raspberry Pi kernel tree has
| Supported by a third party
|===
-As the subdevice driver is also a kernel driver, with a standardised API, 3rd parties are free to write their own for any source of their choosing.
+As the subdevice driver is also a kernel driver with a standardised API, third parties are free to write their own for any source of their choosing.
-=== Developing Third-Party Drivers
+=== Write a third-party driver
This is the recommended approach to interfacing via Unicam.
-When developing a driver for a new device intended to be used with the bcm2835-unicam module, you need the driver and corresponding device tree overlays. Ideally the driver should be submitted to the http://vger.kernel.org/vger-lists.html#linux-media[linux-media] mailing list for code review and merging into mainline, then moved to the https://github.com/raspberrypi/linux[Raspberry Pi kernel tree], but exceptions may be made for the driver to be reviewed and merged directly to the Raspberry Pi kernel.
+When developing a driver for a new device intended to be used with the `bcm2835-unicam` module, you need the driver and corresponding device tree overlays. Ideally, the driver should be submitted to the http://vger.kernel.org/vger-lists.html#linux-media[linux-media] mailing list for code review and merging into mainline, then moved to the https://github.com/raspberrypi/linux[Raspberry Pi kernel tree]; but exceptions may be made for the driver to be reviewed and merged directly to the Raspberry Pi kernel.
-Please note that all kernel drivers are licensed under the GPLv2 licence, therefore source code *MUST* be available. Shipping of binary modules only is a violation of the GPLv2 licence under which the Linux kernel is licensed.
+NOTE: All kernel drivers are licensed under the GPLv2 licence, therefore source code must be available. Shipping of binary modules only is a violation of the GPLv2 licence under which the Linux kernel is licensed.
-The bcm2835-unicam has been written to try and accommodate all types of CSI-2 source driver as are currently found in the mainline Linux kernel. Broadly these can be split into camera sensors and bridge chips. Bridge chips allow for conversion between some other format and CSI-2.
+The `bcm2835-unicam` module has been written to try and accommodate all types of CSI-2 source driver that are currently found in the mainline Linux kernel. These can be split broadly into camera sensors and bridge chips. Bridge chips allow for conversion between some other format and CSI-2.
==== Camera sensors
The sensor driver for a camera sensor is responsible for all configuration of the device, usually via I2C or SPI. Rather than writing a driver from scratch, it is often easier to take an existing driver as a basis and modify it as appropriate.
-The https://github.com/raspberrypi/linux/blob/rpi-5.4.y/drivers/media/i2c/imx219.c[IMX219 driver] is a good starting point. This driver supports both 8bit and 10bit Bayer readout, so enumerating frame formats and frame sizes is slightly more involved.
+The https://github.com/raspberrypi/linux/blob/rpi-6.1.y/drivers/media/i2c/imx219.c[IMX219 driver] is a good starting point. This driver supports both 8bit and 10bit Bayer readout, so enumerating frame formats and frame sizes is slightly more involved.
Sensors generally support https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/control.html[V4L2 user controls]. Not all these controls need to be implemented in a driver. The IMX219 driver only implements a small subset, listed below, the implementation of which is handled by the `imx219_set_ctrl` function.
-* `V4L2_CID_PIXEL_RATE` / `V4L2_CID_VBLANK` / `V4L2_CID_HBLANK`: allows the application to set the frame rate.
-* `V4L2_CID_EXPOSURE`: sets the exposure time in lines. The application needs to use `V4L2_CID_PIXEL_RATE`, `V4L2_CID_HBLANK`, and the frame width to compute the line time.
-* `V4L2_CID_ANALOGUE_GAIN`: analogue gain in sensor specific units.
-* `V4L2_CID_DIGITAL_GAIN`: optional digital gain in sensor specific units.
-* `V4L2_CID_HFLIP / V4L2_CID_VFLIP`: flips the image either horizontally or vertically. Note that this operation may change the Bayer order of the data in the frame, as is the case on the imx219.
-* `V4L2_CID_TEST_PATTERN` / `V4L2_CID_TEST_PATTERN_*`: Enables output of various test patterns from the sensor. Useful for debugging.
+* `V4L2_CID_PIXEL_RATE` / `V4L2_CID_VBLANK` / `V4L2_CID_HBLANK`: allows the application to set the frame rate
+* `V4L2_CID_EXPOSURE`: sets the exposure time in lines; the application needs to use `V4L2_CID_PIXEL_RATE`, `V4L2_CID_HBLANK`, and the frame width to compute the line time
+* `V4L2_CID_ANALOGUE_GAIN`: analogue gain in sensor specific units
+* `V4L2_CID_DIGITAL_GAIN`: optional digital gain in sensor specific units
+* `V4L2_CID_HFLIP / V4L2_CID_VFLIP`: flips the image either horizontally or vertically; this operation may change the Bayer order of the data in the frame, as is the case on the IMX219
+* `V4L2_CID_TEST_PATTERN` / `V4L2_CID_TEST_PATTERN_*`: enables output of various test patterns from the sensor; useful for debugging
In the case of the IMX219, many of these controls map directly onto register writes to the sensor itself.
-Further guidance can be found in libcamera's https://git.linuxtv.org/libcamera.git/tree/Documentation/sensor_driver_requirements.rst[sensor driver requirements], and also in chapter 3 of the https://datasheets.raspberrypi.com/camera/raspberry-pi-camera-guide.pdf[Raspberry Pi Camera Tuning Guide].
+Further guidance can be found in the `libcamera` https://git.linuxtv.org/libcamera.git/tree/Documentation/sensor_driver_requirements.rst[sensor driver requirements], and in chapter 3 of the https://datasheets.raspberrypi.com/camera/raspberry-pi-camera-guide.pdf[Raspberry Pi Camera tuning guide].
===== Device Tree
-Device tree is used to select the sensor driver and configure parameters such as number of CSI-2 lanes, continuous clock lane operation, and link frequency (often only one is supported).
+Device Tree is used to select the sensor driver and configure parameters such as number of CSI-2 lanes, continuous clock lane operation, and link frequency (often only one is supported).
-* The IMX219 https://github.com/raspberrypi/linux/blob/rpi-5.4.y/arch/arm/boot/dts/overlays/imx219-overlay.dts[device tree overlay] for the 5.4 kernel
+The IMX219 https://github.com/raspberrypi/linux/blob/rpi-6.1.y/arch/arm/boot/dts/overlays/imx219-overlay.dts[Device Tree overlay] for the 6.1 kernel is available on GitHub.
==== Bridge chips
These are devices that convert an incoming video stream, for example HDMI or composite, into a CSI-2 stream that can be accepted by the Raspberry Pi CSI-2 receiver.
-Handling bridge chips is more complicated, as unlike camera sensors they have to respond to the incoming signal and report that to the application.
+Handling bridge chips is more complicated. Unlike camera sensors, they have to respond to the incoming signal and report that to the application.
-The mechanisms for handling bridge chips can be broadly split into either analogue or digital.
+The mechanisms for handling bridge chips can be split into two categories: either analogue or digital.
-When using `ioctls` in the sections below, an `_S_` in the `ioctl` name means it is a set function, whilst `_G_` is a get function and `_ENUM` enumerates a set of permitted values.
+When using `ioctls` in the sections below, an `_S_` in the `ioctl` name means it is a set function, while `_G_` is a get function and `_ENUM_` enumerates a set of permitted values.
===== Analogue video sources
-Analogue video sources use the standard `ioctls` for detecting and setting video standards. https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/vidioc-g-std.html[`VIDIOC_G_STD`], https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/vidioc-g-std.html[`VIDIOC_S_STD`], https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/vidioc-enumstd.html[`VIDIOC_ENUMSTD`], and https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/vidioc-querystd.html[`VIDIOC_QUERYSTD`]
+Analogue video sources use the standard `ioctls` for detecting and setting video standards. https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/vidioc-g-std.html[`VIDIOC_G_STD`], https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/vidioc-g-std.html[`VIDIOC_S_STD`], https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/vidioc-enumstd.html[`VIDIOC_ENUMSTD`], and https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/vidioc-querystd.html[`VIDIOC_QUERYSTD`] are available.
-Selecting the wrong standard will generally result in corrupt images. Setting the standard will typically also set the resolution on the V4L2 CAPTURE queue. It can not be set via `VIDIOC_S_FMT`. Generally requesting the detected standard via `VIDIOC_QUERYSTD` and then setting it with `VIDIOC_S_STD` before streaming is a good idea.
+Selecting the wrong standard will generally result in corrupt images. Setting the standard will typically also set the resolution on the V4L2 CAPTURE queue. It can not be set via `VIDIOC_S_FMT`. Generally, requesting the detected standard via `VIDIOC_QUERYSTD` and then setting it with `VIDIOC_S_STD` before streaming is a good idea.
===== Digital video sources
-For digital video sources, such as HDMI, there is an alternate set of calls that allow specifying of all the digital timing parameters (https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/vidioc-g-dv-timings.html[`VIDIOC_G_DV_TIMINGS`], https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/vidioc-g-dv-timings.html[`VIDIOC_S_DV_TIMINGS`], https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/vidioc-enum-dv-timings.html[`VIDIOC_ENUM_DV_TIMINGS`], and https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/vidioc-query-dv-timings.html[`VIDIOC_QUERY_DV_TIMINGS`]).
+For digital video sources, such as HDMI, there is an alternate set of calls that allow specifying of all the digital timing parameters: https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/vidioc-g-dv-timings.html[`VIDIOC_G_DV_TIMINGS`], https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/vidioc-g-dv-timings.html[`VIDIOC_S_DV_TIMINGS`], https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/vidioc-enum-dv-timings.html[`VIDIOC_ENUM_DV_TIMINGS`], and https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/vidioc-query-dv-timings.html[`VIDIOC_QUERY_DV_TIMINGS`].
As with analogue bridges, the timings typically fix the V4L2 CAPTURE queue resolution, and calling `VIDIOC_S_DV_TIMINGS` with the result of `VIDIOC_QUERY_DV_TIMINGS` before streaming should ensure the format is correct.
@@ -144,42 +134,35 @@ Depending on the bridge chip and the driver, it may be possible for changes in t
===== Currently supported devices
-There are 2 bridge chips that are currently supported by the Raspberry Pi Linux kernel, the Analog Devices ADV728x-M for analogue video sources, and the Toshiba TC358743 for HDMI sources.
-
-_Analog Devices ADV728x(A)-M Analogue video to CSI2 bridge_
+There are two bridge chips which are currently supported by the Raspberry Pi Linux kernel: the Analog Devices ADV728x-M for analogue video sources, and the Toshiba TC358743 for HDMI sources.
-These chips convert composite, S-video (Y/C), or component (YPrPb) video into a single lane CSI-2 interface, and are supported by the https://github.com/raspberrypi/linux/blob/rpi-5.4.y/drivers/media/i2c/adv7180.c[ADV7180 kernel driver].
+Analog Devices ADV728x(A)-M analogue video to CSI2 bridge chips convert composite, S-video (Y/C), or component (YPrPb) video into a single lane CSI-2 interface, and are supported by the https://github.com/raspberrypi/linux/blob/rpi-6.1.y/drivers/media/i2c/adv7180.c[ADV7180 kernel driver].
-Product details for the various versions of this chip can be found on the Analog Devices website.
-
-https://www.analog.com/en/products/adv7280a.html[ADV7280A], https://www.analog.com/en/products/adv7281a.html[ADV7281A], https://www.analog.com/en/products/adv7282a.html[ADV7282A]
+Product details for the various versions of this chip can be found on the Analog Devices website: https://www.analog.com/en/products/adv7280a.html[ADV7280A], https://www.analog.com/en/products/adv7281a.html[ADV7281A], and https://www.analog.com/en/products/adv7282a.html[ADV7282A].
Because of some missing code in the current core V4L2 implementation, selecting the source fails, so the Raspberry Pi kernel version adds a kernel module parameter called `dbg_input` to the ADV7180 kernel driver which sets the input source every time VIDIOC_S_STD is called. At some point mainstream will fix the underlying issue (a disjoin between the kernel API call s_routing, and the userspace call `VIDIOC_S_INPUT`) and this modification will be removed.
-Please note that receiving interlaced video is not supported, therefore the ADV7281(A)-M version of the chip is of limited use as it doesn't have the necessary I2P deinterlacing block. Also ensure when selecting a device to specify the -M option. Without that you will get a parallel output bus which can not be interfaced to the Raspberry Pi.
+Receiving interlaced video is not supported, therefore the ADV7281(A)-M version of the chip is of limited use as it doesn't have the necessary I2P deinterlacing block. Also ensure when selecting a device to specify the -M option. Without that you will get a parallel output bus which cannot be interfaced to the Raspberry Pi.
-There are no known commercially available boards using these chips, but this driver has been tested via the Analog Devices https://www.analog.com/en/design-center/evaluation-hardware-and-software/evaluation-boards-kits/EVAL-ADV7282A-M.html[EVAL-ADV7282-M evaluation board]
+There are no known commercially available boards using these chips, but this driver has been tested via the Analog Devices https://www.analog.com/en/design-center/evaluation-hardware-and-software/evaluation-boards-kits/EVAL-ADV7282A-M.html[EVAL-ADV7282-M evaluation board].
-This driver can be loaded using the `config.txt` dtoverlay `adv7282m` if you are using the `ADV7282-M` chip variant; or `adv728x-m` with a parameter of either `adv7280m=1`, `adv7281m=1`, or `adv7281ma=1` if you are using a different variant. e.g.
+This driver can be loaded using the `config.txt` dtoverlay `adv7282m` if you are using the `ADV7282-M` chip variant; or `adv728x-m` with a parameter of either `adv7280m=1`, `adv7281m=1`, or `adv7281ma=1` if you are using a different variant.
----
dtoverlay=adv728x-m,adv7280m=1
----
-_Toshiba TC358743 HDMI to CSI2 bridge_
-
-This is a HDMI to CSI-2 bridge chip, capable of converting video data at up to 1080p60.
+The Toshiba TC358743 is an HDMI to CSI-2 bridge chip, capable of converting video data at up to 1080p60.
-Information on this bridge chip can be found on the https://toshiba.semicon-storage.com/ap-en/semiconductor/product/interface-bridge-ics-for-mobile-peripheral-devices/hdmir-interface-bridge-ics/detail.TC358743XBG.html[Toshiba Website]
+Information on this bridge chip can be found on the https://toshiba.semicon-storage.com/ap-en/semiconductor/product/interface-bridge-ics-for-mobile-peripheral-devices/hdmir-interface-bridge-ics/detail.TC358743XBG.html[Toshiba website].
-The TC358743 interfaces HDMI in to CSI-2 and I2S outputs. It is supported by the https://github.com/raspberrypi/linux/blob/rpi-5.4.y/drivers/media/i2c/tc358743.c[TC358743 kernel module].
+The TC358743 interfaces HDMI into CSI-2 and I2S outputs. It is supported by the https://github.com/raspberrypi/linux/blob/rpi-6.1.y/drivers/media/i2c/tc358743.c[TC358743 kernel module].
-The chip supports incoming HDMI signals as either RGB888, YUV444, or YUV422, at up to 1080p60. It can forward RGB888, or convert it to YUV444 or YUV422, and convert either way between YUV444 and YUV422. Only RGB888 and YUV422 support has been tested. When using 2 CSI-2 lanes, the maximum rates that can be supported are 1080p30 as RGB888, or 1080p50 as YUV422. When using 4 lanes on a Compute Module, 1080p60 can be received in either format.
+The chip supports incoming HDMI signals as either RGB888, YUV444, or YUV422, at up to 1080p60. It can forward RGB888, or convert it to YUV444 or YUV422, and convert either way between YUV444 and YUV422. Only RGB888 and YUV422 support has been tested. When using two CSI-2 lanes, the maximum rates that can be supported are 1080p30 as RGB888, or 1080p50 as YUV422. When using four lanes on a Compute Module, 1080p60 can be received in either format.
-HDMI negotiates the resolution by a receiving device advertising an https://en.wikipedia.org/wiki/Extended_Display_Identification_Data[EDID] of all the modes that it can support. The kernel driver has no knowledge of the resolutions, frame rates, or formats that you wish to receive, therefore it is up to the user to provide a suitable file.
-This is done via the VIDIOC_S_EDID ioctl, or more easily using `v4l2-ctl --fix-edid-checksums --set-edid=file=filename.txt` (adding the --fix-edid-checksums option means that you don't have to get the checksum values correct in the source file). Generating the required EDID file (a textual hexdump of a binary EDID file) is not too onerous, and there are tools available to generate them, but it is beyond the scope of this page.
+HDMI negotiates the resolution by a receiving device advertising an https://en.wikipedia.org/wiki/Extended_Display_Identification_Data[EDID] of all the modes that it can support. The kernel driver has no knowledge of the resolutions, frame rates, or formats that you wish to receive, so it is up to the user to provide a suitable file via the `VIDIOC_S_EDID` ioctl, or more easily using `v4l2-ctl --fix-edid-checksums --set-edid=file=filename.txt` (adding the `--fix-edid-checksums` option means that you don't have to get the checksum values correct in the source file). Generating the required EDID file (a textual hexdump of a binary EDID file) is not too onerous, and there are tools available to generate them, but it is beyond the scope of this page.
-As described above, use the `DV_TIMINGS` ioctls to configure the driver to match the incoming video. The easiest approach for this is to use the command `v4l2-ctl --set-dv-bt-timings query`. The driver does support generating the SOURCE_CHANGED events should you wish to write an application to handle a changing source. Changing the output pixel format is achieved by setting it via VIDIOC_S_FMT, however only the pixel format field will be updated as the resolution is configured by the dv timings.
+As described above, use the `DV_TIMINGS` ioctls to configure the driver to match the incoming video. The easiest approach for this is to use the command `v4l2-ctl --set-dv-bt-timings query`. The driver does support generating the `SOURCE_CHANGED` events, should you wish to write an application to handle a changing source. Changing the output pixel format is achieved by setting it via `VIDIOC_S_FMT`, but only the pixel format field will be updated as the resolution is configured by the DV timings.
There are a couple of commercially available boards that connect this chip to the Raspberry Pi. The Auvidea B101 and B102 are the most widely obtainable, but other equivalent boards are available.
@@ -213,4 +196,5 @@ The chip also supports capturing stereo HDMI audio via I2S. The Auvidea boards b
|===
The `tc358743-audio` overlay is required _in addition to_ the `tc358743` overlay. This should create an ALSA recording device for the HDMI audio.
-Please note that there is no resampling of the audio. The presence of audio is reflected in the V4L2 control TC358743_CID_AUDIO_PRESENT / "audio-present", and the sample rate of the incoming audio is reflected in the V4L2 control TC358743_CID_AUDIO_SAMPLING_RATE / "Audio sampling-frequency". Recording when no audio is present will generate warnings, as will recording at a sample rate different from that reported.
+
+There is no resampling of the audio. The presence of audio is reflected in the V4L2 control `TC358743_CID_AUDIO_PRESENT` (audio-present), and the sample rate of the incoming audio is reflected in the V4L2 control `TC358743_CID_AUDIO_SAMPLING_RATE` (audio sampling-frequency). Recording when no audio is present or at a sample rate different from that reported emits a warning.
diff --git a/documentation/asciidoc/computers/camera/gstreamer.adoc b/documentation/asciidoc/computers/camera/gstreamer.adoc
deleted file mode 100644
index 81ef89e48..000000000
--- a/documentation/asciidoc/computers/camera/gstreamer.adoc
+++ /dev/null
@@ -1,58 +0,0 @@
-=== Using Gstreamer
-
-_Gstreamer_ is a Linux framework for reading, processing and playing multimedia files. There is a lot of information and many tutorials at the https://gstreamer.freedesktop.org/[_gstreamer_ website]. Here we show how `libcamera-vid` (and similarly `raspivid`) can be used to stream video over a network.
-
-On the server we need `libcamera-vid` to output an encoded h.264 bitstream to _stdout_ and can use the _gstreamer_ `fdsrc` element to receive it. Then extra _gstreamer_ elements can send this over the network. As an example we can simply send and receive the stream on the same device over a UDP link. On the server:
-
-[,bash]
-----
-libcamera-vid -t 0 -n --inline -o - | gst-launch-1.0 fdsrc fd=0 ! udpsink host=localhost port=5000
-----
-
-For the client (type this into another console window) we can use:
-
-[,bash]
-----
-gst-launch-1.0 udpsrc address=localhost port=5000 ! h264parse ! v4l2h264dec ! autovideosink
-----
-
-==== Using RTP
-
-To stream using the RTP protocol, on the server you could use:
-
-[,bash]
-----
-libcamera-vid -t 0 -n --inline -o - | gst-launch-1.0 fdsrc fd=0 ! h264parse ! rtph264pay ! udpsink host=localhost port=5000
-----
-
-And in the client window:
-
-[,bash]
-----
-gst-launch-1.0 udpsrc address=localhost port=5000 caps=application/x-rtp ! rtph264depay ! h264parse ! v4l2h264dec ! autovideosink
-----
-
-We conclude with an example that streams from one machine to another. Let us assume that the client machine has the IP address `192.168.0.3`. On the server (a Raspberry Pi) the pipeline is identical, but for the destination address:
-
-[,bash]
-----
-libcamera-vid -t 0 -n --inline -o - | gst-launch-1.0 fdsrc fd=0 ! h264parse ! rtph264pay ! udpsink host=192.168.0.3 port=5000
-----
-
-If the client is not a Raspberry Pi it may have different _gstreamer_ elements available. For a Linux PC we might use:
-
-[,bash]
-----
-gst-launch-1.0 udpsrc address=192.168.0.3 port=5000 caps=application/x-rtp ! rtph264depay ! h264parse ! avdec_h264 ! autovideosink
-----
-
-==== The `libcamerasrc` element
-
-`libcamera` provides a `libcamerasrc` _gstreamer_ element which can be used directly instead of `libcamera-vid`. On the server you could use:
-
-[,bash]
-----
-gst-launch-1.0 libcamerasrc ! capsfilter caps=video/x-raw,width=1280,height=720,format=NV12 ! v4l2convert ! v4l2h264enc extra-controls="controls,repeat_sequence_header=1" ! h264parse ! rtph264pay ! udpsink host=localhost port=5000
-----
-
-and on the client we use the same playback pipeline as previously.
diff --git a/documentation/asciidoc/computers/camera/images/cam.jpg b/documentation/asciidoc/computers/camera/images/cam.jpg
deleted file mode 100644
index 38963884d..000000000
Binary files a/documentation/asciidoc/computers/camera/images/cam.jpg and /dev/null differ
diff --git a/documentation/asciidoc/computers/camera/images/cam2.jpg b/documentation/asciidoc/computers/camera/images/cam2.jpg
deleted file mode 100644
index 01d39ca9c..000000000
Binary files a/documentation/asciidoc/computers/camera/images/cam2.jpg and /dev/null differ
diff --git a/documentation/asciidoc/computers/os/images/image2.jpg b/documentation/asciidoc/computers/camera/images/webcam-image-high-resolution.jpg
similarity index 100%
rename from documentation/asciidoc/computers/os/images/image2.jpg
rename to documentation/asciidoc/computers/camera/images/webcam-image-high-resolution.jpg
diff --git a/documentation/asciidoc/computers/os/images/image3.jpg b/documentation/asciidoc/computers/camera/images/webcam-image-no-banner.jpg
similarity index 100%
rename from documentation/asciidoc/computers/os/images/image3.jpg
rename to documentation/asciidoc/computers/camera/images/webcam-image-no-banner.jpg
diff --git a/documentation/asciidoc/computers/os/images/image.jpg b/documentation/asciidoc/computers/camera/images/webcam-image.jpg
similarity index 100%
rename from documentation/asciidoc/computers/os/images/image.jpg
rename to documentation/asciidoc/computers/camera/images/webcam-image.jpg
diff --git a/documentation/asciidoc/computers/camera/libcamera_3rd_party_tuning.adoc b/documentation/asciidoc/computers/camera/libcamera_3rd_party_tuning.adoc
deleted file mode 100644
index a20bd82bd..000000000
--- a/documentation/asciidoc/computers/camera/libcamera_3rd_party_tuning.adoc
+++ /dev/null
@@ -1,15 +0,0 @@
-=== Camera Tuning and supporting 3rd Party Sensors
-
-==== The Camera Tuning File
-
-Most of the image processing applied to frames from the sensor is done by the hardware ISP (Image Signal Processor). This processing is governed by a set of _control algorithms_ and these in turn must have a wide range of parameters supplied to them. These parameters are tuned specifically for each sensor and are collected together in a JSON file known as the _camera tuning file_.
-
-This _tuning file_ can be inspected and edited by users. Using the `--tuning-file` command line option, users can point the system at completely custom camera tuning files.
-
-==== 3rd Party Sensors
-
-`libcamera` makes it possible to support 3rd party sensors (that is, sensors other than Raspberry Pi's officially supported sensors) on the Raspberry Pi platform. To accomplish this, a working open source sensor driver must be provided, which the authors are happy to submit to the Linux kernel. There are a couple of extra files need to be added to `libcamera` which supply device-specific information that is available from the kernel drivers, including the previously discussed camera tuning file.
-
-Raspberry Pi also supplies a _tuning tool_ which automates the generation of the tuning file from a few simple calibration images.
-
-Both these topics are rather beyond the scope of the documentation here, however, full information is available in the https://datasheets.raspberrypi.com/camera/raspberry-pi-camera-guide.pdf[Tuning Guide for the Raspberry Pi cameras and libcamera].
diff --git a/documentation/asciidoc/computers/camera/libcamera_apps_building.adoc b/documentation/asciidoc/computers/camera/libcamera_apps_building.adoc
deleted file mode 100644
index 4c1c8d776..000000000
--- a/documentation/asciidoc/computers/camera/libcamera_apps_building.adoc
+++ /dev/null
@@ -1,183 +0,0 @@
-=== Building `libcamera` and `libcamera-apps`
-
-Building `libcamera` and `libcamera-apps` for yourself can bring the following benefits.
-
-* You can pick up the latest enhancements and features.
-
-* `libcamera-apps` can be compiled with extra optimisation for Raspberry Pi 3 and Raspberry Pi 4 devices running a 32-bit OS.
-
-* You can include the various optional OpenCV and/or TFLite post-processing stages (or add your own).
-
-* You can customise or add your own applications derived from `libcamera-apps`.
-
-NOTE: When building on a Raspberry Pi with 1GB or less of RAM, there is a risk that the device may run out of swap and fail. We recommend either increasing the amount of swap, or building with fewer threads (the `-j` option to `ninja` and to `make`).
-
-==== Building `libcamera-apps` without rebuilding `libcamera`
-
-You can rebuild `libcamera-apps` _without_ first rebuilding the whole of `libcamera` and `libepoxy`. If you do not need support for the X11/GLES preview window then `libepoxy` can be omitted entirely. Mostly this will include Raspberry Pi OS Lite users, and they must be sure to use `-DENABLE_X11=0` when running `cmake` later. These users should run:
-
-----
-sudo apt install -y libcamera-dev libjpeg-dev libtiff5-dev
-----
-
-All other users should execute:
-
-----
-sudo apt install -y libcamera-dev libepoxy-dev libjpeg-dev libtiff5-dev
-----
-
-If you want to use the Qt preview window, please also execute
-
-----
-sudo apt install -y qtbase5-dev libqt5core5a libqt5gui5 libqt5widgets5
-----
-
-If you want xref:camera_software.adoc#libav-integration-with-libcamera-vid[libav] support in `libcamera-vid`, additional libraries must be installed:
-
-----
-sudo apt install libavcodec-dev libavdevice-dev libavformat-dev libswresample-dev
-----
-
-Now proceed directly to the instructions for xref:camera_software.adoc#building-libcamera-apps[building `libcamera-apps`]. Raspberry Pi OS Lite users should check that _git_ is installed first (`sudo apt install -y git`).
-
-==== Building `libcamera`
-
-Rebuilding `libcamera` from scratch should be necessary only if you need the latest features that may not yet have reached the `apt` repositories, or if you need to customise its behaviour in some way.
-
-First install all the necessary dependencies for `libcamera`.
-
-NOTE: Raspberry Pi OS Lite users will first need to install the following additional packages if they have not done so previously:
-
-----
-sudo apt install -y python3-pip git
-sudo pip3 install jinja2
-----
-
-All users should then install the following:
-
-----
-sudo apt install -y libboost-dev
-sudo apt install -y libgnutls28-dev openssl libtiff5-dev
-sudo apt install -y qtbase5-dev libqt5core5a libqt5gui5 libqt5widgets5
-sudo apt install -y meson
-sudo pip3 install pyyaml ply
-sudo pip3 install --upgrade meson
-----
-
-In the `meson` commands below we have enabled the _gstreamer_ plugin. If you _do not_ need this you can set `-Dgstreamer=disabled` instead and the next pair of dependencies will not be required. But if you do leave _gstreamer_ enabled, then you will need the following:
-
-----
-sudo apt install -y libglib2.0-dev libgstreamer-plugins-base1.0-dev
-----
-
-Now we can check out and build `libcamera` itself. We check out Raspberry Pi's fork of libcamera which tracks the official repository but lets us control exactly when we pick up new features.
-
-----
-cd
-git clone https://github.com/raspberrypi/libcamera.git
-cd libcamera
-----
-
-Next we recommend that Raspberry Pi OS Lite users run
-
-----
-meson build --buildtype=release -Dpipelines=raspberrypi -Dipas=raspberrypi -Dv4l2=true -Dgstreamer=enabled -Dtest=false -Dlc-compliance=disabled -Dcam=disabled -Dqcam=disabled -Ddocumentation=disabled
-----
-
-Users of Raspberry Pi OS can instead use
-
-----
-meson build --buildtype=release -Dpipelines=raspberrypi -Dipas=raspberrypi -Dv4l2=true -Dgstreamer=enabled -Dtest=false -Dlc-compliance=disabled -Dcam=disabled -Dqcam=enabled -Ddocumentation=disabled
-----
-
-The only difference is that the latter also builds the `qcam` test application, which has dependencies on Qt and X Windows (after completing the `libcamera` build users can run `build/src/qcam/qcam` to verify that `libcamera` is functioning correctly).
-
-To complete the `libcamera` build, please run
-
-----
-ninja -C build # use -j 2 on Raspberry Pi 3 or earlier devices
-sudo ninja -C build install
-----
-
-NOTE: At the time of writing `libcamera` does not yet have a stable binary interface. Therefore, if you have rebuilt `libcamera` we recommend continuing and rebuilding `libcamera-apps` from scratch too.
-
-==== Building `libepoxy`
-
-Rebuilding `libepoxy` should not normally be necessary as this library changes only very rarely. If you do want to build it from scratch, however, please follow the instructions below.
-
-Start by installing the necessary dependencies.
-
-----
-sudo apt install -y libegl1-mesa-dev
-----
-
-Next, check out and build `libepoxy`.
-
-----
-cd
-git clone https://github.com/anholt/libepoxy.git
-cd libepoxy
-mkdir _build
-cd _build
-meson
-ninja
-sudo ninja install
-----
-
-==== Building `libcamera-apps`
-
-First fetch the necessary dependencies for `libcamera-apps`.
-
-----
-sudo apt install -y cmake libboost-program-options-dev libdrm-dev libexif-dev
-----
-
-The `libcamera-apps` build process begins with the following:
-
-----
-cd
-git clone https://github.com/raspberrypi/libcamera-apps.git
-cd libcamera-apps
-mkdir build
-cd build
-----
-
-At this point you will need to run `cmake` after deciding what extra flags to pass it. The valid flags are:
-
-* `-DENABLE_COMPILE_FLAGS_FOR_TARGET=armv8-neon` - you may supply this when building for Raspberry Pi 3 or Raspberry Pi 4 devices running a 32-bit OS. Some post-processing features may run more quickly.
-
-* `-DENABLE_DRM=1` or `-DENABLE_DRM=0` - this enables or disables the DRM/KMS preview rendering. This is what implements the preview window when X Windows is not running.
-
-* `-DENABLE_X11=1` or `-DENABLE_X11=0` - this enables or disables the X Windows based preview. You should disable this if your system does not have X Windows installed.
-
-* `-DENABLE_QT=1` or `-DENABLE_QT=0` - this enables or disables support for the Qt-based implementation of the preview window. You should disable it if you do not have X Windows installed, or if you have no intention of using the Qt-based preview window. The Qt-based preview is normally not recommended because it is computationally very expensive, however it does work with X display forwarding.
-
-* `-DENABLE_OPENCV=1` or `-DENABLE_OPENCV=0` - you may choose one of these to force OpenCV-based post-processing stages to be linked (or not). If you enable them, then OpenCV must be installed on your system. Normally they will be built by default if OpenCV is available.
-
-* `-DENABLE_TFLITE=1` or `-DENABLE_TFLITE=0` - choose one of these to enable TensorFlow Lite post-processing stages (or not). By default they will not be enabled. If you enable them then TensorFlow Lite must be available on your system. Depending on how you have built and/or installed TFLite, you may need to tweak the `CMakeLists.txt` file in the `post_processing_stages` directory.
-
-For Raspberry Pi OS users we recommend the following `cmake` command:
-
-----
-cmake .. -DENABLE_DRM=1 -DENABLE_X11=1 -DENABLE_QT=1 -DENABLE_OPENCV=0 -DENABLE_TFLITE=0
-----
-
-and for Raspberry Pi OS Lite users:
-
-----
-cmake .. -DENABLE_DRM=1 -DENABLE_X11=0 -DENABLE_QT=0 -DENABLE_OPENCV=0 -DENABLE_TFLITE=0
-----
-
-In both cases, consider `-DENABLE_COMPILE_FLAGS_FOR_TARGET=armv8-neon` if you are using a 32-bit OS on a Raspberry Pi 3 or Raspberry Pi 4. Consider `-DENABLE_OPENCV=1` if you have installed _OpenCV_ and wish to use OpenCV-based post-processing stages. Finally also consider `-DENABLE_TFLITE=1` if you have installed _TensorFlow Lite_ and wish to use it in post-processing stages.
-
-After executing the `cmake` command of your choice, the whole process concludes with the following:
-
-----
-make -j4 # use -j1 on Raspberry Pi 3 or earlier devices
-sudo make install
-sudo ldconfig # this is only necessary on the first build
-----
-
-NOTE: If you are using an image where `libcamera-apps` have been previously installed as an `apt` package, and you want to run the new `libcamera-apps` executables from the same terminal window where you have just built and installed them, you may need to run `hash -r` to be sure to pick up the new ones over the system supplied ones.
-
-Finally, if you have not already done so, please be sure to follow the `dtoverlay` and display driver instructions in the xref:camera_software.adoc#getting-started[Getting Started section] (and rebooting if you changed anything there).
diff --git a/documentation/asciidoc/computers/camera/libcamera_apps_getting_help.adoc b/documentation/asciidoc/computers/camera/libcamera_apps_getting_help.adoc
deleted file mode 100644
index a4bf355ac..000000000
--- a/documentation/asciidoc/computers/camera/libcamera_apps_getting_help.adoc
+++ /dev/null
@@ -1,19 +0,0 @@
-=== Getting Help
-
-For further help with `libcamera` and the `libcamera-apps`, the first port of call will usually be the https://forums.raspberrypi.com/viewforum.php?f=43[Raspberry Pi Camera Forum]. Before posting, it's helpful to:
-
-* Ensure your xref:../computers/os.adoc#using-apt[software is up to date].
-
-* If you are using _Buster_ please upgrade to the latest OS, as `libcamera-apps` is no longer supported there.
-
-* Make a note of your operating system version (`uname -a`).
-
-* Make a note of your `libcamera` and `libcamera-apps` versions (`libcamera-hello --version`).
-
-* Please report the make and model of the camera module you are using. Note that when third party camera module vendors supply their own software then we are normally unable to offer any support and all queries should be directed back to the vendor.
-
-* Please also provide information on what kind of a Raspberry Pi you have, including memory size.
-
-* If it seems like it might be relevant, please include any excerpts from the application's console output.
-
-When it seems likely that there are specific problems in the camera software (such as crashes) then it may be more appropriate to https://github.com/raspberrypi/libcamera-apps[create an issue in the `libcamera-apps` Github repository]. Again, please include all the helpful details that you can.
diff --git a/documentation/asciidoc/computers/camera/libcamera_apps_getting_started.adoc b/documentation/asciidoc/computers/camera/libcamera_apps_getting_started.adoc
deleted file mode 100644
index 0536ad3d2..000000000
--- a/documentation/asciidoc/computers/camera/libcamera_apps_getting_started.adoc
+++ /dev/null
@@ -1,59 +0,0 @@
-=== Getting Started
-
-==== Using the camera for the first time
-
-NOTE: On Raspberry Pi 3 and earlier devices running _Bullseye_ you need to re-enable _Glamor_ in order to make the X-Windows hardware accelerated preview window work. To do this enter `sudo raspi-config` at a terminal window and then choose `Advanced Options`, `Glamor` and `Yes`. Finally quit `raspi-config` and let it reboot your Raspberry Pi.
-
-When running a Raspberry Pi OS based on _Bullseye_, the 5 basic `libcamera-apps` are already installed. In this case, official Raspberry Pi cameras will also be detected and enabled automatically.
-
-You can check that everything is working by entering:
-
-[,bash]
-----
-libcamera-hello
-----
-
-You should see a camera preview window for about 5 seconds.
-
-Users who are still running _Buster_ should upgrade to _Bullseye_. The new _libcamera_-based stack is no longer supported there, and anyone still using _Buster_ should stay with the legacy camera stack.
-
-NOTE: Raspberry Pi 3 and older devices may not by default be using the correct display driver. Refer to the `/boot/config.txt` file and ensure that either `dtoverlay=vc4-fkms-v3d` or `dtoverlay=vc4-kms-v3d` is currently active. Please reboot if you needed to change this.
-
-==== If you do need to alter the configuration
-
-You may need to alter the camera configuration in your `/boot/config.txt` file if:
-
-* You are using a 3rd party camera (the manufacturer's instructions should explain the changes you need to make).
-
-* You are using an official Raspberry Pi camera but wish to use a non-standard driver/overlay.
-
-If you do need to add your own `dtoverlay`, the following are currently recognised.
-
-|===
-| Camera Module | In `/boot/config.txt`
-
-| V1 camera (OV5647)
-| `dtoverlay=ov5647`
-
-| V2 camera (IMX219)
-| `dtoverlay=imx219`
-
-| HQ camera (IMX477)
-| `dtoverlay=imx477`
-
-| Camera Module 3 (IMX708)
-| `dtoverlay=imx708`
-
-| IMX290 and IMX327
-| `dtoverlay=imx290,clock-frequency=74250000` or `dtoverlay=imx290,clock-frequency=37125000` (both modules share the imx290 kernel driver; please refer to instructions from the module vendor for the correct frequency)
-
-| IMX378
-| `dtoverlay=imx378`
-
-| OV9281
-| `dtoverlay=ov9281`
-|===
-
-To override the automatic camera detection, _Bullseye_ users will also need to delete the entry `camera_auto_detect=1` if present in the `config.txt` file. Your Raspberry Pi will need to be rebooted after editing this file.
-
-NOTE: Setting `camera_auto_detect=0` disables the boot time detection completely.
diff --git a/documentation/asciidoc/computers/camera/libcamera_apps_intro.adoc b/documentation/asciidoc/computers/camera/libcamera_apps_intro.adoc
deleted file mode 100644
index 2fc148b44..000000000
--- a/documentation/asciidoc/computers/camera/libcamera_apps_intro.adoc
+++ /dev/null
@@ -1,35 +0,0 @@
-== `libcamera` and `libcamera-apps`
-
-=== Introduction
-
-`libcamera` is a new software library aimed at supporting complex camera systems directly from the Linux operating system. In the case of the Raspberry Pi it enables us to drive the camera system directly from open source code running on ARM processors. The proprietary code running on the Broadcom GPU, and to which users have no access at all, is almost completely by-passed.
-
-`libcamera` presents a {cpp} API to applications and works at the level of configuring the camera and then allowing an application to request image frames. These image buffers reside in system memory and can be passed directly to still image encoders (such as JPEG) or to video encoders (such as h.264), though such ancillary functions as encoding images or displaying them are strictly beyond the purview of `libcamera` itself.
-
-For this reason Raspberry Pi supplies a small set of example `libcamera-apps`. These are simple applications, built on top of `libcamera`, and are designed largely to emulate the function of the legacy stack built on Broadcom's proprietary GPU code (some users will recognise these legacy applications as `raspstill` and `raspivid`). The applications we provide are:
-
-* _libcamera-hello_ A simple "hello world" application which starts a camera preview stream and displays it on the screen.
-* _libcamera-jpeg_ A simple application to run a preview window and then capture high resolution still images.
-* _libcamera-still_ A more complex still image capture application which emulates more of the features of `raspistill`.
-* _libcamera-vid_ A video capture application.
-* _libcamera-raw_ A basic application for capturing raw (unprocessed Bayer) frames directly from the sensor.
-* _libcamera-detect_ This application is not built by default, but users can build it if they have TensorFlow Lite installed on their Raspberry Pi. It captures JPEG images when certain objects are detected.
-
-Raspberry Pi's `libcamera-apps` are not only command line applications that make it easy to capture images and video from the camera, they are also examples of how users can create their own libcamera-based applications with custom functionality to suit their own requirements. The source code for the `libcamera-apps` is freely available under a BSD 2-Clause licence at https://github.com/raspberrypi/libcamera-apps[].
-
-==== More about `libcamera`
-
-`libcamera` is an open source Linux community project. More information is available at the https://libcamera.org[`libcamera` website].
-
-The `libcamera` source code can be found and checked out from the https://git.linuxtv.org/libcamera.git/[official libcamera repository], although we work from a https://github.com/raspberrypi/libcamera.git[fork] that lets us control when we get _libcamera_ updates.
-
-Underneath the `libcamera` core, Raspberry Pi provides a custom _pipeline handler_, which is the layer that `libcamera` uses to drive the sensor and ISP (Image Signal Processor) on the Raspberry Pi itself. Also part of this is a collection of well-known _control algorithms_, or _IPAs_ (Image Processing Algorithms) in `libcamera` parlance, such as AEC/AGC (Auto Exposure/Gain Control), AWB (Auto White Balance), ALSC (Auto Lens Shading Correction) and so on.
-
-All this code is open source and now runs on the Raspberry Pi's ARM cores. There is only a very thin layer of code on the GPU which translates Raspberry Pi's own control parameters into register writes for the Broadcom ISP.
-
-Raspberry Pi's implementation of `libcamera` supports not only the four standard Raspberry Pi cameras (the OV5647 or V1 camera, the IMX219 or V2 camera, the IMX477 or HQ camera and the IMX708 or Camera Module 3) but also third party senors such as the IMX290, IMX327, OV9281, IMX378. Raspberry Pi is keen to work with vendors who would like to see their sensors supported directly by `libcamera`.
-
-Moreover, Raspberry Pi supplies a _tuning file_ for each of these sensors which can be edited to change the processing performed by the Raspberry Pi hardware on the raw images received from the image sensor, including aspects like the colour processing, the amount of noise suppression or the behaviour of the control algorithms.
-
-For further information on `libcamera` for the Raspberry Pi, please consult the https://datasheets.raspberrypi.com/camera/raspberry-pi-camera-guide.pdf[Tuning Guide for the Raspberry Pi cameras and libcamera].
-
diff --git a/documentation/asciidoc/computers/camera/libcamera_apps_libav.adoc b/documentation/asciidoc/computers/camera/libcamera_apps_libav.adoc
deleted file mode 100644
index c54a6064d..000000000
--- a/documentation/asciidoc/computers/camera/libcamera_apps_libav.adoc
+++ /dev/null
@@ -1,77 +0,0 @@
-=== libav integration with libcamera-vid
-
-`libcamera-vid` can use the ffmpeg/libav codec backend to encode audio and video streams and either save to a local file or stream over the network. At present, video is encoded through the hardware H.264 encoder, and audio is encoded by a number of available software encoders. To list the available output formats, use the `ffmpeg -formats` command.
-
-To enable the libav backend, use the `--codec libav` command line option. Once enabled, the following configuration options are available:
-
-----
- --libav-format, libav output format to be used
-----
-
-Set the libav output format to use. These output formats can be specified as containers (e.g. mkv, mp4, avi) or stream output (e.g. h264 or mpegts). If this option is not provided, libav tries to deduce the output format from the filename specified by the `-o` command line argument.
-
-Example: To save a video in an mkv container, the following commands are equivalent:
-
-----
-libcamera-vid --codec libav -o test.mkv
-libcamera-vid --codec libav --libav-format mkv -o test.raw
-----
-
-----
- --libav-audio, Enable audio recording
-----
-
-Set this option to enable audio encoding together with the video stream. When audio encoding is enabled, an output format that supports audio (e.g. mpegts, mkv, mp4) must be used.
-
-----
- --audio-codec, Selects the audio codec
-----
-
-Selects which software audio codec is used for encoding. By default `aac` is used. To list the available audio codecs, use the `ffmpeg -codec` command.
-
-----
- --audio-bitrate, Selects the audio bitrate
-----
-
-Sets the audio encoding bitrate in bits per second.
-
-Example: To record audio at 16 kilobits/sec with the mp2 codec use `libcamera-vid --codec libav -o test.mp4 --audio_codec mp2 --audio-bitrate 16384`
-
-----
- --audio-samplerate, Set the audio sampling rate
-----
-
-Set the audio sampling rate in Hz for encoding. Set to 0 (default) to use the input sample rate.
-
-----
- --audio-device, Chooses an audio recording device to use
-----
-
-Selects which ALSA input device to use for audio recording. The audio device string can be obtained with the following command:
-
-----
-pi@pi4:~ $ pactl list | grep -A2 'Source #' | grep 'Name: '
- Name: alsa_output.platform-bcm2835_audio.analog-stereo.monitor
- Name: alsa_output.platform-fef00700.hdmi.hdmi-stereo.monitor
- Name: alsa_output.usb-GN_Netcom_A_S_Jabra_EVOLVE_LINK_000736B1214E0A-00.analog-stereo.monitor
- Name: alsa_input.usb-GN_Netcom_A_S_Jabra_EVOLVE_LINK_000736B1214E0A-00.mono-fallback
-----
-
-----
- --av-sync, Audio/Video sync control
-----
-This option can be used to shift the audio sample timestamp by a value given in microseconds relative to the video frame. Negative values may also be used.
-
-==== Network streaming with libav
-
-It is possible to use the libav backend as a network streaming source for audio/video. To do this, the output filename specified by the `-o` argument must be given as a protocol url, see https://ffmpeg.org/ffmpeg-protocols.html[ffmpeg protocols] for more details on protocol usage. Some examples:
-
-To stream audio/video using TCP
-----
-libcamera-vid -t 0 --codec libav --libav-format mpegts --libav-audio -o "tcp://0.0.0.0:1234?listen=1"
-----
-
-To stream audio/video using UDP
-----
-libcamera-vid -t 0 --codec libav --libav-format mpegts --libav-audio -o "udp://:"
-----
diff --git a/documentation/asciidoc/computers/camera/libcamera_apps_multicam.adoc b/documentation/asciidoc/computers/camera/libcamera_apps_multicam.adoc
deleted file mode 100644
index 79bf9b50e..000000000
--- a/documentation/asciidoc/computers/camera/libcamera_apps_multicam.adoc
+++ /dev/null
@@ -1,12 +0,0 @@
-=== Multiple Cameras Usage
-
-Basic support for multiple cameras is available within `libcamera-apps`. Multiple cameras may be attached to a Raspberry Pi in the following ways:
-
-* Two cameras connected directly to a Raspberry Pi Compute Module board, see the xref:../computers/compute-module.adoc#attaching-a-raspberry-pi-camera-module[Compute Module documentation] for further details.
-* Two or more cameras attached to a non-compute Raspberry Pi board using a Video Mux board, like https://www.arducam.com/product/multi-camera-v2-1-adapter-raspberry-pi/[this 3rd party product].
-
-In the latter case, only one camera may be used at a time since both cameras are attached to a single Unicam port. For the former, both cameras can run simultaneously.
-
-To list all the cameras available on your platform, use the `--list-cameras` command line option. To choose which camera to use, use the `--camera ` option, and provide the index value of the requested camera.
-
-NOTE: `libcamera` does not yet provide stereoscopic camera support. When running two cameras simultaneously, they must be run in separate processes. This means there is no way to synchronise sensor framing or 3A operation between them. As a workaround, you could synchronise the cameras through an external sync signal for the HQ (IMX477) camera, and switch the 3A to manual mode if necessary.
diff --git a/documentation/asciidoc/computers/camera/libcamera_apps_packages.adoc b/documentation/asciidoc/computers/camera/libcamera_apps_packages.adoc
deleted file mode 100644
index e5613cdd6..000000000
--- a/documentation/asciidoc/computers/camera/libcamera_apps_packages.adoc
+++ /dev/null
@@ -1,33 +0,0 @@
-=== `libcamera` and `libcamera-apps` Packages
-
-A number of `apt` packages are provided for convenience. In order to access them, we recommend keeping your OS up to date xref:../computers/os.adoc#using-apt[in the usual way].
-
-==== Binary Packages
-
-There are two `libcamera-apps` packages available, that contain the necessary executables:
-
-* `libcamera-apps` contains the full applications with support for previews using X Windows. This package is pre-installed in the _Bullseye_ release of Raspberry Pi OS.
-
-* `libcamera-apps-lite` omits X Windows support and only the DRM preview is available. This package is pre-installed in the _Bullseye_ release of Raspberry Pi OS Lite.
-
-For _Bullseye_ users, official Raspberry Pi cameras should be detected automatically. Other users will need to xref:camera_software.adoc#if-you-do-need-to-alter-the-configuration[edit their `/boot/config.txt`] file if they have not done so previously.
-
-==== Dependencies
-
-These applications depend on a number of library packages which are named _library-name_ where __ is a version number (actually the ABI, or Application Binary Interface, version), and which stands at zero at the time of writing. Thus we have the following:
-
-* The package `libcamera0` contains the `libcamera` libraries.
-
-* The package `libepoxy0` contains the `libepoxy` libraries.
-
-These will be installed automatically when needed.
-
-==== Dev Packages
-
-`libcamera-apps` can be rebuilt on their own without installing and building `libcamera` and `libepoxy` from scratch. To enable this, the following packages should be installed:
-
-* `libcamera-dev` contains the necessary `libcamera` header files and resources.
-
-* `libepoxy-dev` contains the necessary `libepoxy` header files and resources. You will only need this if you want support for the X11/GLES preview window.
-
-Subsequently `libcamera-apps` can be xref:camera_software.adoc#building-libcamera-apps-without-rebuilding-libcamera[checked out from GitHub and rebuilt].
diff --git a/documentation/asciidoc/computers/camera/libcamera_apps_post_processing.adoc b/documentation/asciidoc/computers/camera/libcamera_apps_post_processing.adoc
deleted file mode 100644
index fcc0a17f0..000000000
--- a/documentation/asciidoc/computers/camera/libcamera_apps_post_processing.adoc
+++ /dev/null
@@ -1,216 +0,0 @@
-=== Post-Processing
-
-`libcamera-apps` share a common post-processing framework. This allows them to pass the images received from the camera system through a number of custom image processing and image analysis routines. Each such routine is known as a _post-processing stage_ and the description of exactly which stages should be run, and what configuration they may have, is supplied in a JSON file. Every stage, along with its source code, is supplied with a short example JSON file showing how to enable it.
-
-For example, the simple _negate_ stage (which "negates" all the pixels in an image, turning light pixels dark and vice versa) is supplied with a `negate.json` file that configures the post-processing pipeline to run it:
-
-`libcamera-hello --post-process-file /path/to/negate.json`
-
-TIP: Example JSON files can be found in the `assets` folder of the `libcamera-apps` repository at https://github.com/raspberrypi/libcamera-apps/tree/main/assets[].
-
-The negate stage is particularly trivial and has no configuration parameters of its own, therefore the JSON file merely has to name the stage, with no further information, and it will be run. Thus `negate.json` contains
-
-----
-{
- "negate":
- {
- }
-}
-----
-
-To run multiple post-processing stages, the contents of the example JSON files merely need to be listed together, and the stages will be run in the order given. For example, to run the Sobel stage (which applies a Sobel filter to an image) followed by the negate stage we could create a custom JSON file containing
-
-----
-{
- "sobel_cv":
- {
- "ksize": 5
- },
- "negate":
- {
- }
-}
-----
-
-The Sobel stage is implemented using OpenCV, hence `cv` in its name. Observe how it has a user-configurable parameter, `ksize` that specifies the kernel size of the filter to be used. In this case, the Sobel filter will produce bright edges on a black background, and the negate stage will turn this into dark edges on a white background, as shown.
-
-image::images/sobel_negate.jpg[Image with Sobel and negate]
-
-Some stages actually alter the image in some way, and this is their primary function (such as _negate_). Others are primarily for image analysis, and while they may indicate something on the image, all they really do is generate useful information. For this reason we also have a very flexible form of _metadata_ that can be populated by the post-processing stages, and this will get passed all the way through to the application itself.
-
-Image analysis stages often prefer to work on reduced resolution images. `libcamera-apps` are able to supply applications with a ready-made low resolution image provided directly by the ISP hardware, and this can be helpful in improving performance.
-
-Furthermore, with the post-processing framework being completely open, Raspberry Pi welcomes the contribution of new and interesting stages from the community and would be happy to host them in our `libcamera-apps` repository. The stages that are currently available are documented below.
-
-NOTE: The `libcamera-apps` supplied with the operating system will be built without any optional 3rd party libraries (such as OpenCV or TensorFlow Lite), meaning that certain post-processing stages that rely on them will not be enabled. To use these stages, please follow the instructions for xref:camera_software.adoc#building-libcamera-and-libcamera-apps[building `libcamera-apps` for yourself].
-
-==== `negate` stage
-
-The `negate` stage requires no 3rd party libraries.
-
-On a Raspberry Pi 3 device or a Raspberry Pi 4 running a 32-bit OS, it may execute more quickly if recompiled using `-DENABLE_COMPILE_FLAGS_FOR_TARGET=armv8-neon`. (Please see the xref:camera_software.adoc#building-libcamera-and-libcamera-apps[build instructions].)
-
-The `negate` stage has no user-configurable parameters.
-
-Default `negate.json` file:
-
-----
-{
- "negate":
- {
- }
-}
-----
-
-Example:
-
-image::images/negate.jpg[Image with negate]
-
-==== `hdr` stage
-
-The `hdr` stage implements both HDR (high dynamic range) imaging and DRC (dynamic range compression). The terminology that we use here regards DRC as operating on single images, and HDR works by accumulating multiple under-exposed images and then performing the same algorithm as DRC.
-
-The `hdr` stage has no dependencies on 3rd party libraries, but (like some other stages) may execute more quickly on Raspberry Pi 3 or Raspberry Pi 4 devices running a 32-bit OS if recompiled using `-DENABLE_COMPILE_FLAGS_FOR_TARGET=armv8-neon` (please see the xref:camera_software.adoc#building-libcamera-and-libcamera-apps[build instructions]). Specifically, the image accumulation stage will run quicker and result in fewer frame drops, though the tonemapping part of the process is unchanged.
-
-The basic procedure is that we take the image (which in the case of HDR may be multiple images accumulated together) and apply an edge-preserving smoothing filter to generate a low pass (LP) image. We define the high pass (HP) image to be the difference between the LP image and the original. Next we apply a global tonemap to the LP image and add back the HP image. This procedure, in contrast to applying the tonemap directly to the original image, prevents us from squashing and losing all the local contrast in the resulting image.
-
-It is worth noting that this all happens using fully-processed images, once the ISP has finished with them. HDR normally works better when carried out in the raw (Bayer) domain, as signals are still linear and have greater bit-depth. We expect to implement such functionality once `libcamera` exports an API for "re-processing" Bayer images that do not come from the sensor, but which application code can pass in.
-
-In summary, the user-configurable parameters fall broadly into three groups: those that define the LP filter, those responsible for the global tonemapping, and those responsible for re-applying the local contrast.
-
-[cols=",^"]
-|===
-| num_frames | The number of frames to accumulate. For DRC (in our terminology) this would take the value 1, but for multi-frame HDR we would suggest a value such as 8.
-| lp_filter_strength | The coefficient of the low pass IIR filter.
-| lp_fiter_threshold | A piecewise linear function that relates the pixel level to the threshold that is regarded as being "meaningful detail".
-| global_tonemap_points | A list of points in the input image histogram and targets in the output range where we wish to move them. We define an inter-quantile mean (`q` and `width`), a target as a proportion of the full output range (`target`) and maximum and minimum gains by which we are prepared to move the measured inter-quantile mean (as this prevents us from changing an image too drastically).
-| global_tonemap_strength | Strength of application of the global tonemap.
-| local_pos_strength | A piecewise linear function that defines the gain applied to local contrast when added back to the tonemapped LP image, for positive (bright) detail.
-| local_neg_strength | A piecewise linear function that defines the gain applied to local contrast when added back to the tonemapped LP image, for negative (dark) detail.
-| local_tonemap_strength | An overall gain applied to all local contrast that is added back.
-| local_colour_scale | A factor that allows the output colours to be affected more or less strongly.
-|===
-
-We note that the overall strength of the processing is best controlled by changing the `global_tonemap_strength` and `local_tonemap_strength` parameters.
-
-The full processing takes between 2 and 3 seconds for a 12MP image on a Raspberry Pi 4. The stage runs only on the still image capture, it ignores preview and video images. In particular, when accumulating multiple frames, the stage "swallows" the output images so that the application does not receive them, and finally sends through only the combined and processed image.
-
-Default `drc.json` file for DRC:
-
-----
-{
- "hdr" :
- {
- "num_frames" : 1,
- "lp_filter_strength" : 0.2,
- "lp_filter_threshold" : [ 0, 10.0 , 2048, 205.0, 4095, 205.0 ],
- "global_tonemap_points" :
- [
- { "q": 0.1, "width": 0.05, "target": 0.15, "max_up": 1.5, "max_down": 0.7 },
- { "q": 0.5, "width": 0.05, "target": 0.5, "max_up": 1.5, "max_down": 0.7 },
- { "q": 0.8, "width": 0.05, "target": 0.8, "max_up": 1.5, "max_down": 0.7 }
- ],
- "global_tonemap_strength" : 1.0,
- "local_pos_strength" : [ 0, 6.0, 1024, 2.0, 4095, 2.0 ],
- "local_neg_strength" : [ 0, 4.0, 1024, 1.5, 4095, 1.5 ],
- "local_tonemap_strength" : 1.0,
- "local_colour_scale" : 0.9
- }
-}
-----
-
-Example:
-
-Without DRC:
-
-image::images/nodrc.jpg[Image without DRC processing]
-
-With full-strength DRC: (use `libcamera-still -o test.jpg --post-process-file drc.json`)
-
-image::images/drc.jpg[Image with DRC processing]
-
-Default `hdr.json` file for HDR:
-
-----
-{
- "hdr" :
- {
- "num_frames" : 8,
- "lp_filter_strength" : 0.2,
- "lp_filter_threshold" : [ 0, 10.0 , 2048, 205.0, 4095, 205.0 ],
- "global_tonemap_points" :
- [
- { "q": 0.1, "width": 0.05, "target": 0.15, "max_up": 5.0, "max_down": 0.5 },
- { "q": 0.5, "width": 0.05, "target": 0.45, "max_up": 5.0, "max_down": 0.5 },
- { "q": 0.8, "width": 0.05, "target": 0.7, "max_up": 5.0, "max_down": 0.5 }
- ],
- "global_tonemap_strength" : 1.0,
- "local_pos_strength" : [ 0, 6.0, 1024, 2.0, 4095, 2.0 ],
- "local_neg_strength" : [ 0, 4.0, 1024, 1.5, 4095, 1.5 ],
- "local_tonemap_strength" : 1.0,
- "local_colour_scale" : 0.8
- }
-}
-----
-
-Example:
-
-Without HDR:
-
-image::images/nohdr.jpg[Image without HDR processing]
-
-With HDR: (use `libcamera-still -o test.jpg --ev -2 --denoise cdn_off --post-process-file hdr.json`)
-
-image::images/hdr.jpg[Image with DRC processing]
-
-==== `motion_detect` stage
-
-The `motion_detect` stage works by analysing frames from the low resolution image stream, which must be configured for it to work. It compares a region of interest ("roi") in the frame to the corresponding part of a previous one and if enough pixels are sufficiently different, that will be taken to indicate motion. The result is added to the metadata under "motion_detect.result".
-
-This stage has no dependencies on any 3rd party libraries.
-
-It has the following tunable parameters. The dimensions are always given as a proportion of the low resolution image size.
-
-[cols=",^"]
-|===
-| roi_x | x-offset of the region of interest for the comparison
-| roi_y | y-offset of the region of interest for the comparison
-| roi_width | width of the region of interest for the comparison
-| roi_height | height of the region of interest for the comparison
-| difference_m | Linear coefficient used to construct the threshold for pixels being different
-| difference_c | Constant coefficient used to construct the threshold for pixels being different according to threshold = difference_m * pixel_value + difference_c
-| frame_period | The motion detector will run only this many frames
-| hskip | The pixel tests are subsampled by this amount horizontally
-| vksip | The pixel tests are subsampled by this amount vertically
-| region_threshold | The proportion of pixels (or "regions") which must be categorised as different for them to count as motion
-| verbose | Print messages to the console, including when the "motion"/"no motion" status changes
-|===
-
-Default `motion_detect.json` configuration file:
-
-----
-{
- "motion_detect" :
- {
- "roi_x" : 0.1,
- "roi_y" : 0.1,
- "roi_width" : 0.8,
- "roi_height" : 0.8,
- "difference_m" : 0.1,
- "difference_c" : 10,
- "region_threshold" : 0.005,
- "frame_period" : 5,
- "hskip" : 2,
- "vskip" : 2,
- "verbose" : 0
- }
-}
-----
-
-Note that the field `difference_m` and `difference_c`, and the value of `region_threshold`, can be adjusted to make the algorithm more or less sensitive to motion.
-
-If the amount of computation needs to be reduced (perhaps you have other stages that need a larger low resolution image), the amount of computation can be reduced using the `hskip` and `vskip` parameters.
-
-To use the `motion_detect` stage you might enter the following example command:
-
-`libcamera-hello --lores-width 128 --lores-height 96 --post-process-file motion_detect.json`
diff --git a/documentation/asciidoc/computers/camera/libcamera_apps_post_processing_opencv.adoc b/documentation/asciidoc/computers/camera/libcamera_apps_post_processing_opencv.adoc
deleted file mode 100644
index 94ea88f77..000000000
--- a/documentation/asciidoc/computers/camera/libcamera_apps_post_processing_opencv.adoc
+++ /dev/null
@@ -1,107 +0,0 @@
-=== Post-Processing with OpenCV
-
-NOTE: These stages all require OpenCV to be installed on your system. You may also need to rebuild `libcamera-apps` with OpenCV support - please see the instructions for xref:camera_software.adoc#building-libcamera-and-libcamera-apps[building `libcamera-apps` for yourself].
-
-==== `sobel_cv` stage
-
-The `sobel_cv` stage has the following user-configurable parameters:
-
-[cols=",^"]
-|===
-| ksize | Kernel size of the Sobel filter
-|===
-
-
-Default `sobel_cv.json` file:
-
-----
-{
- "sobel_cv":
- {
- "ksize": 5
- }
-}
-----
-
-Example:
-
-image::images/sobel.jpg[Image with Sobel filter]
-
-==== `face_detect_cv` stage
-
-This stage uses the OpenCV Haar classifier to detect faces in an image. It returns the face locations in the metadata (under the key "face_detect.results"), and optionally draws them on the image.
-
-The `face_detect_cv` stage has the following user-configurable parameters:
-
-[cols=",^"]
-|===
-| cascade_name | Name of the file where the Haar cascade can be found.
-| scaling_factor | Determines range of scales at which the image is searched for faces.
-| min_neighbors | Minimum number of overlapping neighbours required to count as a face.
-| min_size | Minimum face size.
-| max_size | Maximum face size.
-| refresh_rate | How many frames to wait before trying to re-run the face detector.
-| draw_features | Whether to draw face locations on the returned image.
-|===
-
-The `face_detect_cv" stage runs only during preview and video capture; it ignores still image capture. It runs on the low resolution stream which would normally be configured to a resolution from about 320x240 to 640x480 pixels.
-
-Default `face_detect_cv.json` file:
-
-----
-{
- "face_detect_cv":
- {
- "cascade_name" : "/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml",
- "scaling_factor" : 1.1,
- "min_neighbors" : 2,
- "min_size" : 32,
- "max_size" : 256,
- "refresh_rate" : 1,
- "draw_features" : 1
- }
-}
-----
-
-Example:
-
-image::images/face_detect.jpg[Image showing faces]
-
-==== `annotate_cv` stage
-
-This stage allows text to be written into the top corner of images. It allows the same `%` substitutions as the `--info-text` parameter.
-
-The stage does not output any metadata, but if it finds metadata under the key "annotate.text" it will write this text in place of anything in the JSON configuration file. This allows other post-processing stages to pass it text strings to be written onto the top of the images.
-
-The `annotate_cv` stage has the following user-configurable parameters:
-
-[cols=",^"]
-|===
-| text | The text string to be written.
-| fg | Foreground colour.
-| bg | Background colour.
-| scale | A number proportional to the size of the text.
-| thickness | A number that determines the thickness of the text.
-| alpha | The amount of "alpha" to apply when overwriting the background pixels.
-|===
-
-Default `annotate_cv.json` file:
-
-----
-{
- "annotate_cv" :
- {
- "text" : "Frame %frame exp %exp ag %ag dg %dg",
- "fg" : 255,
- "bg" : 0,
- "scale" : 1.0,
- "thickness" : 2,
- "alpha" : 0.3
- }
-}
-----
-
-Example:
-
-image::images/annotate.jpg[Image with text overlay]
-
diff --git a/documentation/asciidoc/computers/camera/libcamera_apps_post_processing_tflite.adoc b/documentation/asciidoc/computers/camera/libcamera_apps_post_processing_tflite.adoc
deleted file mode 100644
index 379762281..000000000
--- a/documentation/asciidoc/computers/camera/libcamera_apps_post_processing_tflite.adoc
+++ /dev/null
@@ -1,186 +0,0 @@
-=== Post-Processing with TensorFlow Lite
-
-NOTE: These stages require TensorFlow Lite (TFLite) libraries to be installed that export the {cpp} API. Unfortunately the TFLite libraries are not normally distributed conveniently in this form, however, one place where they can be downloaded is https://lindevs.com/install-precompiled-tensorflow-lite-on-raspberry-pi/[lindevs.com]. Please follow the installation instructions given on that page. Subsequently you may need to recompile `libcamera-apps` with TensorFlow Lite support - please follow the instructions for xref:camera_software.adoc#building-libcamera-and-libcamera-apps[building `libcamera-apps` for yourself].
-
-==== `object_classify_tf` stage
-
-`object_classify_tf` uses a Google MobileNet v1 model to classify objects in the camera image. It can be obtained from https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz[], which will need to be uncompressed. You will also need the `labels.txt` file which can be found in https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_1.0_224_frozen.tgz[].
-
-This stage has the following configurable parameters.
-
-[cols=",^"]
-|===
-| top_n_results | How many results to show
-| refresh_rate | The number of frames that must elapse before the model is re-run
-| threshold_high | Confidence threshold (between 0 and 1) where objects are considered as being present
-| threshold_low | Confidence threshold which objects must drop below before being discarded as matches
-| model_file | Pathname to the tflite model file
-| labels_file | Pathname to the file containing the object labels
-| display_labels | Whether to display the object labels on the image. Note that this causes `annotate.text` metadata to be inserted so that the text can be rendered subsequently by the `annotate_cv` stage
-| verbose | Output more information to the console
-|===
-
-Example `object_classify_tf.json` file:
-
-----
-{
- "object_classify_tf":
- {
- "top_n_results" : 2,
- "refresh_rate" : 30,
- "threshold_high" : 0.6,
- "threshold_low" : 0.4,
- "model_file" : "/home/pi/models/mobilenet_v1_1.0_224_quant.tflite",
- "labels_file" : "/home/pi/models/labels.txt",
- "display_labels" : 1
- },
- "annotate_cv" :
- {
- "text" : "",
- "fg" : 255,
- "bg" : 0,
- "scale" : 1.0,
- "thickness" : 2,
- "alpha" : 0.3
- }
-}
-----
-
-The stage operates on a low resolution stream image of size 224x224, so it could be used as follows:
-
-`libcamera-hello --post-process-file object_classify_tf.json --lores-width 224 --lores-height 224`
-
-image::images/classify.jpg[Image showing object classifier results]
-
-==== `pose_estimation_tf` stage
-
-`pose_estimation_tf` uses a Google MobileNet v1 model `posenet_mobilenet_v1_100_257x257_multi_kpt_stripped.tflite` that can be found at https://github.com/Qengineering/TensorFlow_Lite_Pose_RPi_32-bits[].
-
-This stage has the following configurable parameters.
-
-[cols=",^"]
-|===
-| refresh_rate | The number of frames that must elapse before the model is re-run
-| model_file | Pathname to the tflite model file
-| verbose | Output more information to the console
-|===
-
-Also provided is a separate `plot_pose_cv` stage which can be included in the JSON configuration file and which will draw the detected pose onto the main image. This stage has the following configuration parameters.
-
-[cols=",^"]
-|===
-| confidence_threshold | A confidence level determining how much is drawn. This number can be less than zero; please refer to the GitHub repository for more information.
-|===
-
-Example `pose_estimation_tf.json` file:
-
-----
-{
- "pose_estimation_tf":
- {
- "refresh_rate" : 5,
- "model_file" : "posenet_mobilenet_v1_100_257x257_multi_kpt_stripped.tflite"
- },
- "plot_pose_cv" :
- {
- "confidence_threshold" : -0.5
- }
-}
-----
-
-The stage operates on a low resolution stream image of size 257x257 (but which must be rounded up to 258x258 for YUV420 images), so it could be used as follows:
-
-`libcamera-hello --post-process-file pose_estimation_tf.json --lores-width 258 --lores-height 258`
-
-image::images/pose.jpg[Image showing pose estimation results]
-
-==== `object_detect_tf` stage
-
-`object_detect_tf` uses a Google MobileNet v1 SSD (Single Shot Detector) model. The model and labels files can be downloaded from https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip[].
-
-This stage has the following configurable parameters.
-
-[cols=",^"]
-|===
-| refresh_rate | The number of frames that must elapse before the model is re-run
-| model_file | Pathname to the tflite model file
-| labels_file | Pathname to the file containing the list of labels
-| confidence_threshold | Minimum confidence threshold before a match is accepted.
-| overlap_threshold | Determines the amount of overlap between matches for them to be merged as a single match.
-| verbose | Output more information to the console
-|===
-
-Also provided is a separate `object_detect_draw_cv` stage which can be included in the JSON configuration file and which will draw the detected objects onto the main image. This stage has the following configuration parameters.
-
-[cols=",^"]
-|===
-| line_thickness | Thickness of the bounding box lines
-| font_size | Size of the font used for the label
-|===
-
-Example `object_detect_tf.json` file:
-
-----
-{
- "object_detect_tf":
- {
- "number_of_threads" : 2,
- "refresh_rate" : 10,
- "confidence_threshold" : 0.5,
- "overlap_threshold" : 0.5,
- "model_file" : "/home/pi/models/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29/detect.tflite",
- "labels_file" : "/home/pi/models/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29/labelmap.txt",
- "verbose" : 1
- },
- "object_detect_draw_cv":
- {
- "line_thickness" : 2
- }
-}
-----
-
-The stage operates on a low resolution stream image of size 300x300. The following example would pass a 300x300 crop to the detector from the centre of the 400x300 low resolution image.
-
-`libcamera-hello --post-process-file object_detect_tf.json --lores-width 400 --lores-height 300`
-
-image::images/detection.jpg[Image showing detected objects]
-
-==== `segmentation_tf` stage
-
-`segmentation_tf` uses a Google MobileNet v1 model. The model file can be downloaded from https://tfhub.dev/tensorflow/lite-model/deeplabv3/1/metadata/2?lite-format=tflite[], whilst the labels file can be found in the `assets` folder, named `segmentation_labels.txt`.
-
-This stage runs on an image of size 257x257. Because YUV420 images must have even dimensions, the low resolution image should be at least 258 pixels in both width and height. The stage adds a vector of 257x257 values to the image metadata where each value indicates which of the categories (listed in the labels file) that the pixel belongs to. Optionally, a representation of the segmentation can be drawn into the bottom right corner of the image.
-
-This stage has the following configurable parameters.
-
-[cols=",^"]
-|===
-| refresh_rate | The number of frames that must elapse before the model is re-run
-| model_file | Pathname to the tflite model file
-| labels_file | Pathname to the file containing the list of labels
-| threshold | When verbose is set, the stage prints to the console any labels where the number of pixels with that label (in the 257x257 image) exceeds this threshold.
-| draw | Set this value to draw the segmentation map into the bottom right hand corner of the image.
-| verbose | Output more information to the console
-|===
-
-Example `segmentation_tf.json` file:
-
-----
-{
- "segmentation_tf":
- {
- "number_of_threads" : 2,
- "refresh_rate" : 10,
- "model_file" : "/home/pi/models/lite-model_deeplabv3_1_metadata_2.tflite",
- "labels_file" : "/home/pi/models/segmentation_labels.txt",
- "draw" : 1,
- "verbose" : 1
- }
-}
-----
-
-This example takes a square camera image and reduces it to 258x258 pixels in size. In fact the stage also works well when non-square images are squashed unequally down to 258x258 pixels without cropping. The image below shows the segmentation map in the bottom right hand corner.
-
-`libcamera-hello --post-process-file segmentation_tf.json --lores-width 258 --lores-height 258 --viewfinder-width 1024 --viewfinder-height 1024`
-
-image::images/segmentation.jpg[Image showing segmentation in the bottom right corner]
diff --git a/documentation/asciidoc/computers/camera/libcamera_apps_post_processing_writing.adoc b/documentation/asciidoc/computers/camera/libcamera_apps_post_processing_writing.adoc
deleted file mode 100644
index cda7ec2cc..000000000
--- a/documentation/asciidoc/computers/camera/libcamera_apps_post_processing_writing.adoc
+++ /dev/null
@@ -1,55 +0,0 @@
-=== Writing your own Post-Processing Stages
-
-The `libcamera-apps` _post-processing framework_ is not only very flexible but is meant to make it easy for users to create their own custom post-processing stages. It is easy to include algorithms and routines that are already available both in OpenCV and TensorFlow Lite.
-
-We are keen to accept and distribute interesting post-processing stages contributed by our users.
-
-==== Basic Post-Processing Stages
-
-Post-processing stages have a simple API, and users can create their own by deriving from the `PostProcessingStage` class. The member functions that must be implemented are listed below, though note that some may be unnecessary for simple stages.
-
-[cols=",^"]
-|===
-| `char const *Name() const` | Return the name of the stage. This is used to match against stages listed in the JSON post-processing configuration file.
-| `void Read(boost::property_tree::ptree const &params)` | This method will read any of the stage's configuration parameters from the JSON file.
-| `void AdjustConfig(std::string const &use_case, StreamConfiguration *config)` | This method gives stages a chance to influence the configuration of the camera, though it is not often necessary to implement it.
-| `void Configure()` | This is called just after the camera has been configured. It is a good moment to check that the stage has access to the streams it needs, and it can also allocate any resources that it may require.
-| `void Start()` | Called when the camera starts. This method is often not required.
-| `bool Process(CompletedRequest &completed_request)` | This method presents completed camera requests for post-processing and is where the necessary pixel manipulations or image analysis will happen. The function returns `true` if the post-processing framework is _not_ to deliver this request on to the application.
-| `void Stop()` | Called when the camera is stopped. Normally a stage would need to shut down any processing that might be running (for example, if it started any asynchronous threads).
-| `void Teardown()` | Called when the camera configuration is torn down. This would typically be used to de-allocate any resources that were set up in the `Configure` method.
-|===
-
-Some helpful hints on writing your own stages:
-
-* Generally, the `Process` method should not take too long as it will block the imaging pipeline and may cause stuttering. When time-consuming algorithms need to be run, it may be helpful to delegate them to another asynchronous thread.
-
-* When delegating work to another thread, the way image buffers are handled currently means that they will need to be copied. For some applications, such as image analysis, it may be viable to use the "low resolution" image stream rather than full resolution images.
-
-* The post-processing framework adds multi-threading parallelism on a per-frame basis. This is helpful in improving throughput if you want to run on every single frame. Some functions may supply parallelism within each frame (such as OpenCV and TFLite). In these cases it would probably be better to serialise the calls so as to suppress the per-frame parallelism.
-
-* Most streams, and in particular the low resolution stream, have YUV420 format. These formats are sometimes not ideal for OpenCV or TFLite so there may sometimes need to be a conversion step.
-
-* When images need to be altered, doing so in place is much the easiest strategy.
-
-* Implementations of any stage should always include a `RegisterStage` call. This registers your new stage with the system so that it will be correctly identified when listed in a JSON file. You will need to add it to the post-processing folder's `CMakeLists.txt` too, of course.
-
-The easiest example to start with is `negate_stage.cpp`, which "negates" an image (turning black white, and vice versa). Aside from a small amount of derived class boiler-plate, it contains barely half a dozen lines of code.
-
-Next up in complexity is `sobel_cv_stage.cpp`. This implements a Sobel filter using just a few lines of OpenCV functions.
-
-==== TFLite Stages
-
-For stages wanting to analyse images using TensorFlowLite we provide the `TfStage` base class. This provides a certain amount of boilerplate code and makes it much easier to implement new TFLite-based stages by deriving from this class. In particular, it delegates the execution of the model to another thread, so that the full camera framerate is still maintained - it is just the model that will run at a lower framerate.
-
-The `TfStage` class implements all the public `PostProcessingStage` methods that normally have to be redefined, with the exception of the `Name` method which must still be supplied. It then presents the following virtual methods which derived classes should implement instead.
-
-[cols=",^"]
-|===
-| `void readExtras()` | The base class reads the named model and certain other parameters like the `refresh_rate`. This method can be supplied to read any extra parameters for the derived stage. It is also a good place to check that the loaded model looks as expected (i.e. has the right input and output dimensions).
-| `void checkConfiguration()` | The base class fetches the low resolution stream which TFLite will operate on, and the full resolution stream in case the derived stage needs it. This method is provided for the derived class to check that the streams it requires are present. In case any required stream is missing, it may elect simply to avoid processing any images, or it may signal a fatal error.
-| `void interpretOutputs()` | The TFLite model runs asynchronously so that it can run "every few frames" without holding up the overall framerate. This method gives the derived stage the chance to read and interpret the model's outputs, running right after the model itself and in that same thread.
-| `void applyResults()` | Here we are running once again in the main thread and so this method should run reasonably quickly so as not to hold up the supply of frames to the application. It is provided so that the last results of the model (which might be a few frames ago) can be applied to the current frame. Typically this would involve attaching metadata to the image, or perhaps drawing something onto the main image.
-|===
-
-For further information, readers are referred to the supplied example code implementing the `ObjectClassifyTfStage` and `PoseEstimationTfStage` classes.
diff --git a/documentation/asciidoc/computers/camera/libcamera_apps_writing.adoc b/documentation/asciidoc/computers/camera/libcamera_apps_writing.adoc
deleted file mode 100644
index c923481e3..000000000
--- a/documentation/asciidoc/computers/camera/libcamera_apps_writing.adoc
+++ /dev/null
@@ -1,62 +0,0 @@
-=== Understanding and Writing your own Apps
-
-`libcamera-apps` are not supposed to be a full set of all the applications with all the features that anyone could ever need. Instead, they are supposed to be easy to understand, such that users who require slightly different behaviour can implement it for themselves.
-
-All the applications work by having a simple event loop which receives a message with a new set of frames from the camera system. This set of frames is called a `CompletedRequest`. It contains all the images that have been derived from that single camera frame (so perhaps a low resolution image in addition to the full size output), as well as metadata from the camera system and further metadata from the post-processing system.
-
-==== `libcamera-hello`
-
-`libcamera-hello` is much the easiest application to understand. The only thing it does with the camera images is extract the `CompletedRequestPtr` (a shared pointer to the `CompletedRequest`) from the message:
-
-----
-    CompletedRequestPtr &completed_request = std::get<CompletedRequestPtr>(msg.payload);
-----
-
-and forward it to the preview window:
-
-----
- app.ShowPreview(completed_request, app.ViewfinderStream());
-----
-
-One important thing to note is that every `CompletedRequest` must be recycled back to the camera system so that the buffers can be reused, otherwise it will simply run out of buffers in which to receive new camera frames. This recycling process happens automatically when all references to the `CompletedRequest` are dropped, using {cpp}'s _shared pointer_ and _custom deleter_ mechanisms.
-
-In `libcamera-hello` therefore, two things must happen for the `CompletedRequest` to be returned to the camera.
-
-1. The event loop must go round again so that the message (`msg` in the code), which is holding a reference to the shared pointer, is dropped.
-
-2. The preview thread, which takes another reference to the `CompletedRequest` when `ShowPreview` is called, must be called again with a new `CompletedRequest`, causing the previous one to be dropped.
-
-==== `libcamera-vid`
-
-`libcamera-vid` is not unlike `libcamera-hello`, but it adds a codec to the event loop and the preview. Before the event loop starts, we must configure that encoder with a callback which says what happens to the buffer containing the encoded image data.
-
-----
- app.SetEncodeOutputReadyCallback(std::bind(&Output::OutputReady, output.get(), _1, _2, _3, _4));
-----
-
-Here we send the buffer to the `Output` object which may write it to a file, or send it over the network, according to our choice when we started the application.
-
-The encoder also takes a new reference to the `CompletedRequest`, so once the event loop, the preview window and the encoder all drop their references, the `CompletedRequest` will be recycled automatically back to the camera system.
-
-==== `libcamera-raw`
-
-`libcamera-raw` is not so very different from `libcamera-vid`. It too uses an encoder, although this time it is a "dummy" encoder called the `NullEncoder`. This just treats the input image directly as the output buffer and is careful not to drop its reference to the input until the output callback has dealt with it first.
-
-This time, however, we do not forward anything to the preview window, though we could have displayed the (processed) video stream if we had wanted.
-
-The use of the `NullEncoder` is possibly overkill in this application, as we could probably just send the image straight to the `Output` object. However, it serves to underline the general principle that it is normally a bad idea to do too much work directly in the event loop, and time-consuming processes are often better left to other threads.
-
-==== `libcamera-jpeg`
-
-We discuss `libcamera-jpeg` rather than `libcamera-still` as the basic idea (that of switching the camera from preview into capture mode) is the same, and `libcamera-jpeg` has far fewer additional options (such as timelapse capture) that serve to distract from the basic function.
-
-`libcamera-jpeg` starts the camera in preview mode in the usual way, but at the appropriate moment stops it and switches to still capture:
-
-----
- app.StopCamera();
- app.Teardown();
- app.ConfigureStill();
- app.StartCamera();
-----
-
-Then the event loop will grab the first frame that emerges once it's no longer in preview mode, and save this as a JPEG.
diff --git a/documentation/asciidoc/computers/camera/libcamera_detect.adoc b/documentation/asciidoc/computers/camera/libcamera_detect.adoc
deleted file mode 100644
index e6168a89f..000000000
--- a/documentation/asciidoc/computers/camera/libcamera_detect.adoc
+++ /dev/null
@@ -1,22 +0,0 @@
-=== `libcamera-detect`
-
-`libcamera-detect` is not supplied by default in any Raspberry Pi OS distribution, but can be built by users who have xref:camera_software.adoc#post-processing-with-tensorflow-lite[installed TensorFlow Lite]. In this case, please refer to the xref:camera_software.adoc#building-libcamera-and-libcamera-apps[`libcamera-apps` build instructions]. You will need to run `cmake` with `-DENABLE_TFLITE=1`.
-
-This application runs a preview window and monitors the contents using a Google MobileNet v1 SSD (Single Shot Detector) neural network that has been trained to identify about 80 classes of objects using the Coco dataset. It should recognise people, cars, cats and many other objects.
-
-It starts by running a preview window, and whenever the target object is detected it will perform a full resolution JPEG capture, before returning back to the preview mode to continue monitoring. It provides a couple of additional command line options that do not apply elsewhere:
-
-`--object <name>`
-
-Detect objects with the given `<name>`. The name should be taken from the model's label file.
-
-`--gap <number>`
-
-Wait at least this many frames after a capture before performing another. This is necessary because the neural network does not run on every frame, so it is best to give it a few frames to run again before considering another capture.
-
-Please refer to the xref:camera_software.adoc#object_detect_tf-stage[TensorFlow Lite object detector] section for more general information on how to obtain and use this model. But as an example, you might spy secretly on your cats while you are away with:
-
-[,bash]
-----
-libcamera-detect -t 0 -o cat%04d.jpg --lores-width 400 --lores-height 300 --post-process-file object_detect_tf.json --object cat
-----
diff --git a/documentation/asciidoc/computers/camera/libcamera_differences.adoc b/documentation/asciidoc/computers/camera/libcamera_differences.adoc
index 4fae4e4f0..1205db97e 100644
--- a/documentation/asciidoc/computers/camera/libcamera_differences.adoc
+++ b/documentation/asciidoc/computers/camera/libcamera_differences.adoc
@@ -1,42 +1,42 @@
-=== Differences compared to _Raspicam_ Apps
+=== Differences between `rpicam` and `raspicam`
-Whilst the `libcamera-apps` attempt to emulate most features of the legacy _Raspicam_ applications, there are some differences. Here we list the principal ones that users are likely to notice.
+The `rpicam-apps` emulate most features of the legacy `raspicam` applications. However, users may notice the following differences:
-* The use of Boost `program_options` doesn't allow multi-character short versions of options, so where these were present they have had to be dropped. The long form options are named the same, and any single character short forms are preserved.
+* Boost `program_options` don't allow multi-character short versions of options, so where these were present they have had to be dropped. The long form options are named the same way, and any single-character short forms are preserved.
-* `libcamera-still` and `libcamera-jpeg` do not show the capture image in the preview window.
+* `rpicam-still` and `rpicam-jpeg` do not show the captured image in the preview window.
-* `libcamera` performs its own camera mode selection, so the `--mode` option is not supported. It deduces camera modes from the resolutions requested. There is still work ongoing in this area.
+* `rpicam-apps` removed the following `raspicam` features:
++
+** opacity (`--opacity`)
+** image effects (`--imxfx`)
+** colour effects (`--colfx`)
+** annotation (`--annotate`, `--annotateex`)
+** dynamic range compression, or DRC (`--drc`)
+** stereo (`--stereo`, `--decimate` and `--3dswap`)
+** image stabilisation (`--vstab`)
+** demo modes (`--demo`)
++
+xref:camera_software.adoc#post-processing-with-rpicam-apps[Post-processing] replaced many of these features.
-* The following features of the legacy apps are not supported as the code has to run on the ARM now. But note that a number of these effects are now provided by the xref:camera_software.adoc#post-processing[post-processing] mechanism.
- - opacity (`--opacity`)
- - image effects (`--imxfx`)
- - colour effects (`--colfx`)
- - annotation (`--annotate`, `--annotateex`)
- - dynamic range compression, or DRC (`--drc`)
+* `rpicam-apps` removed xref:camera_software.adoc#rotation[`rotation`] option support for 90° and 270° rotations.
-* stereo (`--stereo`, `--decimate` and `--3dswap`). There is no support in `libcamera` for stereo currently.
+* `raspicam` conflated metering and exposure; `rpicam-apps` separates these options.
+* To disable Auto White Balance (AWB) in `rpicam-apps`, set a pair of colour gains with xref:camera_software.adoc#awbgains[`awbgains`] (e.g. `1.0,1.0`).
-* There is no image stabilisation (`--vstab`) (though the legacy implementation does not appear to do very much).
+* `rpicam-apps` cannot set Auto White Balance (AWB) into greyworld mode for NoIR camera modules. Instead, pass the xref:camera_software.adoc#tuning-file[`tuning-file`] option a NoIR-specific tuning file like `imx219_noir.json`.
-* There are no demo modes (`--demo`).
+* `rpicam-apps` does not provide explicit control of digital gain. Instead, the xref:camera_software.adoc#gain[`gain`] option sets it implicitly.
-* The transformations supported are those that do not involve a transposition. 180 degree rotations, therefore, are among those permitted but 90 and 270 degree rotations are not.
+* `rpicam-apps` removed the `--ISO` option. Instead, calculate the gain corresponding to the ISO value required. Vendors can provide mappings of gain to ISO.
-* There are some differences in the metering, exposure and AWB options. In particular the legacy apps conflate metering (by which we mean the "metering mode") and the exposure (by which we now mean the "exposure profile"). With regards to AWB, to turn it off you have to set a pair of colour gains (e.g. `--awbgains 1.0,1.0`).
+* `rpicam-apps` does not support setting a flicker period.
-* `libcamera` has no mechanism to set the AWB into "grey world" mode, which is useful for "NOIR" camera modules. However, tuning files are supplied which switch the AWB into the correct mode, so for example, you could use `libcamera-hello --tuning-file /usr/share/libcamera/ipa/raspberrypi/imx219_noir.json`.
-
-* There is support for setting the exposure time (`--shutter`) and analogue gain (`--analoggain` or just `--gain`). There is no explicit control of the digital gain; you get this if the gain requested is larger than the analogue gain can deliver by itself.
-
-* libcamera has no understanding of ISO, so there is no `--ISO` option. Users should calculate the gain corresponding to the ISO value required (usually a manufacturer will tell you that, for example, a gain of 1 corresponds to an ISO of 40), and use the `--gain` parameter instead.
-
-* There is no support for setting the flicker period yet.
-
-* `libcamera-still` does not support burst capture. In fact, because the JPEG encoding is not multi-threaded and pipelined it would produce quite poor framerates. Instead, users are advised to consider using `libcamera-vid` in MJPEG mode instead (and `--segment 1` can be used to force each frame into a separate JPEG file).
-
-* `libcamera` uses open source drivers for all the image sensors, so the mechanism for enabling or disabling on-sensor DPC (Defective Pixel Correction) is different. The imx477 (HQ cam) driver enables on-sensor DPC by default; to disable it the user should, as root, enter
+* `rpicam-still` does not support burst capture. Instead, consider using `rpicam-vid` in MJPEG mode with `--segment 1` to force each frame into a separate file.
+* `rpicam-apps` uses open source drivers for all image sensors, so the mechanism for enabling or disabling on-sensor Defective Pixel Correction (DPC) is different. The imx477 driver on the Raspberry Pi HQ Camera enables on-sensor DPC by default. To disable on-sensor DPC on the HQ Camera, run the following command:
++
+[source,console]
----
-echo 0 > /sys/module/imx477/parameters/dpc_enable
+$ echo 0 | sudo tee /sys/module/imx477/parameters/dpc_enable
----
diff --git a/documentation/asciidoc/computers/camera/libcamera_hello.adoc b/documentation/asciidoc/computers/camera/libcamera_hello.adoc
deleted file mode 100644
index 6ca12dc6a..000000000
--- a/documentation/asciidoc/computers/camera/libcamera_hello.adoc
+++ /dev/null
@@ -1,70 +0,0 @@
-=== `libcamera-hello`
-
-`libcamera-hello` is the equivalent of a "hello world" application for the camera. It starts the camera, displays a preview window, and does nothing else. For example
-
-[,bash]
-----
-libcamera-hello
-----
-should display a preview window for about 5 seconds. The `-t <duration>` option lets the user select how long the window is displayed, where `<duration>` is given in milliseconds. To run the preview indefinitely, use:
-
-[,bash]
-----
-libcamera-hello -t 0
-----
-
-The preview can be halted either by clicking the window's close button, or using `Ctrl-C` in the terminal.
-
-==== Options
-
-`libcamera-apps` uses a 3rd party library to interpret command line options. This includes _long form_ options where the option name consists of more than one character preceded by `--`, and _short form_ options which can only be a single character preceded by a single `-`. For the most part option names are chosen to match those used by the legacy `raspicam` applications with the exception that we can no longer handle multi-character option names with a single `-`. Any such legacy options have been dropped and the long form with `--` must be used instead.
-
-The options are classified broadly into 3 groups, namely those that are common, those that are specific to still images, and those that are for video encoding. They are supported in an identical manner across all the applications where they apply.
-
-Please refer to the xref:camera_software.adoc#common-command-line-options[command line options documentation] for a complete list.
-
-==== The Tuning File
-
-Raspberry Pi's `libcamera` implementation includes a _tuning file_ for each different type of camera module. This is a file that describes or "tunes" the parameters that will be passed to the algorithms and hardware to produce the best image quality. `libcamera` is only able to determine automatically the image sensor being used, not the module as a whole - even though the whole module affects the "tuning".
-
-For this reason it is sometimes necessary to override the default tuning file for a particular sensor.
-
-For example, the NOIR (no IR-filter) versions of sensors require different AWB settings to the standard versions, so the IMX219 NOIR should be run using
-
-[,bash]
-----
-libcamera-hello --tuning-file /usr/share/libcamera/ipa/raspberrypi/imx219_noir.json
-----
-
-If you are using a Soho Enterprises SE327M12 module you should use
-
-[,bash]
-----
-libcamera-hello --tuning-file /usr/share/libcamera/ipa/raspberrypi/se327m12.json
-----
-
-Notice how this also means that users can copy an existing tuning file and alter it according to their own preferences, so long as the `--tuning-file` parameter is pointed to the new version.
-
-Finally, the `--tuning-file` parameter, in common with other `libcamera-hello` command line options, applies identically across all the `libcamera-apps`.
-
-==== Preview Window
-
-Most of the `libcamera-apps` display a preview image in a window. When X Windows is not running it will draw directly to the display using Linux DRM (Direct Rendering Manager), otherwise it will attempt to use X Windows. Both paths use zero-copy buffer sharing with the GPU, and a consequence of this is that X forwarding is _not_ supported.
-
-For this reason there is a third kind of preview window which does support X forwarding, and can be requested with the `--qt-preview` option. This implementation does not benefit from zero-copy buffer sharing nor from 3D acceleration which makes it computationally expensive (especially for large previews), and so is not normally recommended.
-
-NOTE: Older systems using Gtk2 may, when linked with OpenCV, produce `Glib-GObject` errors and fail to show the Qt preview window. In this case please (as root) edit the file `/etc/xdg/qt5ct/qt5ct.conf` and replace the line containing `style=gtk2` with `style=gtk3`.
-
-The preview window can be suppressed entirely with the `-n` (`--nopreview`) option.
-
-The `--info-text` option allows the user to request that certain helpful image information is displayed on the window title bar using "% directives". For example
-
-[,bash]
-----
-libcamera-hello --info-text "red gain %rg, blue gain %bg"
-----
-will display the current red and blue gain values.
-
-For the HQ camera, use `--info-text "%focus"` to display the focus measure, which will be helpful for focusing the lens.
-
-A full description of the `--info-text` parameter is given in the xref:camera_software.adoc#common-command-line-options[command line options documentation].
diff --git a/documentation/asciidoc/computers/camera/libcamera_jpeg.adoc b/documentation/asciidoc/computers/camera/libcamera_jpeg.adoc
deleted file mode 100644
index 145fb24fe..000000000
--- a/documentation/asciidoc/computers/camera/libcamera_jpeg.adoc
+++ /dev/null
@@ -1,48 +0,0 @@
-=== `libcamera-jpeg`
-
-`libcamera-jpeg` is a simple still image capture application. It deliberately avoids some of the additional features of `libcamera-still` which attempts to emulate `raspistill` more fully. As such the code is significantly easier to understand, and in practice still provides many of the same features.
-
-To capture a full resolution JPEG image use
-
-[,bash]
-----
-libcamera-jpeg -o test.jpg
-----
-which will display a preview for about 5 seconds, and then capture a full resolution JPEG image to the file `test.jpg`.
-
-The `-t ` option can be used to alter the length of time the preview shows, and the `--width` and `--height` options will change the resolution of the captured still image. For example
-
-[,bash]
-----
-libcamera-jpeg -o test.jpg -t 2000 --width 640 --height 480
-----
-will capture a VGA sized image.
-
-==== Exposure Control
-
-All the `libcamera-apps` allow the user to run the camera with fixed shutter speed and gain. For example
-
-[,bash]
-----
-libcamera-jpeg -o test.jpg -t 2000 --shutter 20000 --gain 1.5
-----
-would capture an image with an exposure of 20ms and a gain of 1.5x. Note that the gain will be applied as _analogue gain_ within the sensor up until it reaches the maximum analogue gain permitted by the kernel sensor driver, after which the remainder will be applied as digital gain.
-
-Raspberry Pi's AEC/AGC algorithm allows applications to specify _exposure compensation_, that is, the ability to make images darker or brighter by a given number of _stops_, as follows
-
-[,bash]
-----
-libcamera-jpeg --ev -0.5 -o darker.jpg
-libcamera-jpeg --ev 0 -o normal.jpg
-libcamera-jpeg --ev 0.5 -o brighter.jpg
-----
-
-===== Further remarks on Digital Gain
-
-Digital gain is applied by the ISP (the Image Signal Processor), not by the sensor. The digital gain will always be very close to 1.0 unless:
-
-* The total gain requested (either by the `--gain` option, or by the exposure profile in the camera tuning) exceeds that which can be applied as analogue gain within the sensor. Only the extra gain required will be applied as digital gain.
-
-* One of the colour gains is less than 1 (note that colour gains are applied as digital gain too). In this case the advertised digital gain will settle to 1 / min(red_gain, blue_gain). This actually means that one of the colour channels - just not the green one - is having unity digital gain applied to it.
-
-* The AEC/AGC is changing. When the AEC/AGC is moving the digital gain will typically vary to some extent to try and smooth out any fluctuations, but it will quickly settle back to its "normal" value.
diff --git a/documentation/asciidoc/computers/camera/libcamera_known_issues.adoc b/documentation/asciidoc/computers/camera/libcamera_known_issues.adoc
deleted file mode 100644
index 2ec027cd2..000000000
--- a/documentation/asciidoc/computers/camera/libcamera_known_issues.adoc
+++ /dev/null
@@ -1,7 +0,0 @@
-=== Known Issues
-
-We are aware of the following issues in `libcamera` and `libcamera-apps`.
-
-* On Raspberry Pi 3 (and earlier devices) the graphics hardware can only support images up to 2048x2048 pixels which places a limit on the camera images that can be resized into the preview window. In practice this means that video encoding of images larger than 2048 pixels across (which would necessarily be using a codec other than h.264) will not support, or will produce corrupted, preview images. For Raspberry Pi 4 the limit is 4096 pixels. We would recommend using the `-n` (no preview) option for the time being.
-
-* The preview window shows some display tearing when using X windows. This is not likely to be fixable.
diff --git a/documentation/asciidoc/computers/camera/libcamera_options_common.adoc b/documentation/asciidoc/computers/camera/libcamera_options_common.adoc
deleted file mode 100644
index 165825fde..000000000
--- a/documentation/asciidoc/computers/camera/libcamera_options_common.adoc
+++ /dev/null
@@ -1,555 +0,0 @@
-=== Common Command Line Options
-
-The following options apply across all the `libcamera-apps` with similar or identical semantics, unless noted otherwise.
-
-----
- --help, -h Print help information for the application
-----
-
-The `--help` option causes every application to print its full set of command line options with a brief synopsis of each, and then quit.
-
-----
- --version Print out a software version number
-----
-
-All `libcamera-apps` will, when they see the `--version` option, print out a version string both for `libcamera` and `libcamera-apps` and then quit, for example:
-
-----
-libcamera-apps build: ca559f46a97a 27-09-2021 (14:10:24)
-libcamera build: v0.0.0+3058-c29143f7
-----
-
-----
- --list-cameras List the cameras available for use
-----
-
-The `--list-cameras` will display the available cameras attached to the board that can be used by the application. This option also lists the sensor modes supported by each camera. For example:
-
-----
-Available cameras
------------------
-0 : imx219 [3280x2464] (/base/soc/i2c0mux/i2c@1/imx219@10)
- Modes: 'SRGGB10_CSI2P' : 640x480 [206.65 fps - (1000, 752)/1280x960 crop]
- 1640x1232 [41.85 fps - (0, 0)/3280x2464 crop]
- 1920x1080 [47.57 fps - (680, 692)/1920x1080 crop]
- 3280x2464 [21.19 fps - (0, 0)/3280x2464 crop]
- 'SRGGB8' : 640x480 [206.65 fps - (1000, 752)/1280x960 crop]
- 1640x1232 [41.85 fps - (0, 0)/3280x2464 crop]
- 1920x1080 [47.57 fps - (680, 692)/1920x1080 crop]
- 3280x2464 [21.19 fps - (0, 0)/3280x2464 crop]
-1 : imx477 [4056x3040] (/base/soc/i2c0mux/i2c@1/imx477@1a)
- Modes: 'SRGGB10_CSI2P' : 1332x990 [120.05 fps - (696, 528)/2664x1980 crop]
- 'SRGGB12_CSI2P' : 2028x1080 [50.03 fps - (0, 440)/4056x2160 crop]
- 2028x1520 [40.01 fps - (0, 0)/4056x3040 crop]
- 4056x3040 [10.00 fps - (0, 0)/4056x3040 crop]
-----
-
-In the above example, the IMX219 sensor is available at index 0 and IMX477 at index 1. The sensor mode identifier takes the following form:
-----
-S_ :
-----
-For the IMX219 in the above example, all modes have a `RGGB` Bayer ordering and provide either 8-bit or 10-bit CSI2 packed readout at the listed resolutions. The crop is specified as (, )/x, where (x, y) is the location of the crop window of size Width x Height in the sensor array. The units remain native sensor pixels, even if the sensor is being used in a binning or skipping mode.
-
-----
- --camera Selects which camera to use
-----
-
-The `--camera` option will select which camera to use from the supplied value. The value can be obtained from the `--list-cameras` option.
-
-----
- --config, -c Read options from the given file
-----
-
-Normally options are read from the command line, but in case multiple options are required it may be more convenient to keep them in a file.
-
-Example: `libcamera-hello -c config.txt`
-
-This is a text file containing individual lines of `key=value` pairs, for example:
-
-----
-timeout=99000
-verbose=
-----
-
-Note how the `=` is required even for implicit options, and that the `--` used on the command line are omitted. Only long form options are permitted (`t=99000` would not be accepted).
-
-----
- --timeout, -t Delay before application stops automatically
-----
-
-The `--timeout` option specifies how long the application runs before it stops, whether it is recording a video or showing a preview. In the case of still image capture, the application will show the preview window for this long before capturing the output image.
-
-If unspecified, the default value is 5000 (5 seconds). The value zero causes the application to run indefinitely.
-
-Example: `libcamera-hello -t 0`
-
-==== Preview window
-
-----
- --preview, -p Preview window settings
-----
-
-Sets the size and location of the preview window (both X Windows and DRM versions). It does not affect the resolution or aspect ratio of images being requested from the camera. The camera images will be scaled to the size of the preview window for display, and will be pillar/letter-boxed to fit.
-
-Example: `libcamera-hello -p 100,100,500,500`
-
-image::images/preview_window.jpg[Letterboxed preview image]
-
-----
- --fullscreen, -f Fullscreen preview mode
-----
-
-Forces the preview window to use the whole screen, and the window will have no border or title bar. Again the image may be pillar/letter-boxed.
-
-Example `libcamera-still -f -o test.jpg`
-
-----
- --qt-preview Use Qt-based preview window
-----
-
-The preview window is switched to use the Qt-based implementation. This option is not normally recommended because it no longer uses zero-copy buffer sharing nor GPU acceleration and is therefore very expensive, however, it does support X forwarding (which the other preview implementations do not).
-
-The Qt preview window does not support the `--fullscreen` option. Generally it is advised to try and keep the preview window small.
-
-Example `libcamera-hello --qt-preview`
-
-----
- --nopreview, -n Do not display a preview window
-----
-
-The preview window is suppressed entirely.
-
-Example `libcamera-still -n -o test.jpg`
-
-----
- --info-text Set window title bar text
-----
-
-The supplied string is set as the title of the preview window (when running under X Windows). Additionally the string may contain a number of `%` directives which are substituted with information from the image metadata. The permitted directives are
-
-|===
-| Directive | Substitution
-
-| %frame
-| The sequence number of the frame
-
-| %fps
-| The instantaneous frame rate
-
-| %exp
-| The shutter speed used to capture the image, in microseconds
-
-| %ag
-| The analogue gain applied to the image in the sensor
-
-| %dg
-| The digital gain applied to the image by the ISP
-
-| %rg
-| The gain applied to the red component of each pixel
-
-| %bg
-| The gain applied to the blue component of each pixel
-
-| %focus
-| The focus metric for the image, where a larger value implies a sharper image
-
-| %lp
-| The current lens position in dioptres (1 / distance in metres).
-
-| %afstate
-| The autofocus algorithm state (one of `idle`, `scanning`, `focused` or `failed`).
-|===
-
-When not provided, the `--info-text` string defaults to `"#%frame (%fps fps) exp %exp ag %ag dg %dg"`.
-
-Example: `libcamera-hello --info-test "Focus measure: %focus`
-
-image::images/focus.jpg[Image showing focus measure]
-
-==== Camera Resolution and Readout
-
-----
- --width Capture image width
- --height Capture image height
-----
-
-These numbers specify the output resolution of the camera images captured by `libcamera-still`, `libcamera-jpeg` and `libcamera-vid`.
-
-For `libcamera-raw`, it affects the size of the raw frames captured. Where a camera has a 2x2 binned readout mode, specifying a resolution not larger than this binned mode will result in the capture of 2x2 binned raw frames.
-
-For `libcamera-hello` these parameters have no effect.
-
-Examples:
-
-`libcamera-vid -o test.h264 --width 1920 --height 1080` will capture 1080p video.
-
-`libcamera-still -r -o test.jpg --width 2028 --height 1520` will capture a 2028x1520 resolution JPEG. When using the HQ camera the sensor will be driven in its 2x2 binned mode so the raw file - captured in `test.dng` - will contain a 2028x1520 raw Bayer image.
-
-----
- --viewfinder-width Capture image width
- --viewfinder-height Capture image height
-----
-
-These options affect only the preview (meaning both `libcamera-hello` and the preview phase of `libcamera-jpeg` and `libcamera-still`), and specify the image size that will be requested from the camera for the preview window. They have no effect on captured still images or videos. Nor do they affect the preview window as the images are resized to fit.
-
-Example: `libcamera-hello --viewfinder-width 640 --viewfinder-height 480`
-
-----
- --rawfull Force sensor to capture in full resolution mode
-----
-
-This option forces the sensor to be driven in its full resolution readout mode for still and video capture, irrespective of the requested output resolution (given by `--width` and `--height`). It has no effect for `libcamera-hello`.
-
-Using this option often incurs a frame rate penalty, as larger resolution frames are slower to read out.
-
-Example: `libcamera-raw -t 2000 --segment 1 --rawfull -o test%03d.raw` will cause multiple full resolution raw frames to be captured. On the HQ camera each frame will be about 18MB in size. Without the `--rawfull` option the default video output resolution would have caused the 2x2 binned mode to be selected, resulting in 4.5MB raw frames.
-
-----
- --mode Specify sensor mode, given as :::
-----
-
-This option is more general than `--rawfull` and allows the precise selection of one of the camera modes. The mode should be specified by giving its width, height, bit-depth and packing, separated by colons. These numbers do not have to be exact as the system will select the closest it can find. Moreover, the bit-depth and packing are optional (defaulting to 12 and `P` for "packed" respectively). For example:
-
-* `4056:3040:12:P` - 4056x3040 resolution, 12 bits per pixel, packed. This means that raw image buffers will be packed so that 2 pixel values occupy only 3 bytes.
-* `1632:1224:10` - 1632x1224 resolution, 10 bits per pixel. It will default to "packed". A 10-bit packed mode would store 4 pixels in every 5 bytes.
-* `2592:1944:10:U` - 2592x1944 resolution, 10 bits per pixel, unpacked. An unpacked format will store every pixel in 2 bytes, in this case with the top 6 bits of each value being zero.
-* `3264:2448` - 3264x2448 resolution. It will try to select the default 12-bit mode but in the case of the v2 camera there isn't one, so a 10-bit mode would be chosen instead.
-
-The `--mode` option affects the mode choice for video recording and stills capture. To control the mode choice during the preview phase prior to stills capture, please use the `--viewfinder-mode` option.
-
-----
- --viewfinder-mode Specify sensor mode, given as :::
-----
-
-This option is identical to the `--mode` option except that it applies only during the preview phase of stills capture (also used by the `libcamera-hello` application).
-
-----
- --lores-width Low resolution image width
- --lores-height Low resolution image height
-----
-
-`libcamera` allows the possibility of delivering a second lower resolution image stream from the camera system to the application. This stream is available in both the preview and the video modes (i.e. `libcamera-hello` and the preview phase of `libcamera-still`, and `libcamera-vid`), and can be used, among other things, for image analysis. For stills captures, the low resolution image stream is not available.
-
-The low resolution stream has the same field of view as the other image streams. If a different aspect ratio is specified for the low resolution stream, then those images will be squashed so that the pixels are no longer square.
-
-During video recording (`libcamera-vid`), specifying a low resolution stream will disable some extra colour denoise processing that would normally occur.
-
-Example: `libcamera-hello --lores-width 224 --lores-height 224`
-
-Note that the low resolution stream is not particularly useful unless used in conjunction with xref:camera_software.adoc#post-processing[image post-processing].
-
-----
- --hflip Read out with horizontal mirror
- --vflip Read out with vertical flip
- --rotation Use hflip and vflip to create the given rotation
-----
-
-These options affect the order of read-out from the sensor, and can be used to mirror the image horizontally, and/or flip it vertically. The `--rotation` option permits only the value 0 or 180, so note that 90 or 270 degree rotations are not supported. Moreover, `--rotation 180` is identical to `--hflip --vflip`.
-
-Example: `libcamera-hello --vflip --hflip`
-
-----
- --roi Select a crop (region of interest) from the camera
-----
-
-The `--roi` (region of interest) option allows the user to select a particular crop from the full field of view provided by the sensor. The coordinates are specified as a proportion of the available field of view, so that `--roi 0,0,1,1` would have no effect at all.
-
-The `--roi` parameter implements what is commonly referred to as "digital zoom".
-
-Example `libcamera-hello --roi 0.25,0.25,0.5,0.5` will select exactly a quarter of the total number of pixels cropped from the centre of the image.
-
-----
- --hdr Run the camera in HDR mode (supported cameras only)
-----
-
-The `--hdr` option causes the camera to be run in HDR (High Dynamic Range) mode. This option only works for certain supported cameras, including the _Raspberry Pi Camera Module 3_.
-
-Example: `libcamera-still --hdr -o hdr.jpg` for capturing a still image, or `libcamera-vid --hdr -o hdr.h264` to capture a video.
-
-Use of the HDR option may generally cause different camera modes to be available, and this can be checked by comparing the output of `libcamera-hello --list-cameras` with `libcamera-hello --hdr --list-cameras`.
-
-Users may also supply `--hdr 0` or `--hdr 1`, where the former disables the HDR modes (and is equivalent to omitting the option entirely), and the latter is the same as using `--hdr` on its own.
-
-NOTE: For the _Raspberry Pi Camera Module 3_, the non-HDR modes include the usual full resolution (12MP) mode as well as its half resolution 2x2 binned (3MP) equivalent. In the case of HDR, only a single half resolution (3MP) mode is available, and it is not possible to switch between HDR and non-HDR modes without restarting the camera application.
-
-==== Camera Control
-
-The following options affect the image processing and control algorithms that affect the camera image quality.
-
-----
- --sharpness Set image sharpness
-----
-
-The given `` adjusts the image sharpness. The value zero means that no sharpening is applied, the value 1.0 uses the default amount of sharpening, and values greater than 1.0 use extra sharpening.
-
-Example: `libcamera-still -o test.jpg --sharpness 2.0`
-
-----
- --contrast Set image contrast
-----
-
-The given `` adjusts the image contrast. The value zero produces minimum contrast, the value 1.0 uses the default amount of contrast, and values greater than 1.0 apply extra contrast.
-
-Example: `libcamera-still -o test.jpg --contrast 1.5`
-
-----
- --brightness Set image brightness
-----
-
-The given `` adjusts the image brightness. The value -1.0 produces an (almost) black image, the value 1.0 produces an almost entirely white image and the value 0.0 produces standard image brightness.
-
-Note that the brightness parameter adds (or subtracts) an offset from all pixels in the output image. The `--ev` option is often more appropriate.
-
-Example: `libcamera-still -o test.jpg --brightness 0.2`
-
-----
- --saturation Set image colour saturation
-----
-
-The given `` adjusts the colour saturation. The value zero produces a greyscale image, the value 1.0 uses the default amount of sautration, and values greater than 1.0 apply extra colour saturation.
-
-Example: `libcamera-still -o test.jpg --saturation 0.8`
-
-----
- --ev Set EV compensation
-----
-
-Sets the EV compensation of the image in units of _stops_, in the range -10 to 10. Default is 0. It works by raising or lowering the target values the AEC/AGC algorithm is attempting to match.
-
-Example: `libcamera-still -o test.jpg --ev 0.3`
-
-----
- --shutter Set the exposure time in microseconds
-----
-
-The shutter time is fixed to the given value. The gain will still be allowed to vary (unless that is also fixed).
-
-Note that this shutter time may not be achieved if the camera is running at a frame rate that is too fast to allow it. In this case the `--framerate` option may be used to lower the frame rate. The maximum possible shutter times for the official Raspberry Pi supported can be found xref:../accessories/camera.adoc#hardware-specification[in this table].
-
-Using values above these maximums will result in undefined behaviour. Cameras will also have different minimum shutter times, though in practice this is not important as they are all low enough to expose bright scenes appropriately.
-
-Example: `libcamera-hello --shutter 30000`
-
-----
- --gain Sets the combined analogue and digital gains
- --analoggain Synonym for --gain
-----
-
-These two options are actually identical, and set the combined analogue and digital gains that will be used. The `--analoggain` form is permitted so as to be more compatible with the legacy `raspicam` applications. Where the requested gain can be supplied by the sensor driver, then only analogue gain will be used. Once the analogue gain reaches the maximum permitted value, then extra gain beyond this will be supplied as digital gain.
-
-Note that there are circumstances where the digital gain can go above 1 even when the analogue gain limit is not exceeded. This can occur when
-
-* Either of the colour gains goes below 1.0, which will cause the digital gain to settle to 1.0/min(red_gain,blue_gain). This means that the total digital gain being applied to any colour channel does not go below 1.0, as that would cause discolouration artifacts.
-* The digital gain can vary slightly while the AEC/AGC changes, though this effect should be only transient.
-
-----
- --metering Set the metering mode
-----
-
-Sets the metering mode of the AEC/AGC algorithm. This may one of the following values
-
-* `centre` - centre weighted metering (which is the default)
-* `spot` - spot metering
-* `average` - average or whole frame metering
-* `custom` - custom metering mode which would have to be defined in the camera tuning file.
-
-For more information on defining a custom metering mode, and also on how to adjust the region weights in the existing metering modes, please refer to the https://datasheets.raspberrypi.com/camera/raspberry-pi-camera-guide.pdf[Tuning guide for the Raspberry Pi cameras and libcamera].
-
-Example: `libcamera-still -o test.jpg --metering spot`
-
-----
- --exposure Set the exposure profile
-----
-
-The exposure profile may be either `normal`, `sport` or `long`. Changing the exposure profile should not affect the overall exposure of an image, but the `sport` mode will tend to prefer shorter exposure times and larger gains to achieve the same net result.
-
-Exposure profiles can be edited in the camera tuning file. Please refer to the https://datasheets.raspberrypi.com/camera/raspberry-pi-camera-guide.pdf[Tuning guide for the Raspberry Pi cameras and libcamera] for more information.
-
-Example: `libcamera-still -o test.jpg --exposure sport`
-
-----
- --awb Set the AWB mode
-----
-
-This option sets the AWB algorithm into the named AWB mode. Valid modes are:
-
-|===
-| Mode name | Colour temperature
-
-| auto
-| 2500K to 8000K
-
-| incandescent
-| 2500K to 3000K
-
-| tungsten
-| 3000K to 3500K
-
-| fluorescent
-| 4000K to 4700K
-
-| indoor
-| 3000K to 5000K
-
-| daylight
-| 5500K to 6500K
-
-| cloudy
-| 7000K to 8500K
-
-| custom
-| A custom range would have to be defined in the camera tuning file.
-|===
-
-There is no mode that turns the AWB off, instead fixed colour gains should be specified with the `--awbgains` option.
-
-Note that these values are only approximate, the values could vary according to the camera tuning.
-
-For more information on AWB modes and how to define a custom one, please refer to the https://datasheets.raspberrypi.com/camera/raspberry-pi-camera-guide.pdf[Tuning guide for the Raspberry Pi cameras and libcamera].
-
-Example: `libcamera-still -o test.jpg --awb tungsten`
-
-----
- --awbgains Set fixed colour gains
-----
-
-This option accepts a red and a blue gain value and uses them directly in place of running the AWB algorithm. Setting non-zero values here has the effect of disabling the AWB calculation.
-
-Example: `libcamera-still -o test.jpg --awbgains 1.5,2.0`
-
-----
- --denoise Set the denoising mode
-----
-
-The following denoise modes are supported:
-
-* `auto` - This is the default. It always enables standard spatial denoise. It uses extra fast colour denoise for video, and high quality colour denoise for stills capture. Preview does not enable any extra colour denoise at all.
-
-* `off` - Disables spatial and colour denoise.
-
-* `cdn_off` - Disables colour denoise.
-
-* `cdn_fast` - Uses fast color denoise.
-
-* `cdn_hq` - Uses high quality colour denoise. Not appropriate for video/viewfinder due to reduced throughput.
-
-Note that even the use of fast colour denoise can result in lower framerates. The high quality colour denoise will normally result in much lower framerates.
-
-Example: `libcamera-vid -o test.h264 --denoise cdn_off`
-
-----
- --tuning-file Specify the camera tuning to use
-----
-
-This identifies the name of the JSON format tuning file that should be used. The tuning file covers many aspects of the image processing, including the AEC/AGC, AWB, colour shading correction, colour processing, denoising and so forth.
-
-For more information on the camera tuning file, please consult the https://datasheets.raspberrypi.com/camera/raspberry-pi-camera-guide.pdf[Tuning guide for the Raspberry Pi cameras and libcamera].
-
-Example: `libcamera-hello --tuning-file ~/my-camera-tuning.json`
-
-----
- --autofocus-mode Specify the autofocus mode
-----
-
-Specifies the autofocus mode to use, which may be one of
-
-* `default` (also the default if the option is omitted) - normally puts the camera into continuous autofocus mode, except if either `--lens-position` or `--autofocus-on-capture` is given, in which case manual mode is chosen instead
-* `manual` - do not move the lens at all, but it can be set with the `--lens-position` option
-* `auto` - does not move the lens except for an autofocus sweep when the camera starts (and for `libcamera-still`, just before capture if `--autofocus-on-capture` is given)
-* `continuous` - adjusts the lens position automatically as the scene changes.
-
-This option is only supported for certain camera modules (such as the _Raspberry Pi Camera Module 3_).
-
-----
- --autofocus-range Specify the autofocus range
-----
-
-Specifies the autofocus range, which may be one of
-
-* `normal` (the default) - focuses from reasonably close to infinity
-* `macro` - focuses only on close objects, including the closest focal distances supported by the camera
-* `full` - will focus on the entire range, from the very closest objects to infinity.
-
-This option is only supported for certain camera modules (such as the _Raspberry Pi Camera Module 3_).
-
-----
- --autofocus-speed Specify the autofocus speed
-----
-
-Specifies the autofocus speed, which may be one of
-
-* `normal` (the default) - the lens position will change at the normal speed
-* `fast` - the lens position may change more quickly.
-
-This option is only supported for certain camera modules (such as the _Raspberry Pi Camera Module 3_).
-
-----
- --autofocus-window Specify the autofocus window
-----
-
-Specifies the autofocus window, in the form `x,y,width,height` where the coordinates are given as a proportion of the entire image. For example, `--autofocus-window 0.25,0.25,0.5,0.5` would choose a window that is half the size of the output image in each dimension, and centred in the middle.
-
-The default value causes the algorithm to use the middle third of the output image in both dimensions (so 1/9 of the total image area).
-
-This option is only supported for certain camera modules (such as the _Raspberry Pi Camera Module 3_).
-
-----
- --lens-position Set the lens to a given position
-----
-
-Moves the lens to a fixed focal distance, normally given in dioptres (units of 1 / _distance in metres_). We have
-
-* 0.0 will move the lens to the "infinity" position
-* Any other `number`: move the lens to the 1 / `number` position, so the value 2 would focus at approximately 0.5m
-* `default` - move the lens to a default position which corresponds to the hyperfocal position of the lens.
-
-It should be noted that lenses can only be expected to be calibrated approximately, and there may well be variation between different camera modules.
-
-This option is only supported for certain camera modules (such as the _Raspberry Pi Camera Module 3_).
-
-
-==== Output File Options
-
-----
- --output, -o Output file name
-----
-
-`--output` sets the name of the output file to which the output image or video is written. Besides regular file names, this may take the following special values:
-
-* `-` - write to stdout
-* `udp://` - a string starting with this is taken as a network address for streaming
-* `tcp://` - a string starting with this is taken as a network address for streaming
-* a string containing a `%d` directive is taken as a file name where the format directive is replaced with a count that increments for each file that is opened. Standard C format directive modifiers are permitted.
-
-Examples:
-
-`libcamera-vid -t 100000 --segment 10000 -o chunk%04d.h264` records a 100 second file in 10 second segments, where each file is named `chunk.h264` but with the inclusion of an incrementing counter. Note that `%04d` writes the count to a string, but padded up to a total width of at least 4 characters by adding leading zeroes.
-
-`libcamera-vid -t 0 --inline -o udp://192.168.1.13:5000` stream H.264 video to network address 192.168.1.13 on port 5000.
-
-----
- --wrap Wrap output file counter at
-----
-
-When outputting to files with an incrementing counter (e.g. `%d` in the output file name), wrap the counter back to zero when it reaches this value.
-
-Example: `libcamera-vid -t 0 --codec mjpeg --segment 1 --wrap 100 -o image%d.jpg`
-
-----
- --flush Flush output files immediately
-----
-
-`--flush` causes output files to be flushed to disk as soon as every frame is written, rather than waiting for the system to do it.
-
-Example: `libcamera-vid -t 10000 --flush -o test.h264`
-
-==== Post Processing Options
-
-The `--post-process-file` option specifies a JSON file that configures the post-processing that the imaging pipeline applies to camera images before they reach the application. It can be thought of as a replacement for the legacy `raspicam` "image effects".
-
-Post-processing is a large topic and admits the use of 3rd party software like OpenCV and TensorFlowLite to analyse and manipulate images. For more information, please refer to the section on xref:camera_software.adoc#post-processing[post-processing].
-
-Example: `libcamera-hello --post-process-file negate.json`
-
-This might apply a "negate" effect to an image, if the file `negate.json` is appropriately configured.
diff --git a/documentation/asciidoc/computers/camera/libcamera_options_still.adoc b/documentation/asciidoc/computers/camera/libcamera_options_still.adoc
deleted file mode 100644
index 62a74249e..000000000
--- a/documentation/asciidoc/computers/camera/libcamera_options_still.adoc
+++ /dev/null
@@ -1,148 +0,0 @@
-=== Still Command Line Options
-
-----
- --quality, -q JPEG quality
-----
-
-Set the JPEG quality. 100 is maximum quality and 93 is the default. Only applies when saving JPEG files.
-
-Example: `libcamera-jpeg -o test.jpg -q 80`
-
-----
- --exif, -x Add extra EXIF tags
-----
-
-The given extra EXIF tags are saved in the JPEG file. Only applies when saving JPEG files.
-
-EXIF is supported using the `libexif` library and so there are some associated limitations. In particular, `libexif` seems to recognise a number of tags but without knowing the correct format for them. The software will currently treat these (incorrectly, in many cases) as ASCII, but will print a warning to the terminal. As we come across these they can be added to the table of known exceptions in the software.
-
-Clearly the application needs to supply EXIF tags that contain specific camera data (like the exposure time). But for other tags that have nothing to do with the camera, a reasonable workaround would simply be to add them _post facto_, using something like `exiftool`.
-
-Example: `libcamera-still -o test.jpg --exif IDO0.Artist=Someone`
-
-----
- --timelapse Time interval between timelapse captures
-----
-
-This puts `libcamera-still` into timelapse mode where it runs according to the timeout (`--timeout` or `-t`) that has been set, and for that period will capture repeated images at the interval specified here. (`libcamera-still` only.)
-
-Example: `libcamera-still -t 100000 -o test%d.jpg --timelapse 10000` captures an image every 10s for about 100s.
-
-----
- --framestart The starting value for the frame counter
-----
-
-When writing counter values into the output file name, this specifies the starting value for the counter.
-
-Example: `libcamera-still -t 100000 -o test%d.jpg --timelapse 10000 --framestart 1` captures an image every 10s for about 100s, starting at 1 rather than 0. (`libcamera-still` only.)
-
-----
- --datetime Use date format for the output file names
-----
-
-Use the current date and time to construct the output file name, in the form MMDDhhmmss.jpg, where MM = 2-digit month number, DD = 2-digit day number, hh = 2-digit 24-hour hour number, mm = 2-digit minute number, ss = 2-digit second number. (`libcamera-still` only.)
-
-Example: `libcamera-still --datetime`
-
-----
- --timestamp Use system timestamps for the output file names
-----
-
-Uses the current system timestamp (the number of seconds since the start of 1970) as the output file name. (`libcamera-still` only.)
-
-Example: `libcamera-still --timestamp`
-
-----
- --restart Set the JPEG restart interval
-----
-
-Sets the JPEG restart interval to the given value. Default is zero.
-
-Example: `libcamera-still -o test.jpg --restart 20`
-
-----
- --keypress, -k Capture image when Enter pressed
-----
-
-This switches `libcamera-still` into keypress mode. It will capture a still image either when the timeout expires or the Enter key is pressed in the terminal window. Typing `x` and Enter causes `libcamera-still` to quit without capturing.
-
-Example: `libcamera-still -t 0 -o test.jpg -k`
-
-----
- --signal, -s Capture image when SIGUSR1 received
-----
-
-This switches `libcamera-still` into signal mode. It will capture a still image either when the timeout expires or a SIGUSR1 is received. SIGUSR2 will cause `libcamera-still` to quit without capturing.
-
-Example:
-
-`libcamera-still -t 0 -o test.jpg -s &`
-
-then
-
-`kill -SIGUSR1 $!`
-
-----
- --thumb Set thumbnail parameters or none
-----
-
-Sets the dimensions and quality parameter of the associated thumbnail image. The defaults are size 320x240 and quality 70.
-
-Example: `libcamera-still -o test.jpg --thumb 640:480:80`
-
-The value `none` may be given, in which case no thumbnail is saved in the image at all.
-
-----
- --encoding, -e Set the still image codec
-----
-
-Select the still image encoding to be used. Valid encoders are:
-
-* `jpg` - JPEG (the default)
-* `png` - PNG format
-* `bmp` - BMP format
-* `rgb` - binary dump of uncompressed RGB pixels
-* `yuv420` - binary dump of uncompressed YUV420 pixels.
-
-Note that this option determines the encoding and that the extension of the output file name is ignored for this purpose. However, for the `--datetime` and `--timestamp` options, the file extension is taken from the encoder name listed above. (`libcamera-still` only.)
-
-Example: `libcamera-still -e png -o test.png`
-
-----
- --raw, -r Save raw file
-----
-
-Save a raw Bayer file in DNG format alongside the usual output image. The file name is given by replacing the output file name extension by `.dng`. These are standard DNG files, and can be processed with standard tools like _dcraw_ or _RawTherapee_, among others. (`libcamera-still` only.)
-
-The image data in the raw file is exactly what came out of the sensor, with no processing whatsoever either by the ISP or anything else. The EXIF data saved in the file, among other things, includes:
-
-* exposure time
-* analogue gain (the ISO tag is 100 times the analogue gain used)
-* white balance gains (which are the reciprocals of the "as shot neutral" values)
-* the colour matrix used by the ISP.
-
-----
- --latest Make symbolic link to latest file saved
-----
-
-This causes `libcamera-still` to make a symbolic link to the most recently saved file, thereby making it easier to identify. (`libcamera-still` only.)
-
-Example: `libcamera-still -t 100000 --timelapse 10000 -o test%d.jpg --latest latest.jpg`
-
-----
- --autofocus-on-capture Whether to run an autofocus cycle before capture
-----
-
-If set, this will cause an autofocus cycle to be run just before the image is captured.
-
-If `--autofocus-mode` is not specified, or was set to `default` or `manual`, this will be the only autofocus cycle.
-
-If `--autofocus-mode` was set to `auto`, there will be an additional autofocus cycle at the start of the preview window.
-
-If `--autofocus-mode` was set to `continuous`, this option will be ignored.
-
-You can also use `--autofocus-on-capture 1` in place of `--autofocus-on-capture`, and `--autofocus-on-capture 0` as an alternative to omitting the parameter entirely.
-
-Example: `libcamera-still --autofocus-on-capture -o test.jpg`
-
-This option is only supported for certain camera modules (such as the _Raspberry Pi Camera Module 3_).
diff --git a/documentation/asciidoc/computers/camera/libcamera_options_vid.adoc b/documentation/asciidoc/computers/camera/libcamera_options_vid.adoc
deleted file mode 100644
index 14c664f37..000000000
--- a/documentation/asciidoc/computers/camera/libcamera_options_vid.adoc
+++ /dev/null
@@ -1,138 +0,0 @@
-=== Video Command Line Options
-
-----
- --quality, -q JPEG quality
-----
-
-Set the JPEG quality. 100 is maximum quality and 50 is the default. Only applies when saving in MJPEG format.
-
-Example: `libcamera-vid --codec mjpeg -o test.mjpeg -q 80`
-
-----
- --bitrate, -b H.264 bitrate
-----
-
-Set the target bitrate for the H.264 encoder, in _bits per second_. Only applies when encoding in H.264 format.
-
-Example: `libcamera-vid -b 10000000 --width 1920 --height 1080 -o test.h264`
-
-----
- --intra, -g Intra-frame period (H.264 only)
-----
-
-Sets the frequency of I (Intra) frames in the H.264 bitstream, as a number of frames. The default value is 60.
-
-Example: `libcamera-vid --intra 30 --width 1920 --height 1080 -o test.h264`
-
-----
- --profile H.264 profile
-----
-
-Set the H.264 profile. The value may be `baseline`, `main` or `high`.
-
-Example: `libcamera-vid --width 1920 --height 1080 --profile main -o test.h264`
-
-----
- --level H.264 level
-----
-
-Set the H.264 level. The value may be `4`, `4.1` or `4.2`.
-
-Example: `libcamera-vid --width 1920 --height 1080 --level 4.1 -o test.h264`
-
-----
- --codec Encoder to be used
-----
-
-This can select how the video frames are encoded. Valid options are:
-
-* h264 - use H.264 encoder (the default)
-* mjpeg - use MJPEG encoder
-* yuv420 - output uncompressed YUV420 frames.
-* libav - use the libav backend to encode audio and video (see the xref:camera_software.adoc#libav-integration-with-libcamera-vid[libav section] for further details).
-
-Examples:
-
-`libcamera-vid -t 10000 --codec mjpeg -o test.mjpeg`
-
-`libcamera-vid -t 10000 --codec yuv420 -o test.data`
-
-----
- --keypress, -k Toggle between recording and pausing
-----
-
-Pressing Enter will toggle `libcamera-vid` between recording the video stream and not recording it (i.e. discarding it). The application starts off in the recording state, unless the `--initial` option specifies otherwise. Typing `x` and Enter causes `libcamera-vid` to quit.
-
-Example: `libcamera-vid -t 0 -o test.h264 -k`
-
-----
- --signal, -s Toggle between recording and pausing when SIGUSR1 received
-----
-
-The SIGUSR1 signal will toggle `libcamera-vid` between recording the video stream and not recording it (i.e. discarding it). The application starts off in the recording state, unless the `--initial` option specifies otherwise. SIGUSR2 causes `libcamera-vid` to quit.
-
-Example:
-
-`libcamera-vid -t 0 -o test.h264 -s`
-
-then
-
-`kill -SIGUSR1 $!`
-
-----
- --initial Start the application in the recording or paused state
-----
-
-The value passed may be `record` or `pause` to start the application in, respectively, the recording or the paused state. This option should be used in conjunction with either `--keypress` or `--signal` to toggle between the two states.
-
-Example: `libcamera-vid -t 0 -o test.h264 -k --initial pause`
-
-----
- --split Split multiple recordings into separate files
-----
-
-This option should be used in conjunction with `--keypress` or `--signal` and causes each recording session (in between the pauses) to be written to a separate file.
-
-Example: `libcamera-vid -t 0 --keypress --split --initial pause -o test%04d.h264`
-
-----
- --segment Write the video recording into multiple segments
-----
-
-This option causes the video recording to be split across multiple files where the parameter gives the approximate duration of each file in milliseconds.
-
-One convenient little trick is to pass a very small duration parameter (namely, `--segment 1`) which will result in each frame being written to a separate output file. This makes it easy to do "burst" JPEG capture (using the MJPEG codec), or "burst" raw frame capture (using `libcamera-raw`).
-
-Example: `libcamera-vid -t 100000 --segment 10000 -o test%04d.h264`
-
-----
- --circular Write the video recording into a circular buffer of the given <size>
-----
-
-The video recording is written to a circular buffer which is written to disk when the application quits. The size of the circular buffer may be given in units of megabytes, defaulting to 4MB.
-
-Example: `libcamera-vid -t 0 --keypress --inline --circular -o test.h264`
-
-----
- --inline Write sequence header in every I frame (H.264 only)
-----
-
-This option causes the H.264 sequence headers to be written into every I (Intra) frame. This is helpful because it means a client can understand and decode the video sequence from any I frame, not just from the very beginning of the stream. It is recommended to use this option with any output type that breaks the output into pieces (`--segment`, `--split`, `--circular`), or transmits the output over a network.
-
-Example: `libcamera-vid -t 0 --keypress --inline --split -o test%04d.h264`
-
-----
- --listen Wait for an incoming TCP connection
-----
-
-This option is provided for streaming over a network using TCP/IP. Using `--listen` will cause `libcamera-vid` to wait for an incoming client connection before starting the video encode process, which will then be forwarded to that client.
-
-Example: `libcamera-vid -t 0 --inline --listen -o tcp://0.0.0.0:8123`
-
-----
- --frames Record exactly this many frames
-----
-
-Exactly `<frames>` frames are recorded. Specifying a non-zero value will override any timeout.
-
-Example: `libcamera-vid -o test.h264 --frames 1000`
diff --git a/documentation/asciidoc/computers/camera/libcamera_python.adoc b/documentation/asciidoc/computers/camera/libcamera_python.adoc
index 57d9adc8d..d14a17068 100644
--- a/documentation/asciidoc/computers/camera/libcamera_python.adoc
+++ b/documentation/asciidoc/computers/camera/libcamera_python.adoc
@@ -1,48 +1,26 @@
-=== Python Bindings for `libcamera`
+[[picamera2]]
+=== Use `libcamera` from Python with Picamera2
-The https://github.com/raspberrypi/picamera2[Picamera2 library] is a libcamera-based replacement for Picamera which was a Python interface to the Raspberry Pi's legacy camera stack. Picamera2 presents an easy to use Python API.
+The https://github.com/raspberrypi/picamera2[Picamera2 library] is a `rpicam`-based replacement for Picamera, which was a Python interface to Raspberry Pi's legacy camera stack. Picamera2 presents an easy-to-use Python API.
-Documentation about Picamera2 is available https://github.com/raspberrypi/picamera2[on Github] and in the https://datasheets.raspberrypi.com/camera/picamera2-manual.pdf[Picamera2 Manual].
+Documentation about Picamera2 is available https://github.com/raspberrypi/picamera2[on GitHub] and in the https://datasheets.raspberrypi.com/camera/picamera2-manual.pdf[Picamera2 manual].
==== Installation
-Picamera2 is only supported on Raspberry Pi OS Bullseye (or later) images, both 32 and 64-bit.
+Recent Raspberry Pi OS images include Picamera2 with all the GUI (Qt and OpenGL) dependencies. Recent Raspberry Pi OS Lite images include Picamera2 without the GUI dependencies, although preview images can still be displayed using DRM/KMS.
-NOTE: As of September 2022, Picamera2 is pre-installed on images downloaded from Raspberry Pi. It works on all Raspberry Pi boards right down to the Pi Zero, although performance in some areas may be worse on less powerful devices.
-
-Picamera2 is not supported on:
-
-. Images based on Buster or earlier releases.
-. Raspberry Pi OS Legacy images.
-. Bullseye (or later) images where the legacy camera stack has been re-enabled.
-
-On Raspberry Pi OS images, Picamera2 is now installed with all the GUI (Qt and OpenGL) dependencies. On Raspberry Pi OS Lite, it is installed without the GUI dependencies, although preview images can still be displayed using DRM/KMS. If these users wish to use the additional X-Windows GUI features, they will need to run
-
-----
-sudo apt install -y python3-pyqt5 python3-opengl
-----
-
-NOTE: No changes are required to Picamera2 itself.
-
-If your image did not come pre-installed with Picamera2 `apt` is the recommended way of installing and updating Picamera2.
-
-----
-$ sudo apt update
-sudo apt upgrade
-----
-
-Thereafter, you can install Picamera2 with all the GUI (Qt and OpenGL) dependencies using
+If your image did not include Picamera2, run the following command to install Picamera2 with all of the GUI dependencies:
+[source,console]
----
$ sudo apt install -y python3-picamera2
----
-If you do not want the GUI dependencies, use
+If you don't want the GUI dependencies, you can run the following command to install Picamera2 without the GUI dependencies:
+[source,console]
----
$ sudo apt install -y python3-picamera2 --no-install-recommends
----
-NOTE: If you have installed Picamera2 previously using `pip`, then you should also uninstall this, using the command `pip3 uninstall picamera2`.
-
-NOTE: If Picamera2 is already installed, you can update it with `sudo apt install -y python3-picamera2`, or as part of a full system update (for example, `sudo apt upgrade`).
\ No newline at end of file
+NOTE: If you previously installed Picamera2 with `pip`, uninstall it with: `pip3 uninstall picamera2`.
diff --git a/documentation/asciidoc/computers/camera/libcamera_raw.adoc b/documentation/asciidoc/computers/camera/libcamera_raw.adoc
deleted file mode 100644
index c372f89db..000000000
--- a/documentation/asciidoc/computers/camera/libcamera_raw.adoc
+++ /dev/null
@@ -1,23 +0,0 @@
-=== `libcamera-raw`
-
-`libcamera-raw` is like a video recording application except that it records raw Bayer frames directly from the sensor. It does not show a preview window. For a 2 second raw clip use
-
-[,bash]
-----
-libcamera-raw -t 2000 -o test.raw
-----
-
-The raw frames are dumped with no formatting information at all, one directly after another. The application prints the pixel format and image dimensions to the terminal window so that the user can know how to interpret the pixel data.
-
-By default the raw frames are saved in a single (potentially very large) file. As we saw previously, the `--segment` option can be used conveniently to direct each to a separate file.
-[,bash]
-----
-libcamera-raw -t 2000 --segment 1 -o test%05d.raw
-----
-
-In good conditions (using a fast SSD) `libcamera-raw` can get close to writing 12MP HQ camera frames (18MB of data each) to disk at 10 frames per second. It writes the raw frames with no formatting in order to achieve these speeds; it has no capability to save them as DNG files (like `libcamera-still`). If you want to be sure not to drop frames you could reduce the framerate slightly using the `--framerate` option, for example
-
-[,bash]
-----
-libcamera-raw -t 5000 --width 4056 --height 3040 -o test.raw --framerate 8
-----
diff --git a/documentation/asciidoc/computers/camera/libcamera_software.adoc b/documentation/asciidoc/computers/camera/libcamera_software.adoc
deleted file mode 100644
index dc17bb8df..000000000
--- a/documentation/asciidoc/computers/camera/libcamera_software.adoc
+++ /dev/null
@@ -1,121 +0,0 @@
-== _libcamera_ and _libcamera-apps_ Installation
-
-Your Raspberry Pi should be running the latest version of the Raspberry Pi OS (_Buster_ at the time of writing), and the camera and I2C interfaces must both be enabled (check the _Interfaces_ tab of the _Raspberry Pi Configuration_ tool, from the _Preferences_ menu). First ensure your system, firmware and all its applications and repositories are up to date by entering the following commands into a terminal window.
-
-[,bash]
-----
-sudo apt update
-sudo apt full-upgrade
-----
-
-libcamera is under active development which sometimes means that new features need to be supported in Raspberry Pi OS, even before they are officially released. Therefore we currently recommend updating to the latest release candidate. To do this, first reboot your Raspberry Pi, and then use
-
-[,bash]
-----
-sudo rpi-update
-----
-
-WARNING: Note that the release candidate is not as thoroughly tested as an official release. If your Raspberry Pi contains important or critical data we would strongly advise that it is backed up first, or that a fresh SD card is used for the purpose of trying _libcamera_.
-
-Next, the `/boot/config.txt` file must be updated to load and use the camera driver, by adding the following to the bottom.
-
-[,bash]
-----
-dtoverlay=imx219
-----
-
-If you are using a sensor other than the `imx219` you will need to supply the alternative name here (for example, `ov5647` for the V1 camera, or `imx477` for the HQ Cam).
-
-*NOTE*: after rebooting, control of the camera system will be passed to the ARM cores, and firmware-based camera functions (such as raspistill and so forth) will no longer work. Setting `/boot/config.txt` back and rebooting will restore the previous behaviour.
-
-=== Select the Correct Graphics Driver
-
-There are 3 different graphics drivers available on the Raspberry Pi: firmware, FKMS and KMS. The firmware graphics driver cannot be used with _libcamera-apps_. The Raspberry Pi 4 and Raspberry Pi 400 use the newer FKMS graphics driver by default: this is compatible with _libcamera-apps_. For all other models of Raspberry Pi, you must select the FKMS driver by adding the following line to the `/boot/config.txt` file:
-
-----
-dtoverlay=vc4-fkms-v3d
-----
-
-=== Building _libcamera_ and _qcam_
-
-The build system and runtime environment of _libcamera_ have a number of dependencies. They can be installed with the following commands.
-
-[,bash]
-----
-sudo apt install libboost-dev
-sudo apt install libgnutls28-dev openssl libtiff5-dev
-sudo apt install qtbase5-dev libqt5core5a libqt5gui5 libqt5widgets5
-sudo apt install meson
-sudo pip3 install pyyaml ply
-----
-
-The Qt libraries are only required for _libcamera_'s _qcam_ demo app.
-
-Unfortunately, at the time of writing, the default version of meson is a little old, so please execute:
-
-[,bash]
-----
-sudo pip3 install --upgrade meson
-----
-
-We can now check out the code and build _libcamera_ as follows. Note that if you are using a 1GB system (such as a Raspberry Pi 3) you may need to replace `ninja -C build` by `ninja -C build -j 2` as this will stop ninja exhausting the system memory and aborting.
-
-[,bash]
-----
-git clone https://github.com/raspberrypi/libcamera.git
-cd libcamera
-meson build
-cd build
-meson configure -Dpipelines=raspberrypi -Dtest=false
-cd ..
-ninja -C build
-sudo ninja -C build install
-----
-
-At this stage you may wish to check that _qcam_ works. Type `build/src/qcam/qcam` and check that you see a camera image.
-
-=== Raspberry Pi's _libcamera-apps_
-
-Raspberry Pi's _libcamera-apps_ provide very similar functionality to the _raspistill_ and _raspivid_ applications that use the proprietary firmware-based camera stack. To build them, we must first install _libepoxy_.
-
-[,bash]
-----
-cd
-sudo apt install libegl1-mesa-dev
-git clone https://github.com/anholt/libepoxy.git
-cd libepoxy
-mkdir _build
-cd _build
-meson
-ninja
-sudo ninja install
-----
-
-Finally we can build the _libcamera-apps_. As we saw previously, 1GB platforms may need `make -j2` in place of `make -j4`.
-
-[,bash]
-----
-cd
-sudo apt install cmake libboost-program-options-dev libdrm-dev libexif-dev
-git clone https://github.com/raspberrypi/libcamera-apps.git
-cd libcamera-apps
-mkdir build
-cd build
-cmake ..
-make -j4
-----
-
-To check everything is working correctly, type `./libcamera-hello` - you should see a preview window displayed for about 5 seconds.
-
-[NOTE]
-====
-For Raspberry Pi 3 devices, as we saw previously, 1GB devices may need `make -j2` instead of `make -j4`.
-
-Also, Raspberry Pi 3 does not use the correct GL driver by default, so please ensure you have `dtoverlay=vc4-fkms-v3d` in the `[all]` (not in the `[pi4]`) section of your `/boot/config.txt` file.
-====
-
-=== Further Documentation
-
-You can find out more in the https://datasheets.raspberrypi.com/camera/raspberry-pi-camera-guide.pdf[_Raspberry Pi Camera Algorithm and Tuning Guide_.].
-
-More information on the _libcamera-apps_ is available https://github.com/raspberrypi/libcamera-apps/blob/main/README.md[on Github].
diff --git a/documentation/asciidoc/computers/camera/libcamera_still.adoc b/documentation/asciidoc/computers/camera/libcamera_still.adoc
deleted file mode 100644
index b70c72839..000000000
--- a/documentation/asciidoc/computers/camera/libcamera_still.adoc
+++ /dev/null
@@ -1,92 +0,0 @@
-=== `libcamera-still`
-
-`libcamera-still` is very similar to `libcamera-jpeg` but supports more of the legacy `raspistill` options. As before, a single image can be captured with
-
-[,bash]
-----
-libcamera-still -o test.jpg
-----
-
-==== Encoders
-
-`libcamera-still` allows files to be saved in a number of different formats. It supports both `png` and `bmp` encoding. It also allows files to be saved as a binary dump of RGB or YUV pixels with no encoding or file format at all. In these latter cases the application reading the files will have to understand the pixel arrangement for itself.
-
-[,bash]
-----
-libcamera-still -e png -o test.png
-libcamera-still -e bmp -o test.bmp
-libcamera-still -e rgb -o test.data
-libcamera-still -e yuv420 -o test.data
-----
-Note that the format in which the image is saved depends on the `-e` (equivalently `--encoding`) option and is _not_ selected automatically based on the output file name.
-
-==== Raw Image Capture
-
-_Raw_ images are the images produced directly by the image sensor, before any processing is applied to them either by the ISP (Image Signal Processor) or any of the CPU cores. For colour image sensors these are usually _Bayer_ format images. Note that _raw_ images are quite different from the processed but unencoded RGB or YUV images that we saw earlier.
-
-To capture a raw image use
-
-[,bash]
-----
-libcamera-still -r -o test.jpg
-----
-
-Here, the `-r` option (also `--raw`) indicates to capture the raw image as well as the JPEG. In fact, the raw image is the exact image from which the JPEG was produced. Raw images are saved in DNG (Adobe Digital Negative) format and are compatible with many standard applications, such as _dcraw_ or _RawTherapee_. The raw image is saved to a file with the same name but the extension `.dng`, thus `test.dng` in this case.
-
-These DNG files contain metadata pertaining to the image capture, including black levels, white balance information and the colour matrix used by the ISP to produce the JPEG. This makes these DNG files much more convenient for later "by hand" raw conversion with some of the aforementioned tools. Using `exiftool` shows all the metadata encoded into the DNG file:
-
-----
-File Name : test.dng
-Directory : .
-File Size : 24 MB
-File Modification Date/Time : 2021:08:17 16:36:18+01:00
-File Access Date/Time : 2021:08:17 16:36:18+01:00
-File Inode Change Date/Time : 2021:08:17 16:36:18+01:00
-File Permissions : rw-r--r--
-File Type : DNG
-File Type Extension : dng
-MIME Type : image/x-adobe-dng
-Exif Byte Order : Little-endian (Intel, II)
-Make : Raspberry Pi
-Camera Model Name : /base/soc/i2c0mux/i2c@1/imx477@1a
-Orientation : Horizontal (normal)
-Software : libcamera-still
-Subfile Type : Full-resolution Image
-Image Width : 4056
-Image Height : 3040
-Bits Per Sample : 16
-Compression : Uncompressed
-Photometric Interpretation : Color Filter Array
-Samples Per Pixel : 1
-Planar Configuration : Chunky
-CFA Repeat Pattern Dim : 2 2
-CFA Pattern 2 : 2 1 1 0
-Black Level Repeat Dim : 2 2
-Black Level : 256 256 256 256
-White Level : 4095
-DNG Version : 1.1.0.0
-DNG Backward Version : 1.0.0.0
-Unique Camera Model : /base/soc/i2c0mux/i2c@1/imx477@1a
-Color Matrix 1 : 0.8545269369 -0.2382823821 -0.09044229197 -0.1890484985 1.063961506 0.1062747385 -0.01334283455 0.1440163847 0.2593136724
-As Shot Neutral : 0.4754476844 1 0.413686484
-Calibration Illuminant 1 : D65
-Strip Offsets : 0
-Strip Byte Counts : 0
-Exposure Time : 1/20
-ISO : 400
-CFA Pattern : [Blue,Green][Green,Red]
-Image Size : 4056x3040
-Megapixels : 12.3
-Shutter Speed : 1/20
-----
-We note that there is only a single calibrated illuminant (the one determined by the AWB algorithm even though it gets labelled always as "D65"), and that dividing the ISO number by 100 gives the analogue gain that was being used.
-
-==== Very long exposures
-
-To capture very long exposure images, we need to be careful to disable the AEC/AGC and AWB because these algorithms will otherwise force the user to wait for a number of frames while they converge. The way to disable them is to supply explicit values. Additionally, the entire preview phase of the capture can be skipped with the `--immediate` option.
-
-So to perform a 100 second exposure capture, use
-
-`libcamera-still -o long_exposure.jpg --shutter 100000000 --gain 1 --awbgains 1,1 --immediate`
-
-For reference, the maximum exposure times of the three official Raspberry Pi cameras can be found in xref:../accessories/camera.adoc#hardware-specification[this table].
diff --git a/documentation/asciidoc/computers/camera/libcamera_vid.adoc b/documentation/asciidoc/computers/camera/libcamera_vid.adoc
deleted file mode 100644
index 44e8d031e..000000000
--- a/documentation/asciidoc/computers/camera/libcamera_vid.adoc
+++ /dev/null
@@ -1,118 +0,0 @@
-=== `libcamera-vid`
-
-`libcamera-vid` is the video capture application. By default it uses the Raspberry Pi's hardware H.264 encoder. It will display a preview window and write the encoded bitstream to the specified output. For example, to write a 10 second video to file use
-
-[,bash]
-----
-libcamera-vid -t 10000 -o test.h264
-----
-The resulting file can be played with `vlc` (among other applications)
-[,bash]
-----
-vlc test.h264
-----
-Note that this is an unpackaged video bitstream, it is not wrapped in any kind of container format (such as an mp4 file). The `--save-pts` option can be used to output frame timestamps so that the bitstream can subsequently be converted into an appropriate format using a tool like `mkvmerge`.
-
-`libcamera-vid -o test.h264 --save-pts timestamps.txt`
-
-and then if you want an _mkv_ file:
-
-`mkvmerge -o test.mkv --timecodes 0:timestamps.txt test.h264`
-
-==== Encoders
-
-There is support for motion JPEG, and also for uncompressed and unformatted YUV420, for example
-[,bash]
-----
-libcamera-vid -t 10000 --codec mjpeg -o test.mjpeg
-libcamera-vid -t 10000 --codec yuv420 -o test.data
-----
-In both cases the `--codec` parameter determines the output format, not the extension of the output file.
-
-The `--segment` parameter breaks output files up into chunks of the segment size (given in milliseconds). This is quite handy for breaking a motion JPEG stream up into individual JPEG files by specifying very short (1 millisecond) segments.
-[,bash]
-----
-libcamera-vid -t 10000 --codec mjpeg --segment 1 -o test%05d.jpeg
-----
-Observe that the output file name is normally only sensible if we avoid over-writing the previous file every time, such as by using a file name that includes a counter (as above). More information on output file names is available below.
-
-==== Network Streaming
-
-NOTE: This section describes native streaming from `libcamera-vid`. However, it is also possible to use the libav backend for network streaming. See the xref:camera_software.adoc#libav-integration-with-libcamera-vid[libav section] for further details.
-
-===== UDP
-
-To stream video using UDP, on the Raspberry Pi (server) use
-[,bash]
-----
-libcamera-vid -t 0 --inline -o udp://<ip-addr>:<port>
-----
-where `<ip-addr>` is the IP address of the client, or multicast address (if appropriately configured to reach the client). On the client use (for example)
-[,bash]
-----
-vlc udp://@:<port> :demux=h264
-----
-or alternatively
-----
-ffplay udp://<ip-addr>:<port> -fflags nobuffer -flags low_delay -framedrop
-----
-with the same `<port>` value.
-
-===== TCP
-
-Video can be streamed using TCP. To use the Raspberry Pi as a server
-[,bash]
-----
-libcamera-vid -t 0 --inline --listen -o tcp://0.0.0.0:<port>
-----
-and on the client
-[,bash]
-----
-vlc tcp/h264://<ip-addr-of-server>:<port>
-----
-or alternatively
-----
-ffplay tcp://<ip-addr-of-server>:<port> -vf "setpts=N/30" -fflags nobuffer -flags low_delay -framedrop
-----
-for a 30 frames per second stream with low latency.
-
-The Raspberry Pi will wait until the client connects, and then start streaming video.
-
-===== RTSP
-
-vlc is useful on the Raspberry Pi for formatting an RTSP stream, though there are other RTSP servers available.
-[,bash]
-----
-libcamera-vid -t 0 --inline -o - | cvlc stream:///dev/stdin --sout '#rtp{sdp=rtsp://:8554/stream1}' :demux=h264
-----
-and this can be played with
-[,bash]
-----
-vlc rtsp://<ip-addr-of-server>:8554/stream1
-----
-or alternatively
-----
-ffplay rtsp://<ip-addr-of-server>:8554/stream1 -vf "setpts=N/30" -fflags nobuffer -flags low_delay -framedrop
-----
-
-In all cases, the preview window on the server (the Raspberry Pi) can be suppressed with the `-n` (`--nopreview`) option. Note also the use of the `--inline` option which forces the stream header information to be included with every I (intra) frame. This is important so that a client can correctly understand the stream if it missed the very beginning.
-
-NOTE: Recent versions of VLC seem to have problems with playback of H.264 streams. We recommend using `ffplay` for playback using the above commands until these issues have been resolved.
-
-==== High framerate capture
-
-Using `libcamera-vid` to capture high framerate video (generally anything over 60 fps) while minimising frame drops requires a few considerations:
-
-1. The https://en.wikipedia.org/wiki/Advanced_Video_Coding#Levels[H.264 target level] must be set to 4.2 with the `--level 4.2` argument.
-2. Software colour denoise processing must be turned off with the `--denoise cdn_off` argument.
-3. For rates over 100 fps, disabling the display window with the `-n` option would free up some additional CPU cycles to help avoid frame drops.
-4. It is advisable to set `force_turbo=1` in `/boot/config.txt` to ensure the CPU clock does not get throttled during the video capture. See https://www.raspberrypi.com/documentation/computers/config_txt.html#force_turbo[here] for further details.
-5. Adjust the ISP output resolution with `--width 1280 --height 720` or something even lower to achieve your framerate target.
-6. On a Pi 4, you can overclock the GPU to improve performance by adding `gpu_freq=550` or higher in `/boot/config.txt`. See https://www.raspberrypi.com/documentation/computers/config_txt.html#overclocking[here] for further details.
-
-An example command for 1280x720 120fps video encode would be:
-
-[,bash]
-----
-libcamera-vid --level 4.2 --framerate 120 --width 1280 --height 720 --save-pts timestamp.pts -o video.264 -t 10000 --denoise cdn_off -n
-----
\ No newline at end of file
diff --git a/documentation/asciidoc/computers/camera/qt.adoc b/documentation/asciidoc/computers/camera/qt.adoc
index 9c7fa346a..66aa9bb9e 100644
--- a/documentation/asciidoc/computers/camera/qt.adoc
+++ b/documentation/asciidoc/computers/camera/qt.adoc
@@ -1,15 +1,16 @@
-=== Using _libcamera_ and _Qt_ together
+=== Use `libcamera` with Qt
-_Qt_ is a popular application framework and GUI toolkit, and indeed _libcamera-apps_ optionally makes use of it to implement a camera preview window.
+Qt is a popular application framework and GUI toolkit. `rpicam-apps` includes an option to use Qt for a camera preview window.
-However, _Qt_ defines certain symbols as macros in the global namespace (such as `slot` and `emit`) and this causes errors when including _libcamera_ files. The problem is common to all platforms trying to use both _Qt_ and _libcamera_ and not specific to Raspberry Pi. Nonetheless we suggest that developers experiencing difficulties try the following workarounds.
+Unfortunately, Qt defines certain symbols (such as `slot` and `emit`) as macros in the global namespace. This causes errors when including `libcamera` files. The problem is common to all platforms that use both Qt and `libcamera`. Try the following workarounds to avoid these errors:
-1. _libcamera_ include files, or files that include _libcamera_ files (such as _libcamera-apps_ files), should be listed before any _Qt_ header files where possible.
+* List `libcamera` include files, or files that include `libcamera` files (such as `rpicam-apps` files), _before_ any Qt header files whenever possible.
-2. If you do need to mix your Qt application files with libcamera includes, replace `signals:` with `Q_SIGNALS:`, `slots:` with `Q_SLOTS:`, `emit` with `Q_EMIT` and `foreach` with `Q_FOREACH`.
+* If you do need to mix your Qt application files with `libcamera` includes, replace `signals:` with `Q_SIGNALS:`, `slots:` with `Q_SLOTS:`, `emit` with `Q_EMIT` and `foreach` with `Q_FOREACH`.
-3. Before any _libcamera_ include files, add
+* Add the following at the top of any `libcamera` include files:
+
+[source,cpp]
----
#undef signals
#undef slots
@@ -17,6 +18,5 @@ However, _Qt_ defines certain symbols as macros in the global namespace (such as
#undef foreach
----
-4. If you are using _qmake_, add `CONFIG += no_keywords` to the project file. If using _cmake_, add `SET(QT_NO_KEYWORDS ON)`.
-
-We are not aware of any plans for the underlying library problems to be addressed.
+* If your project uses `qmake`, add `CONFIG += no_keywords` to the project file.
+* If your project uses `cmake`, add `SET(QT_NO_KEYWORDS ON)`.
diff --git a/documentation/asciidoc/computers/camera/raspicam.adoc b/documentation/asciidoc/computers/camera/raspicam.adoc
deleted file mode 100644
index 15b46ff9c..000000000
--- a/documentation/asciidoc/computers/camera/raspicam.adoc
+++ /dev/null
@@ -1,1447 +0,0 @@
-== Raspicam applications
-
-[WARNING]
-====
-Raspberry Pi has transitioned from a legacy camera software stack based on proprietary Broadcom GPU code to an open-source stack based on `libcamera`. As such, the _Raspicam_ stack is now deprecated. Raspberry Pi OS images from _Bullseye_ onwards contain *only* the `libcamera`-based stack. Raspberry Pi OS images up to and including _Buster_ still use the legacy _Raspicam_ stack.
-====
-
-IMPORTANT: The Raspberry Pi https://www.raspberrypi.com/products/camera-module-3/[Camera Module 3] is *not supported* by the legacy camera stack.
-
-Users are encouraged to use the newest OS images and the `libcamera`-based stack because:
-
-* It will continue to be developed moving forward.
-* Raspberry Pi and 3rd parties can fix bugs and problems in the camera stack.
-* Raspberry Pi and 3rd parties can add new features to the camera stack.
-* It is much easier to add support for new cameras.
-* 3rd parties can add support directly for their own cameras.
-* Nearly all aspects of the camera tuning can be changed by users.
-* It integrates much more conveniently with other standard Linux APIs.
-* Raspberry Pi supply a set of `libcamera-apps` which emulate most of the features of the legacy applications.
-* It provides a feature-rich post-processing framework integrating OpenCV and TensorFlow Lite.
-* Libcamera makes it easier to control the parameters of the image sensor and the camera system.
-* It is fully supported on 64-bit operating systems.
-
-Reasons to consider staying with an older OS and using the legacy _Raspicam_ stack might include:
-
-* It may perform better on Raspberry Pi 2 and Raspberry Pi Zero devices, as it offloads more to the GPU and is less dependent on the ARM cores.
-
-== Re-enabling the legacy stack
-
-IMPORTANT: The legacy camera stack is **not available** in the 64-bit version of Raspberry Pi OS and cannot be re-enabled on the 64-bit OS.
-
-The legacy camera stack can be re-enabled in Bullseye using the following steps.
-
-1. Ensure your system is up-to-date and reboot it.
-2. Run `sudo raspi-config`.
-3. Navigate to `Interface Options` and select `Legacy camera` to enable it.
-4. Reboot your Raspberry Pi again.
-
-These steps are shown in the following video.
-
-video::E7KPSc_Xr24[youtube]
-
-NOTE: More information can be found in the https://www.raspberrypi.com/news/bullseye-camera-system/[blog post] discussing the transition.
-
-== Raspicam commands
-
-`raspistill`, `raspivid` and `raspiyuv` are command line tools for using the camera module.
-
-=== Enabling the Camera
-
-Before using any of the _Raspicam_ applications, the camera must be enabled.
-
-==== On the desktop
-
-Select `Preferences` and `Raspberry Pi Configuration` from the desktop menu: a window will appear. Select the `Interfaces` tab, then click on the `enable camera` option. Click `OK`. You will need to reboot for the changes to take effect.
-
-==== With the command line
-
-Open the `raspi-config` tool from the terminal:
-
-[,bash]
-----
-sudo raspi-config
-----
-
-Select `Interfacing Options` then `Camera` and press `Enter`. Choose `Yes` then `Ok`. Go to `Finish` and you'll be prompted to reboot.
-
-To test that the system is installed and working, try the following command:
-
-[,bash]
-----
-raspistill -v -o test.jpg
-----
-
-The display should show a five-second preview from the camera and then take a picture, saved to the file `test.jpg`, whilst displaying various informational messages.
-
-=== `raspistill`
-
-`raspistill` is the command line tool for capturing still photographs with a Raspberry Pi camera module.
-
-==== Basic usage of raspistill
-
-With a camera module xref:../accessories/camera.adoc#camera-modules[connected and enabled], enter the following command in the terminal to take a picture:
-
-[,bash]
-----
-raspistill -o cam.jpg
-----
-
-image::images/cam.jpg[Upside-down photo]
-
-In this example the camera has been positioned upside-down. If the camera is placed in this position, the image must be flipped to appear the right way up.
-
-==== Vertical flip and horizontal flip
-
-With the camera placed upside-down, the image must be rotated 180° to be displayed correctly. The way to correct for this is to apply both a vertical and a horizontal flip by passing in the `-vf` and `-hf` flags:
-
-[,bash]
-----
-raspistill -vf -hf -o cam2.jpg
-----
-
-image::images/cam2.jpg[Vertical and horizontal flipped photo]
-
-Now the photo has been captured correctly.
-
-==== Resolution
-
-The camera module takes pictures at a resolution of `2592 x 1944` which is 5,038,848 pixels or 5 megapixels.
-
-==== File size
-
-A photo taken with the camera module will be around 2.4MB. This is about 425 photos per GB.
-
-Taking 1 photo per minute would take up 1GB in about 7 hours. This is a rate of about 144MB per hour or 3.3GB per day.
-
-==== Bash script
-
-You can create a Bash script which takes a picture with the camera. To create a script, open up your editor of choice and write the following example code:
-
-[,bash]
-----
-#!/bin/bash
-
-DATE=$(date +"%Y-%m-%d_%H%M")
-
-raspistill -vf -hf -o /home/pi/camera/$DATE.jpg
-----
-
-This script will take a picture and name the file with a timestamp.
-
-You'll also need to make sure the path exists by creating the `camera` folder:
-
-[,bash]
-----
-mkdir camera
-----
-
-Say we saved it as `camera.sh`, we would first make the file executable:
-
-[,bash]
-----
-chmod +x camera.sh
-----
-
-Then run with:
-
-[,bash]
-----
-./camera.sh
-----
-
-==== More options
-
-For a full list of possible options, run `raspistill` with no arguments. To scroll, redirect stderr to stdout and pipe the output to `less`:
-
-[,bash]
-----
-raspistill 2>&1 | less
-----
-
-Use the arrow keys to scroll and type `q` to exit.
-
-=== `raspivid`
-
-`raspivid` is the command line tool for capturing video with a Raspberry Pi camera module.
-
-==== Basic usage of raspivid
-
-With a camera module xref:../accessories/camera.adoc#camera-modules[connected and enabled], record a video using the following command:
-
-[,bash]
-----
-raspivid -o vid.h264
-----
-
-Remember to use `-hf` and `-vf` to flip the image if required, like with xref:camera_software.adoc#raspistill[raspistill]
-
-This will save a 5 second video file to the path given here as `vid.h264` (default length of time).
-
-==== Specify length of video
-
-To specify the length of the video taken, pass in the `-t` flag with a number of milliseconds. For example:
-
-[,bash]
-----
-raspivid -o video.h264 -t 10000
-----
-
-This will record 10 seconds of video.
-
-==== More options
-
-For a full list of possible options, run `raspivid` with no arguments, or pipe this command through `less` and scroll through:
-
-[,bash]
-----
-raspivid 2>&1 | less
-----
-
-Use the arrow keys to scroll and type `q` to exit.
-
-==== MP4 Video Format
-
-The Raspberry Pi captures video as a raw H264 video stream. Many media players will refuse to play it, or play it at an incorrect speed, unless it is "wrapped" in a suitable container format like MP4. The easiest way to obtain an MP4 file from the raspivid command is using MP4Box.
-
-Install MP4Box with this command:
-
-[,bash]
-----
-sudo apt install -y gpac
-----
-
-Capture your raw video with raspivid and wrap it in an MP4 container like this:
-
-[,bash]
-----
-# Capture 30 seconds of raw video at 640x480 and 150kBps bit rate into a pivideo.h264 file:
-raspivid -t 30000 -w 640 -h 480 -fps 25 -b 1200000 -p 0,0,640,480 -o pivideo.h264
-# Wrap the raw video with an MP4 container:
-MP4Box -add pivideo.h264 pivideo.mp4
-# Remove the source raw file, leaving the remaining pivideo.mp4 file to play
-rm pivideo.h264
-----
-
-Alternatively, wrap MP4 around your existing raspivid output, like this:
-
-[,bash]
-----
-MP4Box -add video.h264 video.mp4
-----
-
-=== `raspiyuv`
-
-`raspiyuv` has the same set of features as `raspistill` but instead of outputting standard image files such as ``.jpg``s, it generates YUV420 or RGB888 image files from the output of the camera ISP.
-
-In most cases using `raspistill` is the best option for standard image capture, but using YUV can be of benefit in certain circumstances. For example if you just need an uncompressed black and white image for computer vision applications, you can simply use the Y channel of a YUV capture.
-
-There are some specific points about the YUV420 files that are required in order to use them correctly. Line stride (or pitch) is a multiple of 32, and each plane of YUV is a multiple of 16 in height. This can mean there may be extra pixels at the end of lines, or gaps between planes, depending on the resolution of the captured image. These gaps are unused.
-
-=== Troubleshooting
-
-If the Camera Module isn't working correctly, there are a number of things to try:
-
-* Is the ribbon cable attached to the Camera Serial Interface (CSI), not the Display Serial Interface (DSI)? The ribbon connector will fit into either port. The Camera port is located near the HDMI connector.
-* Are the ribbon connectors all firmly seated, and are they the right way round? They must be straight in their sockets.
-* Is the Camera Module connector, between the smaller black Camera Module itself and the PCB, firmly attached? Sometimes this connection can come loose during transit or when putting the Camera Module in a case. Using a fingernail, flip up the connector on the PCB, then reconnect it with gentle pressure. It engages with a very slight click. Don't force it; if it doesn't engage, it's probably slightly misaligned.
-* Have `sudo apt update` and `sudo apt full-upgrade` been run?
-* Has `raspi-config` been run and the Camera Module enabled?
-* Is your power supply sufficient? The Camera Module adds about 200-250mA to the power requirements of your Raspberry Pi.
-
-If things are still not working, try the following:
-
-* `Error : raspistill/raspivid command not found`. This probably means your update/upgrade failed in some way. Try it again.
-* `Error : ENOMEM`. The Camera Module is not starting up. Check all connections again.
-* `Error : ENOSPC`. The Camera Module is probably running out of GPU memory. Check `config.txt` in the /boot/ folder. The `gpu_mem` option should be at least 128. Alternatively, use the Memory Split option in the Advanced section of `raspi-config` to set this.
-* If you've checked all the above issues and the Camera Module is still not working, try posting on our forums for more help.
-
-=== Command Line Options
-
-==== Preview window
-
-----
- --preview, -p Preview window settings <'x,y,w,h'>
-----
-
-Allows the user to define the size of the preview window and its location on the screen. Note this will be superimposed over the top of any other windows/graphics.
-
-----
- --fullscreen, -f Fullscreen preview mode
-----
-
-Forces the preview window to use the whole screen. Note that the aspect ratio of the incoming image will be retained, so there may be bars on some edges.
-
-----
- --nopreview, -n Do not display a preview window
-----
-
-Disables the preview window completely. Note that even though the preview is disabled, the camera will still be producing frames, so will be using power.
-
-----
- --opacity, -op Set preview window opacity
-----
-
-Sets the opacity of the preview windows. 0 = invisible, 255 = fully opaque.
-
-==== Camera control options
-
-----
- --sharpness, -sh Set image sharpness (-100 - 100)
-----
-
-Sets the sharpness of the image. 0 is the default.
-
-----
- --contrast, -co Set image contrast (-100 - 100)
-----
-
-Sets the contrast of the image. 0 is the default.
-
-----
- --brightness, -br Set image brightness (0 - 100)
-----
-
-Sets the brightness of the image. 50 is the default. 0 is black, 100 is white.
-
-----
- --saturation, -sa Set image saturation (-100 - 100)
-----
-
-Sets the colour saturation of the image. 0 is the default.
-
-----
- --ISO, -ISO Set capture ISO (100 - 800)
-----
-
-Sets the ISO to be used for captures.
-
-----
- --vstab, -vs Turn on video stabilisation
-----
-
-In video mode only, turns on video stabilisation.
-
-----
- --ev, -ev Set EV compensation (-10 - 10)
-----
-
-Sets the EV compensation of the image. Default is 0.
-
-----
- --exposure, -ex Set exposure mode
-----
-
-Possible options are:
-
-* auto: use automatic exposure mode
-* night: select setting for night shooting
-* nightpreview:
-* backlight: select setting for backlit subject
-* spotlight:
-* sports: select setting for sports (fast shutter etc.)
-* snow: select setting optimised for snowy scenery
-* beach: select setting optimised for beach
-* verylong: select setting for long exposures
-* fixedfps: constrain fps to a fixed value
-* antishake: antishake mode
-* fireworks: select setting optimised for fireworks
-
-Note that not all of these settings may be implemented, depending on camera tuning.
-
-----
- --flicker, -fli Set flicker avoidance mode
-----
-
-Set a mode to compensate for lights flickering at the mains frequency, which can be seen as a dark horizontal band across an image. Flicker avoidance locks the exposure time to a multiple of the mains flicker frequency (8.33ms for 60Hz, or 10ms for 50Hz). This means that images can be noisier as the control algorithm has to increase the gain instead of exposure time should it wish for an intermediate exposure value. `auto` can be confused by external factors, therefore it is preferable to leave this setting off unless actually required.
-
-Possible options are:
-
-* off: turn off flicker avoidance
-* auto: automatically detect mains frequency
-* 50hz: set avoidance at 50Hz
-* 60hz: set avoidance at 60Hz
-
-----
- --awb, -awb Set Automatic White Balance (AWB) mode
-----
-
-Modes for which colour temperature ranges (K) are available have these settings in brackets.
-
-* off: turn off white balance calculation
-* auto: automatic mode (default)
-* sun: sunny mode (between 5000K and 6500K)
-* cloud: cloudy mode (between 6500K and 12000K)
-* shade: shade mode
-* tungsten: tungsten lighting mode (between 2500K and 3500K)
-* fluorescent: fluorescent lighting mode (between 2500K and 4500K)
-* incandescent: incandescent lighting mode
-* flash: flash mode
-* horizon: horizon mode
-* greyworld: Use this on the NoIR camera to fix incorrect AWB results due to the lack of the IR filter.
-
-Note that not all of these settings may be implemented, depending on camera type.
-
-----
- --imxfx, -ifx Set image effect
-----
-
-Set an effect to be applied to the image:
-
-* none: no effect (default)
-* negative: invert the image colours
-* solarise: solarise the image
-* posterise: posterise the image
-* whiteboard: whiteboard effect
-* blackboard: blackboard effect
-* sketch: sketch effect
-* denoise: denoise the image
-* emboss: emboss the image
-* oilpaint: oil paint effect
-* hatch: hatch sketch effect
-* gpen: graphite sketch effect
-* pastel: pastel effect
-* watercolour: watercolour effect
-* film: film grain effect
-* blur: blur the image
-* saturation: colour saturate the image
-* colourswap: not fully implemented
-* washedout: not fully implemented
-* colourpoint: not fully implemented
-* colourbalance: not fully implemented
-* cartoon: not fully implemented
-
-Note that not all of these settings may be available in all circumstances.
-
-----
- --colfx, -cfx Set colour effect
-----
-
-The supplied U and V parameters (range 0 - 255) are applied to the U and V channels of the image. For example, --colfx 128:128 should result in a monochrome image.
-
-----
- --metering, -mm Set metering mode
-----
-
-Specify the metering mode used for the preview and capture:
-
-* average: average the whole frame for metering
-* spot: spot metering
-* backlit: assume a backlit image
-* matrix: matrix metering
-
-----
- --rotation, -rot Set image rotation (0 - 359)
-----
-
-Sets the rotation of the image in the viewfinder and resulting image. This can take any value from 0 upwards, but due to hardware constraints only 0, 90, 180, and 270 degree rotations are supported.
-
-----
- --hflip, -hf Set horizontal flip
-----
-
-Flips the preview and saved image horizontally.
-
-----
- --vflip, -vf Set vertical flip
-----
-
-Flips the preview and saved image vertically.
-
-----
- --roi, -roi Set sensor region of interest
-----
-
-Allows the specification of the area of the sensor to be used as the source for the preview and capture. This is defined as x,y for the top-left corner, and a width and height, with all values in normalised coordinates (0.0 - 1.0). So, to set a ROI at halfway across and down the sensor, and a width and height of a quarter of the sensor, use:
-
-----
--roi 0.5,0.5,0.25,0.25
-----
-
-----
- --shutter, -ss Set shutter speed/time
-----
-
-Sets the shutter open time to the specified value (in microseconds). Shutter speed limits are as follows:
-
-[cols=",^"]
-|===
-| Camera Version | Max (microseconds)
-
-| V1 (OV5647)
-| 6000000 (i.e. 6s)
-
-| V2 (IMX219)
-| 10000000 (i.e. 10s)
-
-| HQ (IMX477)
-| 200000000 (i.e. 200s)
-|===
-
-Using values above these maximums will result in undefined behaviour.
-
-----
- --drc, -drc Enable/disable dynamic range compression
-----
-
-DRC changes the images by increasing the range of dark areas, and decreasing the brighter areas. This can improve the image in low light areas.
-
-* off
-* low
-* med
-* high
-
-By default, DRC is off.
-
-----
- --stats, -st Use stills capture frame for image statistics
-----
-
-Force recomputation of statistics on stills capture pass. Digital gain and AWB are recomputed based on the actual capture frame statistics, rather than the preceding preview frame.
-
-----
- --awbgains, -awbg
-----
-
-Sets blue and red gains (as floating point numbers) to be applied when `-awb off` is set e.g. -awbg 1.5,1.2
-
-----
- --analoggain, -ag
-----
-
-Sets the analog gain value directly on the sensor (floating point value from 1.0 to 8.0 for the OV5647 sensor on Camera Module V1, and 1.0 to 12.0 for the IMX219 sensor on Camera Module V2 and the IMX477 on the HQ Camera).
-
-----
- --digitalgain, -dg
-----
-
-Sets the digital gain value applied by the ISP (floating point value from 1.0 to 64.0, but values over about 4.0 will produce overexposed images)
-
-----
- --mode, -md
-----
-
-Sets a specified sensor mode, disabling the automatic selection. Possible values depend on the version of the Camera Module being used:
-
-Version 1.x (OV5647)
-
-|===
-| Mode | Size | Aspect Ratio | Frame rates | FOV | Binning
-
-| 0
-| automatic selection
-|
-|
-|
-|
-
-| 1
-| 1920x1080
-| 16:9
-| 1-30fps
-| Partial
-| None
-
-| 2
-| 2592x1944
-| 4:3
-| 1-15fps
-| Full
-| None
-
-| 3
-| 2592x1944
-| 4:3
-| 0.1666-1fps
-| Full
-| None
-
-| 4
-| 1296x972
-| 4:3
-| 1-42fps
-| Full
-| 2x2
-
-| 5
-| 1296x730
-| 16:9
-| 1-49fps
-| Full
-| 2x2
-
-| 6
-| 640x480
-| 4:3
-| 42.1-60fps
-| Full
-| 2x2 plus skip
-
-| 7
-| 640x480
-| 4:3
-| 60.1-90fps
-| Full
-| 2x2 plus skip
-|===
-
-Version 2.x (IMX219)
-
-|===
-| Mode | Size | Aspect Ratio | Frame rates | FOV | Binning
-
-| 0
-| automatic selection
-|
-|
-|
-|
-
-| 1
-| 1920x1080
-| 16:9
-| 0.1-30fps
-| Partial
-| None
-
-| 2
-| 3280x2464
-| 4:3
-| 0.1-15fps
-| Full
-| None
-
-| 3
-| 3280x2464
-| 4:3
-| 0.1-15fps
-| Full
-| None
-
-| 4
-| 1640x1232
-| 4:3
-| 0.1-40fps
-| Full
-| 2x2
-
-| 5
-| 1640x922
-| 16:9
-| 0.1-40fps
-| Full
-| 2x2
-
-| 6
-| 1280x720
-| 16:9
-| 40-90fps
-| Partial
-| 2x2
-
-| 7
-| 640x480
-| 4:3
-| 40-200fps^1^
-| Partial
-| 2x2
-|===
-
-^1^For frame rates over 120fps, it is necessary to turn off automatic exposure and gain control using `-ex off`. Doing so should achieve the higher frame rates, but exposure time and gains will need to be set to fixed values supplied by the user.
-
-HQ Camera
-
-|===
-| Mode | Size | Aspect Ratio | Frame rates | FOV | Binning/Scaling
-
-| 0
-| automatic selection
-|
-|
-|
-|
-
-| 1
-| 2028x1080
-| 169:90
-| 0.1-50fps
-| Partial
-| 2x2 binned
-
-| 2
-| 2028x1520
-| 4:3
-| 0.1-50fps
-| Full
-| 2x2 binned
-
-| 3
-| 4056x3040
-| 4:3
-| 0.005-10fps
-| Full
-| None
-
-| 4
-| 1332x990
-| 74:55
-| 50.1-120fps
-| Partial
-| 2x2 binned
-|===
-
-----
- --camselect, -cs
-----
-
-Selects which camera to use on a multi-camera system. Use 0 or 1.
-
-----
- --annotate, -a Enable/set annotate flags or text
-----
-
-Adds some text and/or metadata to the picture.
-
-Metadata is indicated using a bitmask notation, so add them together to show multiple parameters. For example, 12 will show time(4) and date(8), since 4+8=12.
-
-Text may include date/time placeholders by using the '%' character, as used by http://man7.org/linux/man-pages/man3/strftime.3.html[strftime].
-
-|===
-| Value | Meaning | Example Output
-
-| -a 4
-| Time
-| 20:09:33
-
-| -a 8
-| Date
-| 10/28/15
-
-| -a 12
-| 4+8=12 Show the time(4) and date(8)
-| 20:09:33 10/28/15
-
-| -a 16
-| Shutter Settings
-|
-
-| -a 32
-| CAF Settings
-|
-
-| -a 64
-| Gain Settings
-|
-
-| -a 128
-| Lens Settings
-|
-
-| -a 256
-| Motion Settings
-|
-
-| -a 512
-| Frame Number
-|
-
-| -a 1024
-| Black Background
-|
-
-| -a "ABC %Y-%m-%d %X"
-| Show some text
-| ABC %Y-%m-%d %X
-
-| -a 4 -a "ABC %Y-%m-%d %X"
-| Show custom http://man7.org/linux/man-pages/man3/strftime.3.html[formatted] date/time
-| ABC 2015-10-28 20:09:33
-
-| -a 8 -a "ABC %Y-%m-%d %X"
-| Show custom http://man7.org/linux/man-pages/man3/strftime.3.html[formatted] date/time
-| ABC 2015-10-28 20:09:33
-|===
-
-----
- --annotateex, -ae Set extra annotation parameters
-----
-
-Specifies annotation size, text colour, and background colour. Colours are in hex YUV format.
-
-Size ranges from 6 - 160; default is 32. Asking for an invalid size should give you the default.
-
-|===
-| Example | Explanation
-
-| -ae 32,0xff,0x808000 -a "Annotation text"
-| gives size 32 white text on black background
-
-| -ae 10,0x00,0x8080FF -a "Annotation text"
-| gives size 10 black text on white background
-|===
-
-----
- --stereo, -3d
-----
-
-Select the specified stereo imaging mode; `sbs` selects side-by-side mode, `tb` selects top/bottom mode; `off` turns off stereo mode (the default).
-
-----
- --decimate, -dec
-----
-
-Halves the width and height of the stereo image.
-
-----
- --3dswap, -3dswap
-----
-
-Swaps the camera order used in stereoscopic imaging; NOTE: currently not working.
-
-----
- --settings, -set
-----
-
-Retrieves the current camera settings and writes them to stdout.
-
-=== Application-specific Settings
-
-==== `raspistill`
-
-----
- --width, -w Set image width
-
- --height, -h Set image height
-
- --quality, -q Set JPEG quality <0 to 100>
-----
-
-Quality 100 is almost completely uncompressed. 75 is a good all-round value.
-
-----
- --raw, -r Add raw Bayer data to JPEG metadata
-----
-
-This option inserts the raw Bayer data from the camera into the JPEG metadata.
-
-----
- --output, -o Output filename
-----
-
-Specifies the output filename. If not specified, no file is saved. If the filename is '-', then all output is sent to stdout.
-
-----
- --latest, -l Link latest frame to filename
-----
-
-Makes a file system link under this name to the latest frame.
-
-----
- --verbose, -v Output verbose information during run
-----
-
-Outputs debugging/information messages during the program run.
-
-----
- --timeout, -t Time before the camera takes picture and shuts down
-----
-
-The program will run for the specified length of time, entered in milliseconds. It then takes the capture and saves it if an output is specified. If a timeout value is not specified, then it is set to 5 seconds (-t 5000). Note that low values (less than 500ms, although it can depend on other settings) may not give enough time for the camera to start up and provide enough frames for the automatic algorithms like AWB and AGC to provide accurate results.
-
-If set to 0, the preview will run indefinitely, until stopped with CTRL-C. In this case no capture is made.
-
-----
- --timelapse, -tl time-lapse mode
-----
-
-The specific value is the time between shots in milliseconds. Note that you should specify `%04d` at the point in the filename where you want a frame count number to appear. So, for example, the code below will produce a capture every 2 seconds, over a total period of 30s, named `image0001.jpg`, `image0002.jpg` and so on, through to `image0015.jpg`.
-
-----
--t 30000 -tl 2000 -o image%04d.jpg
-----
-
-Note that the `%04d` indicates a 4-digit number, with leading zeroes added to make the required number of digits. So, for example, `%08d` would result in an 8-digit number.
-
-If a time-lapse value of 0 is entered, the application will take pictures as fast as possible. Note that there's a minimum enforced pause of 30ms between captures to ensure that exposure calculations can be made.
-
-----
- --framestart, -fs
-----
-
-Specifies the first frame number in the timelapse. Useful if you have already saved a number of frames, and want to start again at the next frame.
-
-----
- --datetime, -dt
-----
-
-Instead of a simple frame number, the timelapse file names will use a date/time value of the format `aabbccddee`, where `aa` is the month, `bb` is the day of the month, `cc` is the hour, `dd` is the minute, and `ee` is the second.
-
-----
- --timestamp, -ts
-----
-
-Instead of a simple frame number, the timelapse file names will use a single number which is the Unix timestamp, i.e. the seconds since 1970.
-
-----
- --thumb, -th Set thumbnail parameters (x:y:quality)
-----
-
-Allows specification of the thumbnail image inserted into the JPEG file. If not specified, defaults are a size of 64x48 at quality 35.
-
-if `--thumb none` is specified, no thumbnail information will be placed in the file. This reduces the file size slightly.
-
-----
- --demo, -d Run a demo mode
-----
-
-This option cycles through the range of camera options. No capture is taken, and the demo will end at the end of the timeout period, irrespective of whether all the options have been cycled. The time between cycles should be specified as a millisecond value.
-
-----
- --encoding, -e Encoding to use for output file
-----
-
-Valid options are `jpg`, `bmp`, `gif`, and `png`. Note that unaccelerated image types (GIF, PNG, BMP) will take much longer to save than jpg, which is hardware accelerated. Also note that the filename suffix is completely ignored when deciding the encoding of a file.
-
-----
- --restart, -rs
-----
-
-Sets the JPEG restart marker interval to a specific value. Can be useful for lossy transport streams because it allows a broken JPEG file to still be partially displayed.
-
-----
- --exif, -x EXIF tag to apply to captures (format as 'key=value')
-----
-
-Allows the insertion of specific EXIF tags into the JPEG image. You can have up to 32 EXIF tag entries. This is useful for tasks like adding GPS metadata. For example, to set the longitude:
-
-----
---exif GPS.GPSLongitude=5/1,10/1,15/1
-----
-
-would set the longitude to 5 degs, 10 minutes, 15 seconds. See EXIF documentation for more details on the range of tags available; the supported tags are as follows:
-
-----
-IFD0.< or
-IFD1.<
-ImageWidth, ImageLength, BitsPerSample, Compression, PhotometricInterpretation, ImageDescription, Make, Model, StripOffsets, Orientation, SamplesPerPixel, RowsPerStrip, StripByteCounts, XResolution, YResolution, PlanarConfiguration, ResolutionUnit, TransferFunction, Software, DateTime, Artist, WhitePoint, PrimaryChromaticities, JPEGInterchangeFormat, JPEGInterchangeFormatLength, YCbCrCoefficients, YCbCrSubSampling, YCbCrPositioning, ReferenceBlackWhite, Copyright>
-
-EXIF.<
-ExposureTime, FNumber, ExposureProgram, SpectralSensitivity, ISOSpeedRatings, OECF, ExifVersion, DateTimeOriginal, DateTimeDigitized, ComponentsConfiguration, CompressedBitsPerPixel, ShutterSpeedValue, ApertureValue, BrightnessValue, ExposureBiasValue, MaxApertureValue, SubjectDistance, MeteringMode, LightSource, Flash, FocalLength, SubjectArea, MakerNote, UserComment, SubSecTime, SubSecTimeOriginal, SubSecTimeDigitized, FlashpixVersion, ColorSpace, PixelXDimension, PixelYDimension, RelatedSoundFile, FlashEnergy, SpatialFrequencyResponse, FocalPlaneXResolution, FocalPlaneYResolution, FocalPlaneResolutionUnit, SubjectLocation, ExposureIndex, SensingMethod, FileSource, SceneType, CFAPattern, CustomRendered, ExposureMode, WhiteBalance, DigitalZoomRatio, FocalLengthIn35mmFilm, SceneCaptureType, GainControl, Contrast, Saturation, Sharpness, DeviceSettingDescription, SubjectDistanceRange, ImageUniqueID>
-
-GPS.<
-GPSVersionID, GPSLatitudeRef, GPSLatitude, GPSLongitudeRef, GPSLongitude, GPSAltitudeRef, GPSAltitude, GPSTimeStamp, GPSSatellites, GPSStatus, GPSMeasureMode, GPSDOP, GPSSpeedRef, GPSSpeed, GPSTrackRef, GPSTrack, GPSImgDirectionRef, GPSImgDirection, GPSMapDatum, GPSDestLatitudeRef, GPSDestLatitude, GPSDestLongitudeRef, GPSDestLongitude, GPSDestBearingRef, GPSDestBearing, GPSDestDistanceRef, GPSDestDistance, GPSProcessingMethod, GPSAreaInformation, GPSDateStamp, GPSDifferential>
-
-EINT.<
-InteroperabilityIndex, InteroperabilityVersion, RelatedImageFileFormat, RelatedImageWidth, RelatedImageLength>
-----
-
-Note that a small subset of these tags will be set automatically by the camera system, but will be overridden by any EXIF options on the command line.
-
-Setting `--exif none` will prevent any EXIF information being stored in the file. This reduces the file size slightly.
-
-----
- --gpsdexif, -gps
-----
-
-Applies real-time EXIF information from any attached GPS dongle (using GPSD) to the image; requires `libgps.so` to be installed.
-
-----
- --fullpreview, -fp Full preview mode
-----
-
-This runs the preview window using the full resolution capture mode. Maximum frames per second in this mode is 15fps, and the preview will have the same field of view as the capture. Captures should happen more quickly, as no mode change should be required. This feature is currently under development.
-
-----
- --keypress, -k Keypress mode
-----
-
-The camera is run for the requested time (`-t`), and a capture can be initiated throughout that time by pressing the Enter key. Pressing X then Enter will exit the application before the timeout is reached. If the timeout is set to 0, the camera will run indefinitely until the user presses X then Enter. Using the verbose option (`-v`) will display a prompt asking for user input, otherwise no prompt is displayed.
-
-----
- --signal, -s Signal mode
-----
-
-The camera is run for the requested time (`-t`), and a capture can be initiated throughout that time by sending a `USR1` signal to the camera process. This can be done using the `kill` command. You can find the camera process ID using the `pgrep raspistill` command.
-
-`kill -USR1 `
-
-----
- --burst, -bm
-----
-
-Sets burst capture mode. This prevents the camera from returning to preview mode in between captures, meaning that captures can be taken closer together.
-
-==== `raspivid`
-
-----
- --width, -w Set image width
-----
-
-Width of resulting video. This should be between 64 and 1920.
-
-----
- --height, -h Set image height
-----
-
-Height of resulting video. This should be between 64 and 1080.
-
-----
- --bitrate, -b Set bitrate
-----
-
-Use bits per second, so 10Mbps would be `-b 10000000`. For H264, 1080p30 a high quality bitrate would be 15Mbps or more. Maximum bitrate is 25Mbps (`-b 25000000`), but much over 17Mbps won't show noticeable improvement at 1080p30.
-
-----
- --output, -o Output filename
-----
-
-Specify the output filename. If not specified, no file is saved. If the filename is '-', then all output is sent to stdout.
-
-To connect to a remote IPv4 host, use `tcp` or `udp` followed by the required IP Address. e.g. `tcp://192.168.1.2:1234` or `udp://192.168.1.2:1234`.
-
-To listen on a TCP port (IPv4) and wait for an incoming connection use `--listen (-l)` option, e.g. `raspivid -l -o tcp://0.0.0.0:3333` will bind to all network interfaces, `raspivid -l -o tcp://192.168.1.1:3333` will bind to a local IPv4.
-
-----
- --listen, -l
-----
-
-When using a network connection as the data sink, this option will make the system wait for a connection from the remote system before sending data.
-
-----
- --verbose, -v Output verbose information during run
-----
-
-Outputs debugging/information messages during the program run.
-
-----
- --timeout, -t Time before the camera takes picture and shuts down
-----
-
-The total length of time that the program will run for. If not specified, the default is 5000ms (5 seconds). If set to 0, the application will run indefinitely until stopped with Ctrl-C.
-
-----
- --demo, -d Run a demo mode
-----
-
-This options cycles through the range of camera options. No recording is done, and the demo will end at the end of the timeout period, irrespective of whether all the options have been cycled. The time between cycles should be specified as a millisecond value.
-
-----
- --framerate, -fps Specify the frames per second to record
-----
-
-At present, the minimum frame rate allowed is 2fps, and the maximum is 30fps. This is likely to change in the future.
-
-----
- --penc, -e Display preview image after encoding
-----
-
-Switch on an option to display the preview after compression. This will show any compression artefacts in the preview window. In normal operation, the preview will show the camera output prior to being compressed. This option is not guaranteed to work in future releases.
-
-----
- --intra, -g Specify the intra refresh period (key frame rate/GoP)
-----
-
-Sets the intra refresh period (GoP) rate for the recorded video. H264 video uses a complete frame (I-frame) every intra refresh period, from which subsequent frames are based. This option specifies the number of frames between each I-frame. Larger numbers here will reduce the size of the resulting video, and smaller numbers make the stream less error-prone.
-
-----
- --qp, -qp Set quantisation parameter
-----
-
-Sets the initial quantisation parameter for the stream. Varies from approximately 10 to 40, and will greatly affect the quality of the recording. Higher values reduce quality and decrease file size. Combine this setting with a bitrate of 0 to set a completely variable bitrate.
-
-----
- --profile, -pf Specify H264 profile to use for encoding
-----
-
-Sets the H264 profile to be used for the encoding. Options are:
-
-* baseline
-* main
-* high
-
-----
- --level, -lev
-----
-
-Specifies the H264 encoder level to use for encoding. Options are `4`, `4.1`, and `4.2`.
-
-----
- --irefresh, -if
-----
-
-Sets the H264 intra-refresh type. Possible options are `cyclic`, `adaptive`, `both`, and `cyclicrows`.
-
-----
- --inline, -ih Insert PPS, SPS headers
-----
-
-Forces the stream to include PPS and SPS headers on every I-frame. Needed for certain streaming cases e.g. Apple HLS. These headers are small, so don't greatly increase the file size.
-
-----
- --spstimings, -stm
-----
-
-Insert timing information into the SPS block.
-
-----
- --timed, -td Do timed switches between capture and pause
-----
-
-This options allows the video capture to be paused and restarted at particular time intervals. Two values are required: the on time and the off time. On time is the amount of time the video is captured, and off time is the amount it is paused. The total time of the recording is defined by the `timeout` option. Note that the recording may take slightly over the timeout setting depending on the on and off times.
-
-For example:
-
-----
-raspivid -o test.h264 -t 25000 -timed 2500,5000
-----
-
-will record for a period of 25 seconds. The recording will be over a timeframe consisting of 2500ms (2.5s) segments with 5000ms (5s) gaps, repeating over the 20s. So the entire recording will actually be only 10s long, since 4 segments of 2.5s = 10s separated by 5s gaps. So:
-
-2.5 record -- 5 pause - 2.5 record -- 5 pause - 2.5 record -- 5 pause -- 2.5 record
-
-gives a total recording period of 25s, but only 10s of actual recorded footage.
-
-----
- --keypress, -k Toggle between record and pause on Enter keypress
-----
-
-On each press of the Enter key, the recording will be paused or restarted. Pressing X then Enter will stop recording and close the application. Note that the timeout value will be used to signal the end of recording, but is only checked after each Enter keypress; so if the system is waiting for a keypress, even if the timeout has expired, it will still wait for the keypress before exiting.
-
-----
- --signal, -s Toggle between record and pause according to SIGUSR1
-----
-
-Sending a `USR1` signal to the `raspivid` process will toggle between recording and paused. This can be done using the `kill` command, as below. You can find the `raspivid` process ID using `pgrep raspivid`.
-
-`kill -USR1 `
-
-Note that the timeout value will be used to indicate the end of recording, but is only checked after each receipt of the `SIGUSR1` signal; so if the system is waiting for a signal, even if the timeout has expired, it will still wait for the signal before exiting.
-
-----
- --split, -sp
-----
-
-When in a signal or keypress mode, each time recording is restarted, a new file is created.
-
-----
- --circular, -c
-----
-
-Select circular buffer mode. All encoded data is stored in a circular buffer until a trigger is activated, then the buffer is saved.
-
-----
- --vectors, -x
-----
-
-Turns on output of motion vectors from the H264 encoder to the specified file name.
-
-----
- --flush, -fl
-----
-
-Forces a flush of output data buffers as soon as video data is written. This bypasses any OS caching of written data, and can decrease latency.
-
-----
- --save-pts, -pts
-----
-
-Saves timestamp information to the specified file. Useful as an input file to `mkvmerge`.
-
-----
- --codec, -cd
-----
-
-Specifies the encoder codec to use. Options are `H264` and `MJPEG`. H264 can encode up to 1080p, whereas MJPEG can encode up to the sensor size, but at decreased framerates due to the higher processing and storage requirements.
-
-----
- --initial, -i Define initial state on startup
-----
-
-Define whether the camera will start paused or will immediately start recording. Options are `record` or `pause`. Note that if you are using a simple timeout, and `initial` is set to `pause`, no output will be recorded.
-
-----
- --segment, -sg Segment the stream into multiple files
-----
-
-Rather than creating a single file, the file is split into segments of approximately the number of milliseconds specified. In order to provide different filenames, you should add `%04d` or similar at the point in the filename where you want a segment count number to appear e.g:
-
-----
---segment 3000 -o video%04d.h264
-----
-
-will produce video clips of approximately 3000ms (3s) long, named `video0001.h264`, `video0002.h264` etc. The clips should be seamless (no frame drops between clips), but the accuracy of each clip length will depend on the intraframe period, as the segments will always start on an I-frame. They will therefore always be equal or longer to the specified period.
-
-The most recent version of Raspivid will also allow the file name to be time-based, rather than using a segment number. For example:
-
-----
---segment 3000 -o video_%c.h264
-----
-
-will produce file names formatted like so: `video_Fri Jul 20 16:23:48 2018.h264`
-
-There are http://man7.org/linux/man-pages/man3/strftime.3.html[many different formatting options] available. Note than the `%d` and `%u` options are not available, as they are used for the segment number formatting, and that some combinations may produce invalid file names.
-
-----
- --wrap, -wr Set the maximum value for segment number
-----
-
-When outputting segments, this is the maximum the segment number can reach before it's reset to 1, giving the ability to keep recording segments, but overwriting the oldest one. So if set to 4, in the segment example above, the files produced will be `video0001.h264`, `video0002.h264`, `video0003.h264`, and `video0004.h264`. Once `video0004.h264` is recorded, the count will reset to 1, and `video0001.h264` will be overwritten.
-
-----
- --start, -sn Set the initial segment number
-----
-
-When outputting segments, this is the initial segment number, giving the ability to resume a previous recording from a given segment. The default value is 1.
-
-----
- --raw, -r
-----
-
-Specify the output file name for any raw data files requested.
-
-----
- --raw-format, -rf
-----
-
-Specify the raw format to be used if raw output requested. Options as `yuv`, `rgb`, and `grey`. `grey` simply saves the Y channel of the YUV image.
-
-==== `raspiyuv`
-
-Many of the options for `raspiyuv` are the same as those for `raspistill`. This section shows the differences.
-
-Unsupported options:
-
-----
---exif, --encoding, --thumb, --raw, --quality
-----
-
-Extra options :
-
-----
- --rgb, -rgb Save uncompressed data as RGB888
-----
-
-This option forces the image to be saved as RGB data with 8 bits per channel, rather than YUV420.
-
-Note that the image buffers saved in `raspiyuv` are padded to a horizontal size divisible by 32, so there may be unused bytes at the end of each line. Buffers are also padded vertically to be divisible by 16, and in the YUV mode, each plane of Y,U,V is padded in this way.
-
-----
- --luma, -y
-----
-
-Only outputs the luma (Y) channel of the YUV image. This is effectively the black and white, or intensity, part of the image.
-
-----
- --bgr, -bgr
-----
-
-Saves the image data as BGR data rather than YUV.
-
-=== Command Line Examples
-
-==== Still Captures
-
-By default, captures are done at the highest resolution supported by the sensor. This can be changed using the `-w` and `-h` command line options.
-
-Take a default capture after 2s (times are specified in milliseconds) on the viewfinder, saving in `image.jpg`:
-
-[,bash]
-----
-raspistill -t 2000 -o image.jpg
-----
-
-Take a capture at a different resolution:
-
-[,bash]
-----
-raspistill -t 2000 -o image.jpg -w 640 -h 480
-----
-
-Reduce the quality considerably to reduce file size:
-
-[,bash]
-----
-raspistill -t 2000 -o image.jpg -q 5
-----
-
-Force the preview to appear at coordinate 100,100, with width 300 pixels and height 200 pixels:
-
-[,bash]
-----
-raspistill -t 2000 -o image.jpg -p 100,100,300,200
-----
-
-Disable preview entirely:
-
-[,bash]
-----
-raspistill -t 2000 -o image.jpg -n
-----
-
-Save the image as a PNG file (lossless compression, but slower than JPEG). Note that the filename suffix is ignored when choosing the image encoding:
-
-[,bash]
-----
-raspistill -t 2000 -o image.png –e png
-----
-
-Add some EXIF information to the JPEG. This sets the Artist tag name to Boris, and the GPS altitude to 123.5m. Note that if setting GPS tags you should set as a minimum GPSLatitude, GPSLatitudeRef, GPSLongitude, GPSLongitudeRef, GPSAltitude, and GPSAltitudeRef:
-
-[,bash]
-----
-raspistill -t 2000 -o image.jpg -x IFD0.Artist=Boris -x GPS.GPSAltitude=1235/10
-----
-
-Set an emboss image effect:
-
-[,bash]
-----
-raspistill -t 2000 -o image.jpg -ifx emboss
-----
-
-Set the U and V channels of the YUV image to specific values (128:128 produces a greyscale image):
-
-[,bash]
-----
-raspistill -t 2000 -o image.jpg -cfx 128:128
-----
-
-Run preview for 2s, with no saved image:
-
-[,bash]
-----
-raspistill -t 2000
-----
-
-Take a time-lapse picture, every 10 seconds for 10 minutes (10 minutes = 600000ms), naming the files `image_num_001_today.jpg`, `image_num_002_today.jpg` and so on, with the latest picture also available under the name `latest.jpg`:
-
-[,bash]
-----
-raspistill -t 600000 -tl 10000 -o image_num_%03d_today.jpg -l latest.jpg
-----
-
-Take a picture and send the image data to stdout:
-
-[,bash]
-----
-raspistill -t 2000 -o -
-----
-
-Take a picture and send the image data to a file:
-
-[,bash]
-----
-raspistill -t 2000 -o - > my_file.jpg
-----
-
-Run the camera forever, taking a picture when Enter is pressed:
-
-[,bash]
-----
-raspistill -t 0 -k -o my_pics%02d.jpg
-----
-
-==== Video captures
-
-Image size and preview settings are the same as for stills capture. Default size for video recording is 1080p (1920x1080).
-
-Record a 5s clip with default settings (1080p30):
-
-[,bash]
-----
-raspivid -t 5000 -o video.h264
-----
-
-Record a 5s clip at a specified bitrate (3.5Mbps):
-
-[,bash]
-----
-raspivid -t 5000 -o video.h264 -b 3500000
-----
-
-Record a 5s clip at a specified framerate (5fps):
-
-[,bash]
-----
-raspivid -t 5000 -o video.h264 -f 5
-----
-
-Encode a 5s camera stream and send the image data to stdout:
-
-[,bash]
-----
-raspivid -t 5000 -o -
-----
-
-Encode a 5s camera stream and send the image data to a file:
-
-[,bash]
-----
-raspivid -t 5000 -o - > my_file.h264
-----
-
-=== Shell Error Codes
-
-The applications described here will return a standard error code to the shell on completion. Possible error codes are:
-
-|===
-| C Define | Code | Description
-
-| EX_OK
-| 0
-| Application ran successfully
-
-| EX_USAGE
-| 64
-| Bad command line parameter
-
-| EX_SOFTWARE
-| 70
-| Software or camera error
-
-|
-| 130
-| Application terminated by Ctrl-C
-|===
-
-=== Long Exposures
-
-The maximum exposure times of the three official Raspberry Pi cameras can be found in xref:../accessories/camera.adoc#hardware-specification[this table].
-
-Due to the way the ISP works, by default asking for a long exposure can result in the capture process taking up to 7 times the exposure time, so a 200 second exposure on the HQ camera could take 1400 seconds to actually return an image. This is due to the way the camera system works out the correct exposures and gains to use in the image, using it's AGC (automatic gain control) and AWB (automatic white balance) algorithms. The system needs a few frames to calculate these numbers in order to produce a decent image. When combined with frame discards at the start of processing (in case they are corrupt), and the switching between preview and captures modes, this can result in up to 7 frames needed to produce a final image. With long exposures, that can take a long time.
-
-Fortunately, the camera parameters can be altered to reduce frame time dramatically; however this means turning off the automatic algorithms and manually providing values for the AGC.
-
-The AWB gains can usually be omitted as the legacy stack is able to reprocess the camera data to work them out (the `-st` option), though it is fine to specify them as well. Additionally, burst mode (`-bm`) with a short timeout should be requested to suppress the initial preview phase, and the exposure mode also needs disabling (`-ex off`).
-
-The following example will perform a 100 second exposure capture
-
-`raspistill -t 10 -bm -ex off -ag 1 -ss 100000000 -st -o long_exposure.jpg`
-
-=== Shooting RAW using the Camera Modules
-
-The definition of raw images can vary. The usual meaning is raw Bayer data directly from the sensor, although some may regard an uncompressed image that has passed through the ISP (and has therefore been processed) as raw. For the latter, we recommend using the term _unencoded_ so as to be clear about the difference.
-
-Both options are available from the Raspberry Pi cameras.
-
-==== Processed, Non-Lossy Images
-
-The usual output from `raspistill` is a compressed JPEG file that has passed through all the stages of image processing to produce a high-quality image. However, JPEG, being a lossy format does throw away some information that the user may want.
-
-`raspistill` has an `encoding` option that allows you to specify the output format: either `jpg`, `bmp`, `png` or `gif`. All but `jpg` are lossless, so no data is thrown away in an effort to improve compression, but do require conversion from the original YUV, and because these formats do not have hardware support they produce images slightly more slowly than JPEG.
-
-e.g.
-
-`raspistill --encoding png -o fred.png`
-
-Another option is to output completely formatted YUV420 or RGB data using the xref:camera_software.adoc#raspiyuv[`raspiyuv`] application.
-
-==== Unprocessed Images
-
-For some applications, such as astrophotography, having the raw Bayer data direct from the sensor can be useful. This data will need to be post-processed to produce a useful image.
-
-`raspistill` has a raw option that will cause the Bayer data to be output.
-
-`raspistill --raw -o fred.jpg`
-
-The raw data is appended to the end of the JPEG file and will https://www.raspberrypi.com/news/processing-raw-image-files-from-a-raspberry-pi-high-quality-camera/[need to be extracted].
diff --git a/documentation/asciidoc/computers/camera/rpicam_apps_building.adoc b/documentation/asciidoc/computers/camera/rpicam_apps_building.adoc
new file mode 100644
index 000000000..9fe1ea10a
--- /dev/null
+++ b/documentation/asciidoc/computers/camera/rpicam_apps_building.adoc
@@ -0,0 +1,293 @@
+== Advanced `rpicam-apps`
+
+=== Build `libcamera` and `rpicam-apps`
+
+Build `libcamera` and `rpicam-apps` for yourself for the following benefits:
+
+* You can pick up the latest enhancements and features.
+
+* `rpicam-apps` can be compiled with extra optimisation for Raspberry Pi 3 and Raspberry Pi 4 devices running a 32-bit OS.
+
+* You can include optional OpenCV and/or TFLite post-processing stages, or add your own.
+
+* You can customise or add your own applications derived from `rpicam-apps`
+
+==== Remove pre-installed `rpicam-apps`
+
+Raspberry Pi OS includes a pre-installed copy of `rpicam-apps`. Before building and installing your own version of `rpicam-apps`, you must first remove the pre-installed version. Run the following command to remove the `rpicam-apps` package from your Raspberry Pi:
+
+[source,console]
+----
+$ sudo apt remove --purge rpicam-apps
+----
+
+==== Building `rpicam-apps` without building `libcamera`
+
+To build `rpicam-apps` without first rebuilding `libcamera` and `libepoxy`, install `libcamera`, `libepoxy` and their dependencies with `apt`:
+
+[source,console]
+----
+$ sudo apt install -y libcamera-dev libepoxy-dev libjpeg-dev libtiff5-dev libpng-dev libopencv-dev
+----
+
+TIP: If you do not need support for the GLES/EGL preview window, omit `libepoxy-dev`.
+
+To use the Qt preview window, install the following additional dependencies:
+
+[source,console]
+----
+$ sudo apt install -y qtbase5-dev libqt5core5a libqt5gui5 libqt5widgets5
+----
+
+For xref:camera_software.adoc#libav-integration-with-rpicam-vid[`libav`] support in `rpicam-vid`, install the following additional dependencies:
+
+[source,console]
+----
+$ sudo apt install libavcodec-dev libavdevice-dev libavformat-dev libswresample-dev
+----
+
+If you run Raspberry Pi OS Lite, install `git`:
+
+[source,console]
+----
+$ sudo apt install -y git
+----
+
+Next, xref:camera_software.adoc#building-rpicam-apps[build `rpicam-apps`].
+
+==== Building `libcamera`
+
+NOTE: Only build `libcamera` from scratch if you need custom behaviour or the latest features that have not yet reached `apt` repositories.
+
+[NOTE]
+======
+If you run Raspberry Pi OS Lite, begin by installing the following packages:
+
+[source,console]
+----
+$ sudo apt install -y python3-pip git python3-jinja2
+----
+======
+
+First, install the following `libcamera` dependencies:
+
+[source,console]
+----
+$ sudo apt install -y libboost-dev
+$ sudo apt install -y libgnutls28-dev openssl libtiff5-dev pybind11-dev
+$ sudo apt install -y qtbase5-dev libqt5core5a libqt5gui5 libqt5widgets5
+$ sudo apt install -y meson cmake
+$ sudo apt install -y python3-yaml python3-ply
+$ sudo apt install -y libglib2.0-dev libgstreamer-plugins-base1.0-dev
+----
+
+Now we're ready to build `libcamera` itself.
+
+Download a local copy of Raspberry Pi's fork of `libcamera` from GitHub:
+
+[source,console]
+----
+$ git clone https://github.com/raspberrypi/libcamera.git
+----
+
+Navigate into the root directory of the repository:
+
+[source,console]
+----
+$ cd libcamera
+----
+
+Next, run `meson` to configure the build environment:
+
+[source,console]
+----
+$ meson setup build --buildtype=release -Dpipelines=rpi/vc4,rpi/pisp -Dipas=rpi/vc4,rpi/pisp -Dv4l2=true -Dgstreamer=enabled -Dtest=false -Dlc-compliance=disabled -Dcam=disabled -Dqcam=disabled -Ddocumentation=disabled -Dpycamera=enabled
+----
+
+NOTE: You can disable the `gstreamer` plugin by replacing `-Dgstreamer=enabled` with `-Dgstreamer=disabled` during the `meson` build configuration. If you disable `gstreamer`, there is no need to install the `libglib2.0-dev` and `libgstreamer-plugins-base1.0-dev` dependencies.
+
+Now, you can build `libcamera` with `ninja`:
+
+[source,console]
+----
+$ ninja -C build
+----
+
+Finally, run the following command to install your freshly-built `libcamera` binary:
+
+[source,console]
+----
+$ sudo ninja -C build install
+----
+
+TIP: On devices with 1GB of memory or less, the build may exceed available memory. Append the `-j 1` flag to `ninja` commands to limit the build to a single process. This should prevent the build from exceeding available memory on devices like the Raspberry Pi Zero and the Raspberry Pi 3.
+
+`libcamera` does not yet have a stable binary interface. Always build `rpicam-apps` after you build `libcamera`.
+
+==== Building `rpicam-apps`
+
+First fetch the necessary dependencies for `rpicam-apps`.
+
+[source,console]
+----
+$ sudo apt install -y cmake libboost-program-options-dev libdrm-dev libexif-dev
+$ sudo apt install -y meson ninja-build
+----
+
+Download a local copy of Raspberry Pi's `rpicam-apps` GitHub repository:
+
+[source,console]
+----
+$ git clone https://github.com/raspberrypi/rpicam-apps.git
+----
+
+Navigate into the root directory of the repository:
+
+[source,console]
+----
+$ cd rpicam-apps
+----
+
+For desktop-based operating systems like Raspberry Pi OS, configure the `rpicam-apps` build with the following `meson` command:
+
+[source,console]
+----
+$ meson setup build -Denable_libav=enabled -Denable_drm=enabled -Denable_egl=enabled -Denable_qt=enabled -Denable_opencv=disabled -Denable_tflite=disabled -Denable_hailo=disabled
+----
+
+For headless operating systems like Raspberry Pi OS Lite, configure the `rpicam-apps` build with the following `meson` command:
+
+[source,console]
+----
+$ meson setup build -Denable_libav=disabled -Denable_drm=enabled -Denable_egl=disabled -Denable_qt=disabled -Denable_opencv=disabled -Denable_tflite=disabled -Denable_hailo=disabled
+----
+
+[TIP]
+======
+
+* Use `-Dneon_flags=armv8-neon` to enable optimisations for 32-bit OSes on Raspberry Pi 3 or Raspberry Pi 4.
+* Use `-Denable_opencv=enabled` if you have installed OpenCV and wish to use OpenCV-based post-processing stages.
+* Use `-Denable_tflite=enabled` if you have installed TensorFlow Lite and wish to use it in post-processing stages.
+* Use `-Denable_hailo=enabled` if you have installed HailoRT and wish to use it in post-processing stages.
+
+======
+
+You can now build `rpicam-apps` with the following command:
+
+[source,console]
+----
+$ meson compile -C build
+----
+
+TIP: On devices with 1GB of memory or less, the build may exceed available memory. Append the `-j 1` flag to `meson` commands to limit the build to a single process. This should prevent the build from exceeding available memory on devices like the Raspberry Pi Zero and the Raspberry Pi 3.
+
+Finally, run the following command to install your freshly-built `rpicam-apps` binary:
+
+[source,console]
+----
+$ sudo meson install -C build
+----
+
+[TIP]
+====
+The command above should automatically update the `ldconfig` cache. If you have trouble accessing your new `rpicam-apps` build, run the following command to update the cache:
+
+[source,console]
+----
+$ sudo ldconfig
+----
+====
+
+Run the following command to check that your device uses the new binary:
+
+[source,console]
+----
+$ rpicam-still --version
+----
+
+The output should include the date and time of your local `rpicam-apps` build.
+
+Finally, follow the `dtoverlay` and display driver instructions in the xref:camera_software.adoc#configuration[Configuration section].
+
+==== `rpicam-apps` meson flag reference
+
+The `meson` build configuration for `rpicam-apps` supports the following flags:
+
+`-Dneon_flags=armv8-neon`:: Speeds up certain post-processing features on Raspberry Pi 3 or Raspberry Pi 4 devices running a 32-bit OS.
+
+`-Denable_libav=enabled`:: Enables or disables `libav` encoder integration.
+
+`-Denable_drm=enabled`:: Enables or disables **DRM/KMS preview rendering**, a preview window used in the absence of a desktop environment.
+
+`-Denable_egl=enabled`:: Enables or disables the non-Qt desktop environment-based preview. Disable if your system lacks a desktop environment.
+
+`-Denable_qt=enabled`:: Enables or disables support for the Qt-based implementation of the preview window. Disable if you do not have a desktop environment installed or if you have no intention of using the Qt-based preview window. The Qt-based preview is normally not recommended because it is computationally very expensive, however it does work with X display forwarding.
+
+`-Denable_opencv=enabled`:: Forces OpenCV-based post-processing stages to link or not link. Requires OpenCV to enable. Defaults to `disabled`.
+
+`-Denable_tflite=enabled`:: Enables or disables TensorFlow Lite post-processing stages. Disabled by default. Requires Tensorflow Lite to enable. Depending on how you have built and/or installed TFLite, you may need to tweak the `meson.build` file in the `post_processing_stages` directory.
+
+`-Denable_hailo=enabled`:: Enables or disables HailoRT-based post-processing stages. Requires HailoRT to enable. Defaults to `auto`.
+
+`-Ddownload_hailo_models=true`:: Downloads and installs models for HailoRT post-processing stages. Requires `wget` to be installed. Defaults to `true`.
+
+
+Each of the above options (except for `neon_flags`) supports the following values:
+
+* `enabled`: enables the option, fails the build if dependencies are not available
+* `disabled`: disables the option
+* `auto`: enables the option if dependencies are available
+
+==== Building `libepoxy`
+
+Rebuilding `libepoxy` should not normally be necessary as this library changes only very rarely. If you do want to build it from scratch, however, please follow the instructions below.
+
+Start by installing the necessary dependencies.
+
+[source,console]
+----
+$ sudo apt install -y libegl1-mesa-dev
+----
+
+Next, download a local copy of the `libepoxy` repository from GitHub:
+
+[source,console]
+----
+$ git clone https://github.com/anholt/libepoxy.git
+----
+
+Navigate into the root directory of the repository:
+
+[source,console]
+----
+$ cd libepoxy
+----
+
+Create a build directory at the root level of the repository, then navigate into that directory:
+
+[source,console]
+----
+$ mkdir _build
+$ cd _build
+----
+
+Next, run `meson` to configure the build environment:
+
+[source,console]
+----
+$ meson
+----
+
+Now, you can build `libepoxy` with `ninja`:
+
+[source,console]
+----
+$ ninja
+----
+
+Finally, run the following command to install your freshly-built `libepoxy` binary:
+
+[source,console]
+----
+$ sudo ninja install
+----
diff --git a/documentation/asciidoc/computers/camera/rpicam_apps_getting_help.adoc b/documentation/asciidoc/computers/camera/rpicam_apps_getting_help.adoc
new file mode 100644
index 000000000..8cf2367bc
--- /dev/null
+++ b/documentation/asciidoc/computers/camera/rpicam_apps_getting_help.adoc
@@ -0,0 +1,17 @@
+== Getting help
+
+For further help with `libcamera` and the `rpicam-apps`, check the https://forums.raspberrypi.com/viewforum.php?f=43[Raspberry Pi Camera forum]. Before posting:
+
+* Make a note of your operating system version (`uname -a`).
+
+* Make a note of your `libcamera` and `rpicam-apps` versions (`rpicam-hello --version`).
+
+* Report the make and model of the camera module you are using.
+
+* Report the software you are trying to use. We don't support third-party camera module vendor software.
+
+* Report your Raspberry Pi model, including memory size.
+
+* Include any relevant excerpts from the application's console output.
+
+If there are specific problems in the camera software (such as crashes), consider https://github.com/raspberrypi/rpicam-apps[creating an issue in the `rpicam-apps` GitHub repository], including the same details listed above.
diff --git a/documentation/asciidoc/computers/camera/rpicam_apps_intro.adoc b/documentation/asciidoc/computers/camera/rpicam_apps_intro.adoc
new file mode 100644
index 000000000..4accca0a8
--- /dev/null
+++ b/documentation/asciidoc/computers/camera/rpicam_apps_intro.adoc
@@ -0,0 +1,47 @@
+== `rpicam-apps`
+
+[NOTE]
+====
+Raspberry Pi OS _Bookworm_ renamed the camera capture applications from ``libcamera-\*`` to ``rpicam-*``. Symbolic links allow users to use the old names for now. **Adopt the new application names as soon as possible.** Raspberry Pi OS versions prior to _Bookworm_ still use the ``libcamera-*`` name.
+====
+
+Raspberry Pi supplies a small set of example `rpicam-apps`. These CLI applications, built on top of `libcamera`, capture images and video from a camera. These applications include:
+
+* `rpicam-hello`: A "hello world"-equivalent for cameras, which starts a camera preview stream and displays it on the screen.
+* `rpicam-jpeg`: Runs a preview window, then captures high-resolution still images.
+* `rpicam-still`: Emulates many of the features of the original `raspistill` application.
+* `rpicam-vid`: Captures video.
+* `rpicam-raw`: Captures raw (unprocessed Bayer) frames directly from the sensor.
+* `rpicam-detect`: Not built by default, but users can build it if they have TensorFlow Lite installed on their Raspberry Pi. Captures JPEG images when certain objects are detected.
+
+Recent versions of Raspberry Pi OS include the five basic `rpicam-apps`, so you can record images and videos using a camera even on a fresh Raspberry Pi OS installation.
+
+Users can create their own `rpicam`-based applications with custom functionality to suit their own requirements. The https://github.com/raspberrypi/rpicam-apps[`rpicam-apps` source code] is freely available under a BSD-2-Clause licence.
+
+=== `libcamera`
+
+`libcamera` is an open-source software library aimed at supporting camera systems directly from the Linux operating system on Arm processors. Proprietary code running on the Broadcom GPU is minimised. For more information about `libcamera` see the https://libcamera.org[`libcamera` website].
+
+`libcamera` provides a {cpp} API that configures the camera, then allows applications to request image frames. These image buffers reside in system memory and can be passed directly to still image encoders (such as JPEG) or to video encoders (such as H.264). `libcamera` doesn't encode or display images itself: for that functionality, use `rpicam-apps`.
+
+You can find the source code in the https://git.linuxtv.org/libcamera.git/[official libcamera repository]. The Raspberry Pi OS distribution uses a https://github.com/raspberrypi/libcamera.git[fork] to control updates.
+
+Underneath the `libcamera` core, we provide a custom pipeline handler. `libcamera` uses this layer to drive the sensor and image signal processor (ISP) on the Raspberry Pi. `libcamera` contains a collection of image-processing algorithms (IPAs) including auto exposure/gain control (AEC/AGC), auto white balance (AWB), and auto lens-shading correction (ALSC).
+
+Raspberry Pi's implementation of `libcamera` supports the following cameras:
+
+* Official cameras:
+** OV5647 (V1)
+** IMX219 (V2)
+** IMX708 (V3)
+** IMX477 (HQ)
+** IMX500 (AI)
+** IMX296 (GS)
+* Third-party sensors:
+** IMX290
+** IMX327
+** IMX378
+** IMX519
+** OV9281
+
+To extend support to a new sensor, https://git.linuxtv.org/libcamera.git/[contribute to `libcamera`].
diff --git a/documentation/asciidoc/computers/camera/rpicam_apps_multicam.adoc b/documentation/asciidoc/computers/camera/rpicam_apps_multicam.adoc
new file mode 100644
index 000000000..fb387443a
--- /dev/null
+++ b/documentation/asciidoc/computers/camera/rpicam_apps_multicam.adoc
@@ -0,0 +1,68 @@
+=== Use multiple cameras
+
+`rpicam-apps` has basic support for multiple cameras. You can attach multiple cameras to a Raspberry Pi in the following ways:
+
+* For Raspberry Pi Compute Modules, you can connect two cameras directly to a Raspberry Pi Compute Module I/O board. See the xref:../computers/compute-module.adoc#attach-a-camera-module[Compute Module documentation] for further details. With this method, you can _use both cameras simultaneously_.
+* For Raspberry Pi 5, you can connect two cameras directly to the board using the dual MIPI connectors.
+* For other Raspberry Pi devices with a camera port, you can attach two or more cameras with a Video Mux board such as https://www.arducam.com/product/multi-camera-v2-1-adapter-raspberry-pi/[this third-party product]. Since both cameras are attached to a single Unicam port, _only one camera may be used at a time_.
+
+To list all the cameras available on your platform, use the xref:camera_software.adoc#list-cameras[`list-cameras`] option. To choose which camera to use, pass the camera index to the xref:camera_software.adoc#camera[`camera`] option.
+
+NOTE: `libcamera` does not yet provide stereoscopic camera support. When running two cameras simultaneously, they must be run in separate processes, meaning there is no way to synchronise 3A operation between them. As a workaround, you could synchronise the cameras through an external sync signal for the HQ (IMX477) camera or use the software camera synchronisation support that is described below, switching the 3A to manual mode if necessary.
+
+==== Software Camera Synchronisation
+
+Raspberry Pi's _libcamera_ implementation has the ability to synchronise the frames of different cameras using only software. This will cause one camera to adjust its frame timing so as to coincide as closely as possible with the frames of another camera. No soldering or hardware connections are required, and it will work with all of Raspberry Pi's camera modules, and even third party ones so long as their drivers implement frame duration control correctly.
+
+**How it works**
+
+The scheme works by designating one camera to be the _server_. The server will broadcast timing messages onto the network at regular intervals, such as once a second. Meanwhile other cameras, known as _clients_, can listen to these messages whereupon they may lengthen or shorten frame times slightly so as to pull them into sync with the server. This process is continual, though after the first adjustment, subsequent adjustments are normally small.
+
+The client cameras may be attached to the same Raspberry Pi device as the server, or they may be attached to different Raspberry Pis on the same network. The camera model on the clients may match the server, or they may be different.
+
+Clients and servers need to be set running at the same nominal framerate (such as 30fps). Note that there is no back-channel from the clients back to the server. It is solely the clients' responsibility to be up and running in time to match the server, and the server is completely unaware whether clients have synchronised successfully, or indeed whether there are any clients at all.
+
+In normal operation, running the same make of camera on the same Raspberry Pi, we would expect the frame start times of the camera images to match within "several tens of microseconds". When the camera models are different this could be significantly larger as the cameras will probably not be able to match framerates exactly and will therefore be continually drifting apart (and brought back together with every timing message).
+
+When cameras are on different devices, the system clocks should be synchronised using NTP (normally the case by default for Raspberry Pi OS), or if this is insufficiently precise, another protocol like PTP might be used. Any discrepancy between system clocks will feed directly into extra error in frame start times - even though the advertised timestamps on the frames will not tell you.
+
+**The Server**
+
+The server, as previously explained, broadcasts timing messages onto the network, by default every second. The server will run for a fixed number of frames, by default 100, after which it will inform the camera application on the device that the "synchronisation point" has been reached. At this moment, the application will start using the frames, so in the case of `rpicam-vid`, they will start being encoded and recorded. Recall that the behaviour and even existence of clients has no bearing on this.
+
+If required, there can be several servers on the same network so long as they are broadcasting timing messages to different network addresses. Clients, of course, will have to be configured to listen for the correct address.
+
+**Clients**
+
+Clients listen out for server timing messages and, when they receive one, will shorten or lengthen a camera frame duration by the required amount so that subsequent frames will start, as far as possible, at the same moment as the server's.
+
+The clients learn the correct "synchronisation point" from the server's messages, and just like the server, will signal the camera application at the same moment that it should start using the frames. So in the case of `rpicam-vid`, this is once again the moment at which frames will start being recorded.
+
+Normally it makes sense to start clients _before_ the server, as the clients will simply wait (the "synchronisation point" has not been reached) until a server is seen broadcasting onto the network. This obviously avoids timing problems where a server might reach its "synchronisation point" even before all the clients have been started!
+
+**Usage in `rpicam-vid`**
+
+We can use software camera synchronisation with `rpicam-vid` to record videos that are synchronised frame-by-frame. We're going to assume we have two cameras attached, and we're going to use camera 0 as the server, and camera 1 as the client. `rpicam-vid` defaults to a fixed 30 frames per second, which will be fine for us.
+
+First we should start the client:
+[source,console]
+----
+$ rpicam-vid -n -t 20s --camera 1 --codec libav -o client.mp4 --sync client
+----
+
+Note the `--sync client` parameter. This will record for 20 seconds but _only_ once the synchronisation point has been reached. If necessary, it will wait indefinitely for the first server message.
+
+To start the server:
+[source,console]
+----
+$ rpicam-vid -n -t 20s --camera 0 --codec libav -o server.mp4 --sync server
+----
+
+This too will run for 20 seconds counting from when the synchronisation point is reached and the recording starts. With the default synchronisation settings (100 frames at 30fps) this means there will be just over 3 seconds for clients to get synchronised.
+
+The server's broadcast address and port, the frequency of the timing messages and the number of frames to wait for clients to synchronise, can all be changed in the camera tuning file. Clients only pay attention to the broadcast address here which should match the server's; the other information will be ignored. Please refer to the https://datasheets.raspberrypi.com/camera/raspberry-pi-camera-guide.pdf[Raspberry Pi Camera tuning guide] for more information.
+
+In practical operation there are a few final points to be aware of:
+
+* The fixed framerate needs to be below the maximum framerate at which the camera can operate (in the camera mode that is being used). This is because the synchronisation algorithm may need to _shorten_ camera frames so that clients can catch up with the server, and this will fail if it is already running as fast as it can.
+* Whilst camera frames should be correctly synchronised, at higher framerates or depending on system load, it is possible for frames, either on the clients or server, to be dropped. In these cases the frame timestamps will help an application to work out what has happened, though it's usually simpler to try and avoid frame drops - perhaps by lowering the framerate, increasing the number of buffers being allocated to the camera queues (see the xref:camera_software.adoc#buffer-count[`--buffer-count` option]), or reducing system load.
\ No newline at end of file
diff --git a/documentation/asciidoc/computers/camera/rpicam_apps_packages.adoc b/documentation/asciidoc/computers/camera/rpicam_apps_packages.adoc
new file mode 100644
index 000000000..031fcc44e
--- /dev/null
+++ b/documentation/asciidoc/computers/camera/rpicam_apps_packages.adoc
@@ -0,0 +1,15 @@
+=== Install `libcamera` and `rpicam-apps`
+
+Raspberry Pi provides two `rpicam-apps` packages:
+
+* `rpicam-apps` contains full applications with support for previews using a desktop environment. This package is pre-installed in Raspberry Pi OS.
+
+* `rpicam-apps-lite` omits desktop environment support, and only makes the DRM preview available. This package is pre-installed in Raspberry Pi OS Lite.
+
+==== Dependencies
+
+`rpicam-apps` depends on library packages named `library-name<n>`, where `<n>` is the ABI version. Your package manager should install these automatically.
+
+==== Dev packages
+
+You can rebuild `rpicam-apps` without building `libcamera` and `libepoxy` from scratch. For more information, see xref:camera_software.adoc#building-rpicam-apps-without-building-libcamera[Building `rpicam-apps` without rebuilding `libcamera`].
diff --git a/documentation/asciidoc/computers/camera/rpicam_apps_post_processing.adoc b/documentation/asciidoc/computers/camera/rpicam_apps_post_processing.adoc
new file mode 100644
index 000000000..339828d50
--- /dev/null
+++ b/documentation/asciidoc/computers/camera/rpicam_apps_post_processing.adoc
@@ -0,0 +1,243 @@
+== Post-processing with `rpicam-apps`
+
+`rpicam-apps` share a common post-processing framework. This allows them to pass the images received from the camera system through a number of custom image-processing and image-analysis routines. Each such routine is known as a _stage_. To run post-processing stages, supply a JSON file instructing the application which stages and options to apply. You can find example JSON files that use the built-in post-processing stages in the https://github.com/raspberrypi/rpicam-apps/tree/main/assets[`assets` folder of the `rpicam-apps` repository].
+
+For example, the **negate** stage turns light pixels dark and dark pixels light. Because the negate stage is basic, requiring no configuration, `negate.json` just names the stage:
+
+[source,json]
+----
+{
+ "negate": {}
+}
+----
+
+To apply the negate stage to an image, pass `negate.json` to the `post-process-file` option:
+
+[source,console]
+----
+$ rpicam-hello --post-process-file negate.json
+----
+
+To run multiple post-processing stages, create a JSON file that contains multiple stages as top-level keys. For example, the following configuration runs the Sobel stage, then the negate stage:
+
+[source,json]
+----
+{
+ "sobel_cv":
+ {
+ "ksize": 5
+ },
+ "negate": {}
+}
+----
+
+The xref:camera_software.adoc#sobel_cv-stage[Sobel stage] uses OpenCV, hence the `cv` suffix. It has a user-configurable parameter, `ksize`, that specifies the kernel size of the filter to be used. In this case, the Sobel filter produces bright edges on a black background, and the negate stage turns this into dark edges on a white background.
+
+.A negated Sobel filter.
+image::images/sobel_negate.jpg[A negated Sobel filter]
+
+Some stages, such as `negate`, alter the image in some way. Other stages analyse the image to generate metadata. Post-processing stages can pass this metadata to other stages and even the application.
+
+To improve performance, image analysis often uses reduced resolution. `rpicam-apps` provide a dedicated low-resolution feed directly from the ISP.
+
+NOTE: The `rpicam-apps` supplied with Raspberry Pi OS do not include OpenCV and TensorFlow Lite. As a result, certain post-processing stages that rely on them are disabled. To use these stages, xref:camera_software.adoc#build-libcamera-and-rpicam-apps[re-compile `rpicam-apps`]. On a Raspberry Pi 3 or 4 running a 32-bit kernel, compile with the `-DENABLE_COMPILE_FLAGS_FOR_TARGET=armv8-neon` flag to speed up certain stages.
+
+=== Built-in stages
+
+==== `negate` stage
+
+This stage turns light pixels dark and dark pixels light.
+
+The `negate` stage has no user-configurable parameters.
+
+Default `negate.json` file:
+
+[source,json]
+----
+{
+ "negate" : {}
+}
+----
+
+Run the following command to use this stage file with `rpicam-hello`:
+
+[source,console]
+----
+$ rpicam-hello --post-process-file negate.json
+----
+
+Example output:
+
+.A negated image.
+image::images/negate.jpg[A negated image]
+
+==== `hdr` stage
+
+This stage emphasises details in images using High Dynamic Range (HDR) and Dynamic Range Compression (DRC). DRC uses a single image, while HDR combines multiple images for a similar result.
+
+Parameters fall into three groups: the LP filter, global tonemapping, and local contrast.
+
+This stage applies a smoothing filter to the fully-processed input images to generate a low pass (LP) image. It then generates the high pass (HP) image from the diff of the original and LP images. Then, it applies a global tonemap to the LP image and adds it back to the HP image. This process helps preserve local contrast.
+
+You can configure this stage with the following parameters:
+
+[cols="1,3a"]
+|===
+| `num_frames`
+| The number of frames to accumulate; for DRC, use 1; for HDR, try 8
+| `lp_filter_strength`
+| The coefficient of the low pass IIR filter.
+| `lp_filter_threshold`
+| A piecewise linear function that relates pixel level to the threshold of meaningful detail
+| `global_tonemap_points`
+| Points in the input image histogram mapped to targets in the output range where we wish to move them. Uses the following sub-configuration:
+
+* an inter-quantile mean (`q` and `width`)
+* a target as a proportion of the full output range (`target`)
+* maximum (`max_up`) and minimum (`max_down`) gains to move the measured inter-quantile mean, to prevent the image from changing too drastically
+| `global_tonemap_strength`
+| Strength of application of the global tonemap
+| `local_pos_strength`
+| A piecewise linear function that defines the gain applied to local contrast when added back to the tonemapped LP image, for positive (bright) detail
+| `local_neg_strength`
+| A piecewise linear function that defines the gain applied to local contrast when added back to the tonemapped LP image, for negative (dark) detail
+| `local_tonemap_strength`
+| An overall gain applied to all local contrast that is added back
+| `local_colour_scale`
+| A factor that allows the output colours to be affected more or less strongly
+|===
+
+To control processing strength, adjust the `global_tonemap_strength` and `local_tonemap_strength` parameters.
+
+Processing a single image takes between two and three seconds for a 12MP image on a Raspberry Pi 4. When accumulating multiple frames, this stage sends only the processed image to the application.
+
+Default `drc.json` file for DRC:
+
+[source,json]
+----
+{
+ "hdr" : {
+ "num_frames" : 1,
+ "lp_filter_strength" : 0.2,
+ "lp_filter_threshold" : [ 0, 10.0 , 2048, 205.0, 4095, 205.0 ],
+ "global_tonemap_points" :
+ [
+ { "q": 0.1, "width": 0.05, "target": 0.15, "max_up": 1.5, "max_down": 0.7 },
+ { "q": 0.5, "width": 0.05, "target": 0.5, "max_up": 1.5, "max_down": 0.7 },
+ { "q": 0.8, "width": 0.05, "target": 0.8, "max_up": 1.5, "max_down": 0.7 }
+ ],
+ "global_tonemap_strength" : 1.0,
+ "local_pos_strength" : [ 0, 6.0, 1024, 2.0, 4095, 2.0 ],
+ "local_neg_strength" : [ 0, 4.0, 1024, 1.5, 4095, 1.5 ],
+ "local_tonemap_strength" : 1.0,
+ "local_colour_scale" : 0.9
+ }
+}
+----
+
+Example:
+
+.Image without DRC processing
+image::images/nodrc.jpg[Image without DRC processing]
+
+Run the following command to use this stage file with `rpicam-still`:
+
+[source,console]
+----
+$ rpicam-still -o test.jpg --post-process-file drc.json
+----
+
+.Image with DRC processing
+image::images/drc.jpg[Image with DRC processing]
+
+Default `hdr.json` file for HDR:
+
+[source,json]
+----
+{
+ "hdr" : {
+ "num_frames" : 8,
+ "lp_filter_strength" : 0.2,
+ "lp_filter_threshold" : [ 0, 10.0 , 2048, 205.0, 4095, 205.0 ],
+ "global_tonemap_points" :
+ [
+ { "q": 0.1, "width": 0.05, "target": 0.15, "max_up": 5.0, "max_down": 0.5 },
+ { "q": 0.5, "width": 0.05, "target": 0.45, "max_up": 5.0, "max_down": 0.5 },
+ { "q": 0.8, "width": 0.05, "target": 0.7, "max_up": 5.0, "max_down": 0.5 }
+ ],
+ "global_tonemap_strength" : 1.0,
+ "local_pos_strength" : [ 0, 6.0, 1024, 2.0, 4095, 2.0 ],
+ "local_neg_strength" : [ 0, 4.0, 1024, 1.5, 4095, 1.5 ],
+ "local_tonemap_strength" : 1.0,
+ "local_colour_scale" : 0.8
+ }
+}
+----
+
+Example:
+
+.Image without HDR processing
+image::images/nohdr.jpg[Image without HDR processing]
+
+Run the following command to use this stage file with `rpicam-still`:
+
+[source,console]
+----
+$ rpicam-still -o test.jpg --ev -2 --denoise cdn_off --post-process-file hdr.json
+----
+
+.Image with HDR processing
+image::images/hdr.jpg[Image with HDR processing]
+
+==== `motion_detect` stage
+
+The `motion_detect` stage analyses frames from the low-resolution image stream. You must configure the low-resolution stream to use this stage. The stage detects motion by comparing a region of interest (ROI) in the frame to the corresponding part of a previous frame. If enough pixels change between frames, this stage indicates the motion in metadata under the `motion_detect.result` key.
+
+This stage has no dependencies on third-party libraries.
+
+You can configure this stage with the following parameters, passing dimensions as a proportion of the low-resolution image size between 0 and 1:
+
+[cols="1,3"]
+|===
+| `roi_x` | x-offset of the region of interest for the comparison (proportion between 0 and 1)
+| `roi_y` | y-offset of the region of interest for the comparison (proportion between 0 and 1)
+| `roi_width` | Width of the region of interest for the comparison (proportion between 0 and 1)
+| `roi_height` | Height of the region of interest for the comparison (proportion between 0 and 1)
+| `difference_m` | Linear coefficient used to construct the threshold for pixels being different
+| `difference_c` | Constant coefficient used to construct the threshold for pixels being different according to `threshold = difference_m * pixel_value + difference_c`
+| `frame_period` | The motion detector will run only this many frames
+| `hskip` | The pixels are subsampled by this amount horizontally
+| `vskip` | The pixels are subsampled by this amount vertically
+| `region_threshold` | The proportion of pixels (regions) which must be categorised as different for them to count as motion
+| `verbose` | Print messages to the console, including when the motion status changes
+|===
+
+Default `motion_detect.json` configuration file:
+
+[source,json]
+----
+{
+ "motion_detect" : {
+ "roi_x" : 0.1,
+ "roi_y" : 0.1,
+ "roi_width" : 0.8,
+ "roi_height" : 0.8,
+ "difference_m" : 0.1,
+ "difference_c" : 10,
+ "region_threshold" : 0.005,
+ "frame_period" : 5,
+ "hskip" : 2,
+ "vskip" : 2,
+ "verbose" : 0
+ }
+}
+----
+
+Adjust the differences and the threshold to make the algorithm more or less sensitive. To improve performance, use the `hskip` and `vskip` parameters.
+
+Run the following command to use this stage file with `rpicam-hello`:
+
+[source,console]
+----
+$ rpicam-hello --lores-width 128 --lores-height 96 --post-process-file motion_detect.json
+----
diff --git a/documentation/asciidoc/computers/camera/rpicam_apps_post_processing_opencv.adoc b/documentation/asciidoc/computers/camera/rpicam_apps_post_processing_opencv.adoc
new file mode 100644
index 000000000..787393e96
--- /dev/null
+++ b/documentation/asciidoc/computers/camera/rpicam_apps_post_processing_opencv.adoc
@@ -0,0 +1,120 @@
+=== Post-processing with OpenCV
+
+NOTE: These stages require an OpenCV installation. You may need to xref:camera_software.adoc#build-libcamera-and-rpicam-apps[rebuild `rpicam-apps` with OpenCV support].
+
+==== `sobel_cv` stage
+
+This stage applies a https://en.wikipedia.org/wiki/Sobel_operator[Sobel filter] to an image to emphasise edges.
+
+You can configure this stage with the following parameters:
+
+[cols="1,3"]
+|===
+| `ksize` | Kernel size of the Sobel filter
+|===
+
+
+Default `sobel_cv.json` file:
+
+[source,json]
+----
+{
+ "sobel_cv" : {
+ "ksize": 5
+ }
+}
+----
+
+Example:
+
+.Using a Sobel filter to emphasise edges.
+image::images/sobel.jpg[Using a Sobel filter to emphasise edges]
+
+==== `face_detect_cv` stage
+
+This stage uses the OpenCV Haar classifier to detect faces in an image. It returns face location metadata under the key `face_detect.results` and optionally draws the locations on the image.
+
+You can configure this stage with the following parameters:
+
+[cols="1,3"]
+|===
+| `cascade_name` | Name of the file where the Haar cascade can be found
+| `scaling_factor` | Determines range of scales at which the image is searched for faces
+| `min_neighbors` | Minimum number of overlapping neighbours required to count as a face
+| `min_size` | Minimum face size
+| `max_size` | Maximum face size
+| `refresh_rate` | How many frames to wait before trying to re-run the face detector
+| `draw_features` | Whether to draw face locations on the returned image
+|===
+
+The `face_detect_cv` stage runs only during preview and video capture. It ignores still image capture. It runs on the low resolution stream with a resolution between 320×240 and 640×480 pixels.
+
+Default `face_detect_cv.json` file:
+
+[source,json]
+----
+{
+ "face_detect_cv" : {
+ "cascade_name" : "/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml",
+ "scaling_factor" : 1.1,
+ "min_neighbors" : 2,
+ "min_size" : 32,
+ "max_size" : 256,
+ "refresh_rate" : 1,
+ "draw_features" : 1
+ }
+}
+----
+
+Example:
+
+.Drawing detected faces onto an image.
+image::images/face_detect.jpg[Drawing detected faces onto an image]
+
+==== `annotate_cv` stage
+
+This stage writes text into the top corner of images using the same `%` substitutions as the xref:camera_software.adoc#info-text[`info-text`] option.
+
+Interprets xref:camera_software.adoc#info-text[`info-text` directives] first, then passes any remaining tokens to https://www.man7.org/linux/man-pages/man3/strftime.3.html[`strftime`].
+
+For example, to achieve a datetime stamp on the video, pass `%F %T %z`:
+
+* `%F` displays the ISO-8601 date (2023-03-07)
+* `%T` displays 24h local time (e.g. "09:57:12")
+* `%z` displays the timezone relative to UTC (e.g. "-0800")
+
+This stage does not output any metadata, but it writes metadata found in `annotate.text` in place of anything in the JSON configuration file. This allows other post-processing stages to write text onto images.
+
+You can configure this stage with the following parameters:
+
+[cols="1,3"]
+|===
+| `text` | The text string to be written
+| `fg` | Foreground colour
+| `bg` | Background colour
+| `scale` | A number proportional to the size of the text
+| `thickness` | A number that determines the thickness of the text
+| `alpha` | The amount of alpha to apply when overwriting background pixels
+|===
+
+Default `annotate_cv.json` file:
+
+[source,json]
+----
+{
+ "annotate_cv" : {
+ "text" : "Frame %frame exp %exp ag %ag dg %dg",
+ "fg" : 255,
+ "bg" : 0,
+ "scale" : 1.0,
+ "thickness" : 2,
+ "alpha" : 0.3
+ }
+}
+----
+
+Example:
+
+.Writing camera and date information onto an image with annotations.
+image::images/annotate.jpg[Writing camera and date information onto an image with annotations]
+
diff --git a/documentation/asciidoc/computers/camera/rpicam_apps_post_processing_tflite.adoc b/documentation/asciidoc/computers/camera/rpicam_apps_post_processing_tflite.adoc
new file mode 100644
index 000000000..39d607f5e
--- /dev/null
+++ b/documentation/asciidoc/computers/camera/rpicam_apps_post_processing_tflite.adoc
@@ -0,0 +1,220 @@
+=== Post-Processing with TensorFlow Lite
+
+==== Prerequisites
+
+These stages require TensorFlow Lite (TFLite) libraries that export the {cpp} API. TFLite doesn't distribute libraries in this form, but you can download and install a version that exports the API from https://lindevs.com/install-precompiled-tensorflow-lite-on-raspberry-pi/[lindevs.com].
+
+After installing, you must xref:camera_software.adoc#build-libcamera-and-rpicam-apps[recompile `rpicam-apps` with TensorFlow Lite support].
+
+==== `object_classify_tf` stage
+
+Download: https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz[]
+
+`object_classify_tf` uses a Google MobileNet v1 model to classify objects in the camera image. This stage requires a https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_1.0_224_frozen.tgz[`labels.txt` file].
+
+You can configure this stage with the following parameters:
+
+[cols="1,3"]
+|===
+| `top_n_results` | The number of results to show
+| `refresh_rate` | The number of frames that must elapse between model runs
+| `threshold_high` | Confidence threshold (between 0 and 1) where objects are considered as being present
+| `threshold_low` | Confidence threshold which objects must drop below before being discarded as matches
+| `model_file` | Filepath of the TFLite model file
+| `labels_file` | Filepath of the file containing the object labels
+| `display_labels` | Whether to display the object labels on the image; inserts `annotate.text` metadata for the `annotate_cv` stage to render
+| `verbose` | Output more information to the console
+|===
+
+Example `object_classify_tf.json` file:
+
+[source,json]
+----
+{
+ "object_classify_tf" : {
+ "top_n_results" : 2,
+ "refresh_rate" : 30,
+ "threshold_high" : 0.6,
+ "threshold_low" : 0.4,
+        "model_file" : "/home/<username>/models/mobilenet_v1_1.0_224_quant.tflite",
+        "labels_file" : "/home/<username>/models/labels.txt",
+ "display_labels" : 1
+ },
+ "annotate_cv" : {
+ "text" : "",
+ "fg" : 255,
+ "bg" : 0,
+ "scale" : 1.0,
+ "thickness" : 2,
+ "alpha" : 0.3
+ }
+}
+----
+
+The stage operates on a low resolution stream image of size 224×224.
+Run the following command to use this stage file with `rpicam-hello`:
+
+[source,console]
+----
+$ rpicam-hello --post-process-file object_classify_tf.json --lores-width 224 --lores-height 224
+----
+
+.Object classification of a desktop computer and monitor.
+image::images/classify.jpg[Object classification of a desktop computer and monitor]
+
+==== `pose_estimation_tf` stage
+
+Download: https://github.com/Qengineering/TensorFlow_Lite_Pose_RPi_32-bits[]
+
+`pose_estimation_tf` uses a Google MobileNet v1 model to detect pose information.
+
+You can configure this stage with the following parameters:
+
+[cols="1,3"]
+|===
+| `refresh_rate` | The number of frames that must elapse between model runs
+| `model_file` | Filepath of the TFLite model file
+| `verbose` | Output extra information to the console
+|===
+
+Use the separate `plot_pose_cv` stage to draw the detected pose onto the main image.
+
+You can configure the `plot_pose_cv` stage with the following parameters:
+
+[cols="1,3"]
+|===
+| `confidence_threshold` | Confidence threshold determining how much to draw; can be less than zero
+|===
+
+Example `pose_estimation_tf.json` file:
+
+[source,json]
+----
+{
+ "pose_estimation_tf" : {
+ "refresh_rate" : 5,
+ "model_file" : "posenet_mobilenet_v1_100_257x257_multi_kpt_stripped.tflite"
+ },
+ "plot_pose_cv" : {
+ "confidence_threshold" : -0.5
+ }
+}
+----
+
+The stage operates on a low resolution stream image of size 257×257. **Because YUV420 images must have even dimensions, round up to 258×258 for YUV420 images.**
+
+Run the following command to use this stage file with `rpicam-hello`:
+
+[source,console]
+----
+$ rpicam-hello --post-process-file pose_estimation_tf.json --lores-width 258 --lores-height 258
+----
+
+.Pose estimation of an adult human male.
+image::images/pose.jpg[Pose estimation of an adult human male]
+
+==== `object_detect_tf` stage
+
+Download: https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip[]
+
+`object_detect_tf` uses a Google MobileNet v1 SSD (Single Shot Detector) model to detect and label objects.
+
+You can configure this stage with the following parameters:
+
+[cols="1,3"]
+|===
+| `refresh_rate` | The number of frames that must elapse between model runs
+| `model_file` | Filepath of the TFLite model file
+| `labels_file` | Filepath of the file containing the list of labels
+| `confidence_threshold` | Confidence threshold before accepting a match
+| `overlap_threshold` | Determines the amount of overlap between matches for them to be merged as a single match.
+| `verbose` | Output extra information to the console
+|===
+
+Use the separate `object_detect_draw_cv` stage to draw the detected objects onto the main image.
+
+You can configure the `object_detect_draw_cv` stage with the following parameters:
+
+[cols="1,3"]
+|===
+| `line_thickness` | Thickness of the bounding box lines
+| `font_size` | Size of the font used for the label
+|===
+
+Example `object_detect_tf.json` file:
+
+[source,json]
+----
+{
+ "object_detect_tf" : {
+ "number_of_threads" : 2,
+ "refresh_rate" : 10,
+ "confidence_threshold" : 0.5,
+ "overlap_threshold" : 0.5,
+        "model_file" : "/home/<username>/models/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29/detect.tflite",
+        "labels_file" : "/home/<username>/models/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29/labelmap.txt",
+ "verbose" : 1
+ },
+ "object_detect_draw_cv" : {
+ "line_thickness" : 2
+ }
+}
+----
+
+The stage operates on a low resolution stream image of size 300×300. Run the following command, which passes a 300×300 crop to the detector from the centre of the 400×300 low resolution image, to use this stage file with `rpicam-hello`:
+
+[source,console]
+----
+$ rpicam-hello --post-process-file object_detect_tf.json --lores-width 400 --lores-height 300
+----
+
+.Detecting apple and cat objects.
+image::images/detection.jpg[Detecting apple and cat objects]
+
+==== `segmentation_tf` stage
+
+Download: https://tfhub.dev/tensorflow/lite-model/deeplabv3/1/metadata/2?lite-format=tflite[]
+
+`segmentation_tf` uses a Google MobileNet v1 model. This stage requires a labels file, found at `assets/segmentation_labels.txt`.
+
+This stage runs on an image of size 257×257. Because YUV420 images must have even dimensions, the low resolution image should be at least 258 pixels in both width and height. The stage adds a vector of 257×257 values to the image metadata where each value indicates the categories a pixel belongs to. You can optionally draw a representation of the segmentation into the bottom right corner of the image.
+
+You can configure this stage with the following parameters:
+
+[cols="1,3"]
+|===
+| `refresh_rate` | The number of frames that must elapse between model runs
+| `model_file` | Filepath of the TFLite model file
+| `labels_file` | Filepath of the file containing the list of labels
+| `threshold` | When verbose is set, prints when the number of pixels with any label exceeds this number
+| `draw` | Draws the segmentation map into the bottom right hand corner of the image
+| `verbose` | Output extra information to the console
+|===
+
+Example `segmentation_tf.json` file:
+
+[source,json]
+----
+{
+ "segmentation_tf" : {
+ "number_of_threads" : 2,
+ "refresh_rate" : 10,
+        "model_file" : "/home/<username>/models/lite-model_deeplabv3_1_metadata_2.tflite",
+        "labels_file" : "/home/<username>/models/segmentation_labels.txt",
+ "draw" : 1,
+ "verbose" : 1
+ }
+}
+----
+
+This example takes a camera image and reduces it to 258×258 pixels in size. This stage even works when squashing a non-square image without cropping. This example enables the segmentation map in the bottom right hand corner.
+
+Run the following command to use this stage file with `rpicam-hello`:
+
+[source,console]
+----
+$ rpicam-hello --post-process-file segmentation_tf.json --lores-width 258 --lores-height 258 --viewfinder-width 1024 --viewfinder-height 1024
+----
+
+.Running segmentation and displaying the results on a map in the bottom right.
+image::images/segmentation.jpg[Running segmentation and displaying the results on a map in the bottom right]
diff --git a/documentation/asciidoc/computers/camera/rpicam_apps_post_processing_writing.adoc b/documentation/asciidoc/computers/camera/rpicam_apps_post_processing_writing.adoc
new file mode 100644
index 000000000..b010133f3
--- /dev/null
+++ b/documentation/asciidoc/computers/camera/rpicam_apps_post_processing_writing.adoc
@@ -0,0 +1,51 @@
+=== Write your own post-processing stages
+
+With the `rpicam-apps` post-processing framework, users can create their own custom post-processing stages. You can even include algorithms and routines from OpenCV and TensorFlow Lite.
+
+==== Basic post-processing stages
+
+To create your own post-processing stage, derive a new class from the `PostProcessingStage` class.
+All post-processing stages must implement the following member functions:
+
+`char const *Name() const`:: Returns the name of the stage. Matched against stages listed in the JSON post-processing configuration file.
+`void Read(boost::property_tree::ptree const &params)`:: Reads the stage's configuration parameters from a provided JSON file.
+`void AdjustConfig(std::string const &use_case, StreamConfiguration *config)`:: Gives stages a chance to influence the configuration of the camera. Frequently empty for stages with no need to configure the camera.
+`void Configure()`:: Called just after the camera has been configured to allocate resources and check that the stage has access to necessary streams.
+`void Start()`:: Called when the camera starts. Frequently empty for stages with no need to configure the camera.
+`bool Process(CompletedRequest &completed_request)`:: Presents completed camera requests for post-processing. This is where you'll implement pixel manipulations and image analysis. Returns `true` if the post-processing framework should **not** deliver this request to the application.
+`void Stop()`:: Called when the camera stops. Used to shut down any active processing on asynchronous threads.
+`void Teardown()`:: Called when the camera configuration is destroyed. Use this as a destructor where you can de-allocate resources set up in the `Configure` method.
+
+In any stage implementation, call `RegisterStage` to register your stage with the system.
+
+Don't forget to add your stage to `meson.build` in the post-processing folder.
+When writing your own stages, keep these tips in mind:
+
+* The `Process` method blocks the imaging pipeline. If it takes too long, the pipeline will stutter. **Always delegate time-consuming algorithms to an asynchronous thread.**
+
+* When delegating work to another thread, you must copy the image buffers. For applications like image analysis that don't require full resolution, try using a low-resolution image stream.
+
+* The post-processing framework _uses parallelism to process every frame_. This improves throughput. However, some OpenCV and TensorFlow Lite functions introduce another layer of parallelism _within_ each frame. Consider serialising calls within each frame since post-processing already takes advantage of multiple threads.
+
+* Most streams, including the low resolution stream, use the YUV420 format. You may need to convert this to another format for certain OpenCV or TFLite functions.
+
+* For the best performance, always alter images in-place.
+
+For a basic example, see https://github.com/raspberrypi/rpicam-apps/blob/main/post_processing_stages/negate_stage.cpp[`negate_stage.cpp`]. This stage negates an image by turning light pixels dark and dark pixels light. This stage is mostly derived class boiler-plate, achieving the negation logic in barely half a dozen lines of code.
+
+For another example, see https://github.com/raspberrypi/rpicam-apps/blob/main/post_processing_stages/sobel_cv_stage.cpp[`sobel_cv_stage.cpp`], which implements a Sobel filter in just a few lines of OpenCV functions.
+
+==== TensorFlow Lite stages
+
+For stages that use TensorFlow Lite (TFLite), derive a new class from the `TfStage` class.
+This class delegates model execution to a separate thread to prevent camera stuttering.
+
+The `TfStage` class implements all the `PostProcessingStage` member functions post-processing stages must normally implement, _except for_ ``Name``.
+All `TfStage`-derived stages must implement the ``Name`` function, and should implement some or all of the following virtual member functions:
+
+`void readExtras()`:: The base class reads the named model and certain other parameters like the `refresh_rate`. Use this function to read extra parameters for the derived stage and check that the loaded model is correct (e.g. has the right input and output dimensions).
+`void checkConfiguration()`:: The base class fetches the low resolution stream that TFLite operates on and the full resolution stream in case the derived stage needs it. Use this function to check for the streams required by your stage. If your stage can't access one of the required streams, you might skip processing or throw an error.
+`void interpretOutputs()`:: Use this function to read and interpret the model output. _Runs in the same thread as the model when the model completes_.
+`void applyResults()`:: Use this function to apply results of the model (could be several frames old) to the current frame. Typically involves attaching metadata or drawing. _Runs in the main thread, before frames are delivered_.
+
+For an example implementation, see the https://github.com/raspberrypi/rpicam-apps/blob/main/post_processing_stages/object_classify_tf_stage.cpp[`object_classify_tf_stage.cpp`] and https://github.com/raspberrypi/rpicam-apps/blob/main/post_processing_stages/pose_estimation_tf_stage.cpp[`pose_estimation_tf_stage.cpp`].
diff --git a/documentation/asciidoc/computers/camera/rpicam_apps_writing.adoc b/documentation/asciidoc/computers/camera/rpicam_apps_writing.adoc
new file mode 100644
index 000000000..fd5a9217b
--- /dev/null
+++ b/documentation/asciidoc/computers/camera/rpicam_apps_writing.adoc
@@ -0,0 +1,59 @@
+=== Write your own `rpicam` apps
+
+`rpicam-apps` does not provide all of the camera-related features that anyone could ever need. Instead, these applications are small and flexible. Users who require different behaviour can implement it themselves.
+
+All of the `rpicam-apps` use an event loop that receives messages when a new set of frames arrives from the camera system. This set of frames is called a `CompletedRequest`. The `CompletedRequest` contains:
+
+* all images derived from that single camera frame: often a low-resolution image and a full-size output
+* metadata from the camera and post-processing systems
+
+==== `rpicam-hello`
+
+`rpicam-hello` is the smallest application, and the best place to start understanding `rpicam-apps` design. It extracts the `CompletedRequestPtr`, a shared pointer to the `CompletedRequest`, from the message, and forwards it to the preview window:
+
+[source,cpp]
+----
+CompletedRequestPtr &completed_request = std::get<CompletedRequestPtr>(msg.payload);
+app.ShowPreview(completed_request, app.ViewfinderStream());
+----
+
+Every `CompletedRequest` must be recycled back to the camera system so that the buffers can be reused. Otherwise, the camera runs out of buffers for new camera frames. This recycling process happens automatically when no references to the `CompletedRequest` remain using {cpp}'s _shared pointer_ and _custom deleter_ mechanisms.
+
+As a result, `rpicam-hello` must complete the following actions to recycle the buffer space:
+
+* The event loop must finish a cycle so the message (`msg` in the code), which holds a reference to `CompletedRequest`, can be replaced with the next message. This discards the reference to the previous message.
+
+* When the event thread calls `ShowPreview`, it passes the preview thread a reference to the `CompletedRequest`. The preview thread discards the last `CompletedRequest` instance each time `ShowPreview` is called.
+
+==== `rpicam-vid`
+
+`rpicam-vid` is similar to `rpicam-hello` with encoding added to the event loop. Before the event loop starts, `rpicam-vid` configures the encoder with a callback. The callback handles the buffer containing the encoded image data. In the code below, we send the buffer to the `Output` object. `Output` could write it to a file or stream it, depending on the options specified.
+
+[source,cpp]
+----
+app.SetEncodeOutputReadyCallback(std::bind(&Output::OutputReady, output.get(), _1, _2, _3, _4));
+----
+
+Because this code passes the encoder a reference to the `CompletedRequest`, `rpicam-vid` can't recycle buffer data until the event loop, preview window, _and_ encoder all discard their references.
+
+==== `rpicam-raw`
+
+`rpicam-raw` is similar to `rpicam-vid`. It also encodes during the event loop. However, `rpicam-raw` uses a dummy encoder called the `NullEncoder`. This uses the input image as the output buffer instead of encoding it with a codec. `NullEncoder` only discards its reference to the buffer once the output callback completes. This guarantees that the buffer isn't recycled before the callback processes the image.
+
+`rpicam-raw` doesn't forward anything to the preview window.
+
+`NullEncoder` is possibly overkill in `rpicam-raw`. We could probably send images straight to the `Output` object, instead. However, `rpicam-apps` need to limit work in the event loop. `NullEncoder` demonstrates how you can handle most processes (even holding onto a reference) in other threads.
+
+==== `rpicam-jpeg`
+
+`rpicam-jpeg` starts the camera in preview mode in the usual way. When the timer completes, it stops the preview and switches to still capture:
+
+[source,cpp]
+----
+app.StopCamera();
+app.Teardown();
+app.ConfigureStill();
+app.StartCamera();
+----
+
+The event loop grabs the first frame returned from still mode and saves this as a JPEG.
diff --git a/documentation/asciidoc/computers/camera/rpicam_configuration.adoc b/documentation/asciidoc/computers/camera/rpicam_configuration.adoc
new file mode 100644
index 000000000..c36db3f69
--- /dev/null
+++ b/documentation/asciidoc/computers/camera/rpicam_configuration.adoc
@@ -0,0 +1,57 @@
+=== Configuration
+
+Most use cases work automatically with no need to alter the camera configuration. However, some common use cases do require configuration tweaks, including:
+
+* Third-party cameras (the manufacturer's instructions should explain necessary configuration changes, if any)
+
+* Using a non-standard driver or overlay with an official Raspberry Pi camera
+
+Raspberry Pi OS recognises the following overlays in `/boot/firmware/config.txt`.
+
+|===
+| Camera Module | In `/boot/firmware/config.txt`
+
+| V1 camera (OV5647)
+| `dtoverlay=ov5647`
+
+| V2 camera (IMX219)
+| `dtoverlay=imx219`
+
+| HQ camera (IMX477)
+| `dtoverlay=imx477`
+
+| GS camera (IMX296)
+| `dtoverlay=imx296`
+
+| Camera Module 3 (IMX708)
+| `dtoverlay=imx708`
+
+| IMX290 and IMX327
+| `dtoverlay=imx290,clock-frequency=74250000` or `dtoverlay=imx290,clock-frequency=37125000` (both modules share the imx290 kernel driver; refer to instructions from the module vendor for the correct frequency)
+
+| IMX378
+| `dtoverlay=imx378`
+
+| OV9281
+| `dtoverlay=ov9281`
+|===
+
+To use one of these overlays, you must disable automatic camera detection. To disable automatic detection, set `camera_auto_detect=0` in `/boot/firmware/config.txt`. If `config.txt` already contains a line assigning a `camera_auto_detect` value, change the value to `0`. Reboot your Raspberry Pi with `sudo reboot` to load your changes.
+
+If your Raspberry Pi has two camera connectors (Raspberry Pi 5 or one of the Compute Modules, for example), then you can specify the use of camera connector 0 by adding `,cam0` to the `dtoverlay` that you used from the table above. If you do not add this, it will default to checking camera connector 1. Note that for official Raspberry Pi camera modules connected to SBCs (not Compute Modules), auto-detection will correctly identify all the cameras connected to your device.
+
+[[tuning-files]]
+==== Tweak camera behaviour with tuning files
+
+Raspberry Pi's `libcamera` implementation includes a **tuning file** for each camera. This file controls algorithms and hardware to produce the best image quality. `libcamera` can only determine the sensor in use, not the module. As a result, some modules require a tuning file override. Use the xref:camera_software.adoc#tuning-file[`tuning-file`] option to specify an override. You can also copy and alter existing tuning files to customise camera behaviour.
+
+For example, the no-IR-filter (NoIR) versions of sensors use Auto White Balance (AWB) settings different from the standard versions. On a Raspberry Pi 5 or later, you can specify the NoIR tuning file for the IMX219 sensor with the following command:
+
+[source,console]
+----
+$ rpicam-hello --tuning-file /usr/share/libcamera/ipa/rpi/pisp/imx219_noir.json
+----
+
+NOTE: Raspberry Pi models prior to Raspberry Pi 5 use different tuning files. On those devices, use the files stored in `/usr/share/libcamera/ipa/rpi/vc4/` instead.
+
+`libcamera` maintains tuning files for a number of cameras, including third-party models. For instance, you can find the tuning file for the Soho Enterprises SE327M12 in `se327m12.json`.
diff --git a/documentation/asciidoc/computers/camera/rpicam_detect.adoc b/documentation/asciidoc/computers/camera/rpicam_detect.adoc
new file mode 100644
index 000000000..e75a4a630
--- /dev/null
+++ b/documentation/asciidoc/computers/camera/rpicam_detect.adoc
@@ -0,0 +1,14 @@
+=== `rpicam-detect`
+
+NOTE: Raspberry Pi OS does not include `rpicam-detect`. However, you can build `rpicam-detect` if you have xref:camera_software.adoc#post-processing-with-tensorflow-lite[installed TensorFlow Lite]. For more information, see the xref:camera_software.adoc#build-libcamera-and-rpicam-apps[`rpicam-apps` build instructions]. Don't forget to pass `-Denable_tflite=enabled` when you run `meson`.
+
+`rpicam-detect` displays a preview window and monitors the contents using a Google MobileNet v1 SSD (Single Shot Detector) neural network trained to identify about 80 classes of objects using the Coco dataset. `rpicam-detect` recognises people, cars, cats and many other objects.
+
+Whenever `rpicam-detect` detects a target object, it captures a full-resolution JPEG. Then it returns to monitoring preview mode.
+
+See the xref:camera_software.adoc#object_detect_tf-stage[TensorFlow Lite object detector] section for general information on model usage. For example, you might spy secretly on your cats while you are away with:
+
+[source,console]
+----
+$ rpicam-detect -t 0 -o cat%04d.jpg --lores-width 400 --lores-height 300 --post-process-file object_detect_tf.json --object cat
+----
diff --git a/documentation/asciidoc/computers/camera/rpicam_hello.adoc b/documentation/asciidoc/computers/camera/rpicam_hello.adoc
new file mode 100644
index 000000000..de7dae16f
--- /dev/null
+++ b/documentation/asciidoc/computers/camera/rpicam_hello.adoc
@@ -0,0 +1,41 @@
+=== `rpicam-hello`
+
+`rpicam-hello` briefly displays a preview window containing the video feed from a connected camera. To use `rpicam-hello` to display a preview window for five seconds, run the following command in a terminal:
+
+[source,console]
+----
+$ rpicam-hello
+----
+
+You can pass an optional duration (in milliseconds) with the xref:camera_software.adoc#timeout[`timeout`] option. A value of `0` runs the preview indefinitely:
+
+[source,console]
+----
+$ rpicam-hello --timeout 0
+----
+
+Use `Ctrl+C` in the terminal or the close button on the preview window to stop `rpicam-hello`.
+
+==== Display an image sensor preview
+
+Most of the `rpicam-apps` display a preview image in a window. If there is no active desktop environment, the preview draws directly to the display using the Linux Direct Rendering Manager (DRM). Otherwise, `rpicam-apps` attempt to use the desktop environment. Both paths use zero-copy GPU buffer sharing: as a result, X forwarding is _not_ supported.
+
+If you run the X window server and want to use X forwarding, pass the xref:camera_software.adoc#qt-preview[`qt-preview`] flag to render the preview window in a https://en.wikipedia.org/wiki/Qt_(software)[Qt] window. The Qt preview window uses more resources than the alternatives.
+
+NOTE: Older systems using Gtk2 may, when linked with OpenCV, produce `Glib-GObject` errors and fail to show the Qt preview window. In this case edit the file `/etc/xdg/qt5ct/qt5ct.conf` as root and replace the line containing `style=gtk2` with `style=gtk3`.
+
+To suppress the preview window entirely, pass the xref:camera_software.adoc#nopreview[`nopreview`] flag:
+
+[source,console]
+----
+$ rpicam-hello -n
+----
+
+The xref:camera_software.adoc#info-text[`info-text`] option displays image information on the window title bar using `%` directives. For example, the following command displays the current red and blue gain values:
+
+[source,console]
+----
+$ rpicam-hello --info-text "red gain %rg, blue gain %bg"
+----
+
+For a full list of directives, see the xref:camera_software.adoc#info-text[`info-text` reference].
diff --git a/documentation/asciidoc/computers/camera/rpicam_jpeg.adoc b/documentation/asciidoc/computers/camera/rpicam_jpeg.adoc
new file mode 100644
index 000000000..253148728
--- /dev/null
+++ b/documentation/asciidoc/computers/camera/rpicam_jpeg.adoc
@@ -0,0 +1,19 @@
+=== `rpicam-jpeg`
+
+`rpicam-jpeg` helps you capture images on Raspberry Pi devices.
+
+To capture a full resolution JPEG image and save it to a file named `test.jpg`, run the following command:
+
+[source,console]
+----
+$ rpicam-jpeg --output test.jpg
+----
+
+You should see a preview window for five seconds. Then, `rpicam-jpeg` captures a full resolution JPEG image and saves it.
+
+Use the xref:camera_software.adoc#timeout[`timeout`] option to alter display time of the preview window. The xref:camera_software.adoc#width-and-height[`width` and `height`] options change the resolution of the saved image. For example, the following command displays the preview window for 2 seconds, then captures and saves an image with a resolution of 640×480 pixels:
+
+[source,console]
+----
+$ rpicam-jpeg --output test.jpg --timeout 2000 --width 640 --height 480
+----
diff --git a/documentation/asciidoc/computers/camera/rpicam_options_common.adoc b/documentation/asciidoc/computers/camera/rpicam_options_common.adoc
new file mode 100644
index 000000000..90e535ff8
--- /dev/null
+++ b/documentation/asciidoc/computers/camera/rpicam_options_common.adoc
@@ -0,0 +1,594 @@
+== `rpicam-apps` options reference
+
+=== Common options
+
+The following options apply across all the `rpicam-apps` with similar or identical semantics, unless otherwise noted.
+
+To pass one of the following options to an application, prefix the option name with `--`. If the option requires a value, pass the value immediately after the option name, separated by a single space. If the value contains a space, surround the value in quotes.
+
+Some options have shorthand aliases, for example `-h` instead of `--help`. Use these shorthand aliases instead of the full option name to save space and time at the expense of readability.
+
+==== `help`
+
+Alias: `-h`
+
+Prints the full set of options, along with a brief synopsis of each option. Does not accept a value.
+
+==== `version`
+
+Prints out version strings for `libcamera` and `rpicam-apps`. Does not accept a value.
+
+Example output:
+
+----
+rpicam-apps build: ca559f46a97a 27-09-2021 (14:10:24)
+libcamera build: v0.0.0+3058-c29143f7
+----
+
+==== `list-cameras`
+
+Lists the detected cameras attached to your Raspberry Pi and their available sensor modes. Does not accept a value.
+
+Sensor mode identifiers have the following form: `S<Bayer order><Bit-depth>_<Optional packing> : <Resolution list>`
+
+Crop is specified in native sensor pixels (even in pixel binning mode) as `(<x>, <y>)/<width>×<height>`. `(x, y)` specifies the location of the crop window of size `width × height` in the sensor array.
+
+For example, the following output displays information about an `IMX219` sensor at index 0 and an `IMX477` sensor at index 1:
+
+----
+Available cameras
+-----------------
+0 : imx219 [3280x2464] (/base/soc/i2c0mux/i2c@1/imx219@10)
+ Modes: 'SRGGB10_CSI2P' : 640x480 [206.65 fps - (1000, 752)/1280x960 crop]
+ 1640x1232 [41.85 fps - (0, 0)/3280x2464 crop]
+ 1920x1080 [47.57 fps - (680, 692)/1920x1080 crop]
+ 3280x2464 [21.19 fps - (0, 0)/3280x2464 crop]
+ 'SRGGB8' : 640x480 [206.65 fps - (1000, 752)/1280x960 crop]
+ 1640x1232 [41.85 fps - (0, 0)/3280x2464 crop]
+ 1920x1080 [47.57 fps - (680, 692)/1920x1080 crop]
+ 3280x2464 [21.19 fps - (0, 0)/3280x2464 crop]
+1 : imx477 [4056x3040] (/base/soc/i2c0mux/i2c@1/imx477@1a)
+ Modes: 'SRGGB10_CSI2P' : 1332x990 [120.05 fps - (696, 528)/2664x1980 crop]
+ 'SRGGB12_CSI2P' : 2028x1080 [50.03 fps - (0, 440)/4056x2160 crop]
+ 2028x1520 [40.01 fps - (0, 0)/4056x3040 crop]
+ 4056x3040 [10.00 fps - (0, 0)/4056x3040 crop]
+----
+
+For the IMX219 sensor in the above example:
+
+* all modes have an `RGGB` Bayer ordering
+* all modes provide either 8-bit or 10-bit CSI2 packed readout at the listed resolutions
+
+==== `camera`
+
+Selects the camera to use. Specify an index from the xref:camera_software.adoc#list-cameras[list of available cameras].
+
+==== `config`
+
+Alias: `-c`
+
+Specify a file containing CLI options and values. Consider a file named `example_configuration.txt` that contains the following text, specifying options and values as key-value pairs, one option per line, long (non-alias) option names only:
+
+----
+timeout=99000
+verbose=
+----
+
+TIP: Omit the leading `--` that you normally pass on the command line. For flags that lack a value, such as `verbose` in the above example, you must include a trailing `=`.
+
+You could then run the following command to specify a timeout of 99000 milliseconds and verbose output:
+
+[source,console]
+----
+$ rpicam-hello --config example_configuration.txt
+----
+
+==== `timeout`
+
+Alias: `-t`
+
+Default value: 5000 milliseconds (5 seconds)
+
+Specify how long the application runs before closing. This value is interpreted as a number of milliseconds unless an optional suffix is used to change the unit. The suffix may be one of:
+
+* `min` - minutes
+* `s` or `sec` - seconds
+* `ms` - milliseconds (the default if no suffix used)
+* `us` - microseconds
+* `ns` - nanoseconds.
+
+This time applies to both video recording and preview windows. When capturing a still image, the application shows a preview window for the length of time specified by the `timeout` parameter before capturing the output image.
+
+To run the application indefinitely, specify a value of `0`. Floating point values are also permitted.
+
+Example: `rpicam-hello -t 0.5min` would run for 30 seconds.
+
+==== `preview`
+
+Alias: `-p`
+
+Sets the location (x,y coordinates) and size (w,h dimensions) of the desktop or DRM preview window. Does not affect the resolution or aspect ratio of images requested from the camera. Scales image size and pillar or letterboxes image aspect ratio to fit within the preview window.
+
+Pass the preview window dimensions in the following comma-separated form: `x,y,w,h`
+
+Example: `rpicam-hello --preview 100,100,500,500`
+
+image::images/preview_window.jpg[Letterboxed preview image]
+
+==== `fullscreen`
+
+Alias: `-f`
+
+Forces the preview window to use the entire screen with no border or title bar. Scales image size and pillar or letterboxes image aspect ratio to fit within the entire screen. Does not accept a value.
+
+==== `qt-preview`
+
+Uses the Qt preview window, which consumes more resources than the alternatives, but supports X window forwarding. Incompatible with the xref:camera_software.adoc#fullscreen[`fullscreen`] flag. Does not accept a value.
+
+==== `nopreview`
+
+Alias: `-n`
+
+Causes the application to _not_ display a preview window at all. Does not accept a value.
+
+
+==== `info-text`
+
+Default value: `"#%frame (%fps fps) exp %exp ag %ag dg %dg"`
+
+Sets the supplied string as the title of the preview window when running in a desktop environment. Supports the following image metadata substitutions:
+
+|===
+| Directive | Substitution
+
+| `%frame`
+| Sequence number of the frame.
+
+| `%fps`
+| Instantaneous frame rate.
+
+| `%exp`
+| Shutter speed used to capture the image, in microseconds.
+
+| `%ag`
+| Analogue gain applied to the image in the sensor.
+
+| `%dg`
+| Digital gain applied to the image by the ISP.
+
+| `%rg`
+| Gain applied to the red component of each pixel.
+
+| `%bg`
+| Gain applied to the blue component of each pixel.
+
+| `%focus`
+| Focus metric for the image, where a larger value implies a sharper image.
+
+| `%lp`
+| Current lens position in dioptres (1 / distance in metres).
+
+| `%afstate`
+| Autofocus algorithm state (`idle`, `scanning`, `focused` or `failed`).
+|===
+
+image::images/focus.jpg[Image showing focus measure]
+
+==== `width` and `height`
+
+Each accepts a single number defining the dimensions, in pixels, of the captured image.
+
+For `rpicam-still`, `rpicam-jpeg` and `rpicam-vid`, specifies output resolution.
+
+For `rpicam-raw`, specifies raw frame resolution. For cameras with a 2×2 binned readout mode, specifying a resolution equal to or smaller than the binned mode captures 2×2 binned raw frames.
+
+For `rpicam-hello`, has no effect.
+
+Examples:
+
+* `rpicam-vid -o test.h264 --width 1920 --height 1080` captures 1080p video.
+
+* `rpicam-still -r -o test.jpg --width 2028 --height 1520` captures a 2028×1520 resolution JPEG. If used with the HQ camera, uses 2×2 binned mode, so the raw file (`test.dng`) contains a 2028×1520 raw Bayer image.
+
+==== `viewfinder-width` and `viewfinder-height`
+
+Each accepts a single number defining the dimensions, in pixels, of the image displayed in the preview window. Does not affect the preview window dimensions, since images are resized to fit. Does not affect captured still images or videos.
+
+==== `mode`
+
+Allows you to specify a camera mode in the following colon-separated format: `<width>:<height>:<bit-depth>:<packing>`. The system selects the closest available option for the sensor if there is not an exact match for a provided value. You can use the packed (`P`) or unpacked (`U`) packing formats. Impacts the format of stored videos and stills, but not the format of frames passed to the preview window.
+
+Bit-depth and packing are optional.
+Bit-depth defaults to 12.
+Packing defaults to `P` (packed).
+
+For information about the bit-depth, resolution, and packing options available for your sensor, see xref:camera_software.adoc#list-cameras[`list-cameras`].
+
+Examples:
+
+* `4056:3040:12:P` - 4056×3040 resolution, 12 bits per pixel, packed.
+* `1632:1224:10` - 1632×1224 resolution, 10 bits per pixel.
+* `2592:1944:10:U` - 2592×1944 resolution, 10 bits per pixel, unpacked.
+* `3264:2448` - 3264×2448 resolution.
+
+===== Packed format details
+
+The packed format uses less storage for pixel data.
+
+_On Raspberry Pi 4 and earlier devices_, the packed format packs pixels using the MIPI CSI-2 standard. This means:
+
+* 10-bit camera modes pack 4 pixels into 5 bytes. The first 4 bytes contain the 8 most significant bits (MSBs) of each pixel, and the final byte contains the 4 pairs of least significant bits (LSBs).
+* 12-bit camera modes pack 2 pixels into 3 bytes. The first 2 bytes contain the 8 most significant bits (MSBs) of each pixel, and the final byte contains the 4 least significant bits (LSBs) of both pixels.
+
+_On Raspberry Pi 5 and later devices_, the packed format compresses pixel values with a visually lossless compression scheme into 8 bits (1 byte) per pixel.
+
+===== Unpacked format details
+
+The unpacked format provides pixel values that are much easier to manually manipulate, at the expense of using more storage for pixel data.
+
+On all devices, the unpacked format uses 2 bytes per pixel.
+
+_On Raspberry Pi 4 and earlier devices_, applications apply zero padding at the *most significant end*. In the unpacked format, a pixel from a 10-bit camera mode cannot exceed the value 1023.
+
+_On Raspberry Pi 5 and later devices_, applications apply zero padding at the *least significant end*, so images use the full 16-bit dynamic range of the pixel depth delivered by the sensor.
+
+==== `viewfinder-mode`
+
+Identical to the `mode` option, but it applies to the data passed to the preview window. For more information, see the xref:camera_software.adoc#mode[`mode` documentation].
+
+==== `lores-width` and `lores-height`
+
+Delivers a second, lower-resolution image stream from the camera, scaled down to the specified dimensions.
+
+Each accepts a single number defining the dimensions, in pixels, of the lower-resolution stream.
+
+Available for preview and video modes. Not available for still captures. If you specify an aspect ratio different from that of the normal resolution stream, this generates non-square pixels.
+
+For `rpicam-vid`, disables extra colour-denoise processing.
+
+
+Useful for image analysis when combined with xref:camera_software.adoc#post-processing-with-rpicam-apps[image post-processing].
+
+==== `hflip`
+
+Flips the image horizontally. Does not accept a value.
+
+==== `vflip`
+
+Flips the image vertically. Does not accept a value.
+
+==== `rotation`
+
+Rotates the image extracted from the sensor. Accepts only the values 0 or 180.
+
+==== `roi`
+
+Crops the image extracted from the full field of the sensor. Accepts four decimal values, _ranged 0 to 1_, in the following format: `<x>,<y>,<w>,<h>`. Each of these values represents a percentage of the available width and height as a decimal between 0 and 1.
+
+These values define the following proportions:
+
+* `<x>`: X coordinates to skip before extracting an image
+* `<y>`: Y coordinates to skip before extracting an image
+* `<w>`: image width to extract
+* `<h>`: image height to extract
+
+Defaults to `0,0,1,1` (starts at the first X coordinate and the first Y coordinate, uses 100% of the image width, uses 100% of the image height).
+
+Examples:
+
+* `rpicam-hello --roi 0.25,0.25,0.5,0.5` selects exactly a quarter of the total number of pixels cropped from the centre of the image (skips the first 25% of X coordinates, skips the first 25% of Y coordinates, uses 50% of the total image width, uses 50% of the total image height).
+* `rpicam-hello --roi 0,0,0.25,0.25` selects exactly one sixteenth of the total number of pixels cropped from the top left of the image (skips the first 0% of X coordinates, skips the first 0% of Y coordinates, uses 25% of the image width, uses 25% of the image height).
+
+==== `hdr`
+
+Default value: `off`
+
+Runs the camera in HDR mode. If passed without a value, assumes `auto`. Accepts one of the following values:
+
+* `off` - Disables HDR.
+* `auto` - Enables HDR on supported devices. Uses the sensor's built-in HDR mode if available. If the sensor lacks a built-in HDR mode, uses on-board HDR mode, if available.
+* `single-exp` - Uses on-board HDR mode, if available, even if the sensor has a built-in HDR mode. If on-board HDR mode is not available, disables HDR.
+
+Raspberry Pi 5 and later devices have an on-board HDR mode.
+
+To check for built-in HDR modes in a sensor, pass this option in addition to xref:camera_software.adoc#list-cameras[`list-cameras`].
+
+=== Camera control options
+
+The following options control image processing and algorithms that affect camera image quality.
+
+==== `sharpness`
+
+Sets image sharpness. Accepts a numeric value along the following spectrum:
+
+* `0.0` applies no sharpening
+* values greater than `0.0`, but less than `1.0` apply less than the default amount of sharpening
+* `1.0` applies the default amount of sharpening
+* values greater than `1.0` apply extra sharpening
+
+==== `contrast`
+
+Specifies the image contrast. Accepts a numeric value along the following spectrum:
+
+* `0.0` applies minimum contrast
+* values greater than `0.0`, but less than `1.0` apply less than the default amount of contrast
+* `1.0` applies the default amount of contrast
+* values greater than `1.0` apply extra contrast
+
+
+==== `brightness`
+
+Specifies the image brightness, added as an offset to all pixels in the output image. Accepts a numeric value along the following spectrum:
+
+* `-1.0` applies minimum brightness (black)
+* `0.0` applies standard brightness
+* `1.0` applies maximum brightness (white)
+
+For many use cases, prefer xref:camera_software.adoc#ev[`ev`].
+
+==== `saturation`
+
+Specifies the image colour saturation. Accepts a numeric value along the following spectrum:
+
+* `0.0` applies minimum saturation (grayscale)
+* values greater than `0.0`, but less than `1.0` apply less than the default amount of saturation
+* `1.0` applies the default amount of saturation
+* values greater than `1.0` apply extra saturation
+
+==== `ev`
+
+Specifies the https://en.wikipedia.org/wiki/Exposure_value[exposure value (EV)] compensation of the image in stops. Accepts a numeric value that controls target values passed to the Automatic Exposure/Gain Control (AEC/AGC) processing algorithm along the following spectrum:
+
+* `-10.0` applies minimum target values
+* `0.0` applies standard target values
+* `10.0` applies maximum target values
+
+==== `shutter`
+
+Specifies the exposure time, using the shutter, in _microseconds_. Gain can still vary when you use this option. If the camera runs at a framerate so fast it does not allow for the specified exposure time (for instance, a framerate of 1000fps and an exposure time of 10000 microseconds), the sensor will use the maximum exposure time allowed by the framerate.
+
+For a list of minimum and maximum shutter times for official cameras, see the xref:../accessories/camera.adoc#hardware-specification[camera hardware documentation]. Values above the maximum result in undefined behaviour.
+
+==== `gain`
+
+Alias: `--analoggain`
+
+Sets the combined analogue and digital gain. When the sensor driver can provide the requested gain, only uses analogue gain. When analogue gain reaches the maximum value, the ISP applies digital gain. Accepts a numeric value.
+
+For a list of analogue gain limits, for official cameras, see the xref:../accessories/camera.adoc#hardware-specification[camera hardware documentation].
+
+Sometimes, digital gain can exceed 1.0 even when the analogue gain limit is not exceeded. This can occur in the following situations:
+
+* Either of the colour gains drops below 1.0, which will cause the digital gain to settle to 1.0/min(red_gain,blue_gain). This keeps the total digital gain applied to any colour channel above 1.0 to avoid discolouration artefacts.
+* Slight variances during Automatic Exposure/Gain Control (AEC/AGC) changes.
+
+==== `metering`
+
+Default value: `centre`
+
+Sets the metering mode of the Automatic Exposure/Gain Control (AEC/AGC) algorithm. Accepts the following values:
+
+* `centre` - centre weighted metering
+* `spot` - spot metering
+* `average` - average or whole frame metering
+* `custom` - custom metering mode defined in the camera tuning file
+
+For more information on defining a custom metering mode, and adjusting region weights in existing metering modes, see the https://datasheets.raspberrypi.com/camera/raspberry-pi-camera-guide.pdf[Tuning guide for the Raspberry Pi cameras and libcamera].
+
+==== `exposure`
+
+Sets the exposure profile. Changing the exposure profile should not affect the image exposure. Instead, different modes adjust gain settings to achieve the same net result. Accepts the following values:
+
+* `sport`: short exposure, larger gains
+* `normal`: normal exposure, normal gains
+* `long`: long exposure, smaller gains
+
+You can edit exposure profiles using tuning files. For more information, see the https://datasheets.raspberrypi.com/camera/raspberry-pi-camera-guide.pdf[Tuning guide for the Raspberry Pi cameras and libcamera].
+
+==== `awb`
+
+Sets the Auto White Balance (AWB) mode. Accepts the following values:
+
+|===
+| Mode name | Colour temperature range
+
+| `auto`
+| 2500K to 8000K
+
+| `incandescent`
+| 2500K to 3000K
+
+| `tungsten`
+| 3000K to 3500K
+
+| `fluorescent`
+| 4000K to 4700K
+
+| `indoor`
+| 3000K to 5000K
+
+| `daylight`
+| 5500K to 6500K
+
+| `cloudy`
+| 7000K to 8500K
+
+| `custom`
+| A custom range defined in the tuning file.
+|===
+
+These values are only approximate: values could vary according to the camera tuning.
+
+No mode fully disables AWB. Instead, you can fix colour gains with xref:camera_software.adoc#awbgains[`awbgains`].
+
+For more information on AWB modes, including how to define a custom one, see the https://datasheets.raspberrypi.com/camera/raspberry-pi-camera-guide.pdf[Tuning guide for the Raspberry Pi cameras and libcamera].
+
+==== `awbgains`
+
+Sets a fixed red and blue gain value to be used instead of an Auto White Balance (AWB) algorithm. Set non-zero values to disable AWB. Accepts comma-separated numeric input in the following format: `<red_gain>,<blue_gain>`
+
+==== `denoise`
+
+Default value: `auto`
+
+Sets the denoising mode. Accepts the following values:
+
+* `auto`: Enables standard spatial denoise. Uses extra-fast colour denoise for video, and high-quality colour denoise for images. Enables no extra colour denoise in the preview window.
+
+* `off`: Disables spatial and colour denoise.
+
+* `cdn_off`: Disables colour denoise.
+
+* `cdn_fast`: Uses fast colour denoise.
+
+* `cdn_hq`: Uses high-quality colour denoise. Not appropriate for video/viewfinder due to reduced throughput.
+
+Even fast colour denoise can lower framerates. High quality colour denoise _significantly_ lowers framerates.
+
+==== `tuning-file`
+
+Specifies the camera tuning file. The tuning file allows you to control many aspects of image processing, including the Automatic Exposure/Gain Control (AEC/AGC), Auto White Balance (AWB), colour shading correction, colour processing, denoising and more. Accepts a tuning file path as input.
+
+For more information about tuning files, see xref:camera_software.adoc#tuning-files[Tuning Files].
+
+==== `autofocus-mode`
+
+Default value: `default`
+
+Specifies the autofocus mode. Accepts the following values:
+
+* `default`: puts the camera into continuous autofocus mode unless xref:camera_software.adoc#lens-position[`lens-position`] or xref:camera_software.adoc#autofocus-on-capture[`autofocus-on-capture`] override the mode to manual
+* `manual`: does not move the lens at all unless manually configured with xref:camera_software.adoc#lens-position[`lens-position`]
+* `auto`: only moves the lens for an autofocus sweep when the camera starts or just before capture if xref:camera_software.adoc#autofocus-on-capture[`autofocus-on-capture`] is also used
+* `continuous`: adjusts the lens position automatically as the scene changes
+
+This option is only supported for certain camera modules.
+
+==== `autofocus-range`
+
+Default value: `normal`
+
+Specifies the autofocus range. Accepts the following values:
+
+* `normal`: focuses from reasonably close to infinity
+* `macro`: focuses only on close objects, including the closest focal distances supported by the camera
+* `full`: focus on the entire range, from the very closest objects to infinity
+
+This option is only supported for certain camera modules.
+
+==== `autofocus-speed`
+
+Default value: `normal`
+
+Specifies the autofocus speed. Accepts the following values:
+
+* `normal`: changes the lens position at normal speed
+* `fast`: changes the lens position quickly
+
+This option is only supported for certain camera modules.
+
+==== `autofocus-window`
+
+Specifies the autofocus window within the full field of the sensor. Accepts four decimal values, _ranged 0 to 1_, in the following format: `<x>,<y>,<w>,<h>`. Each of these values represents a percentage of the available width and height as a decimal between 0 and 1.
+
+These values define the following proportions:
+
+* `<x>`: X coordinates to skip before applying autofocus
+* `<y>`: Y coordinates to skip before applying autofocus
+* `<w>`: autofocus area width
+* `<h>`: autofocus area height
+
+The default value uses the middle third of the output image in both dimensions (1/9 of the total image area).
+
+Examples:
+
+* `rpicam-hello --autofocus-window 0.25,0.25,0.5,0.5` selects exactly a quarter of the total number of pixels cropped from the centre of the image (skips the first 25% of X coordinates, skips the first 25% of Y coordinates, uses 50% of the total image width, uses 50% of the total image height).
+* `rpicam-hello --autofocus-window 0,0,0.25,0.25` selects exactly one sixteenth of the total number of pixels cropped from the top left of the image (skips the first 0% of X coordinates, skips the first 0% of Y coordinates, uses 25% of the image width, uses 25% of the image height).
+
+This option is only supported for certain camera modules.
+
+==== `lens-position`
+
+Default value: `default`
+
+Moves the lens to a fixed focal distance, normally given in dioptres (units of 1 / _distance in metres_). Accepts the following spectrum of values:
+
+* `0.0`: moves the lens to the "infinity" position
+* Any other `number`: moves the lens to the 1 / `number` position. For example, the value `2.0` would focus at approximately 0.5m
+* `default`: move the lens to a default position which corresponds to the hyperfocal position of the lens
+
+Lens calibration is imperfect, so different camera modules of the same model may vary.
+
+==== `verbose`
+
+Alias: `-v`
+
+Default value: `1`
+
+Sets the verbosity level. Accepts the following values:
+
+* `0`: no output
+* `1`: normal output
+* `2`: verbose output
+
+=== Output file options
+
+==== `output`
+
+Alias: `-o`
+
+Sets the name of the file used to record images or video. Besides plaintext file names, accepts the following special values:
+
+* `-`: write to stdout.
+* `udp://` (prefix): a network address for UDP streaming.
+* `tcp://` (prefix): a network address for TCP streaming.
+* Include the `%d` directive in the file name to replace the directive with a count that increments for each opened file. This directive supports standard C format directive modifiers.
+
+Examples:
+
+* `rpicam-vid -t 100000 --segment 10000 -o chunk%04d.h264` records a 100 second file in 10 second segments, where each file includes an incrementing four-digit counter padded with leading zeros: e.g. `chunk0001.h264`, `chunk0002.h264`, etc.
+
+* `rpicam-vid -t 0 --inline -o udp://192.168.1.13:5000` streams H.264 video to network address 192.168.1.13 using UDP on port 5000.
+
+==== `wrap`
+
+Sets a maximum value for the counter used by the xref:camera_software.adoc#output[`output`] `%d` directive. The counter resets to zero after reaching this value. Accepts a numeric value.
+
+==== `flush`
+
+Flushes output files to disk as soon as a frame finishes writing, instead of waiting for the system to handle it. Does not accept a value.
+
+==== `post-process-file`
+
+Specifies a JSON file that configures the post-processing applied by the imaging pipeline. This applies to camera images _before_ they reach the application. This works similarly to the legacy `raspicam` "image effects". Accepts a file name path as input.
+
+Post-processing is a large topic and admits the use of third-party software like OpenCV and TensorFlowLite to analyse and manipulate images. For more information, see xref:camera_software.adoc#post-processing-with-rpicam-apps[post-processing].
+
+==== `buffer-count`
+
+The number of buffers to allocate for still image capture or for video recording. The default value of zero lets each application choose a reasonable number for its own use case (1 for still image capture, and 6 for video recording). Increasing the number can sometimes help to reduce the number of frame drops, particularly at higher framerates.
+
+==== `viewfinder-buffer-count`
+
+As the `buffer-count` option, but applies when running in preview mode (that is `rpicam-hello` or the preview, not capture, phase of `rpicam-still`).
+
+==== `metadata`
+
+Save captured image metadata to a file or `-` for stdout. The fields in the metadata output will depend on the camera model in use.
+
+See also `metadata-format`.
+
+==== `metadata-format`
+
+Format to save the metadata in. Accepts the following values:
+
+* `txt` for text format
+* `json` for JSON format
+
+In text format, each line will have the form
+
+ key=value
+
+In JSON format, the output is a JSON object.
+
+This option does nothing unless `--metadata` is also specified.
diff --git a/documentation/asciidoc/computers/camera/rpicam_options_detect.adoc b/documentation/asciidoc/computers/camera/rpicam_options_detect.adoc
new file mode 100644
index 000000000..298116505
--- /dev/null
+++ b/documentation/asciidoc/computers/camera/rpicam_options_detect.adoc
@@ -0,0 +1,15 @@
+=== Detection options
+
+The command line options specified in this section apply only to object detection using `rpicam-detect`.
+
+To pass one of the following options to `rpicam-detect`, prefix the option name with `--`. If the option requires a value, pass the value immediately after the option name, separated by a single space. If the value contains a space, surround the value in quotes.
+
+Some options have shorthand aliases, for example `-h` instead of `--help`. Use these shorthand aliases instead of the full option name to save space and time at the expense of readability.
+
+==== `object`
+
+Detects objects with the given name, sourced from the model's label file. Accepts a plaintext file name as input.
+
+==== `gap`
+
+Wait at least this many frames between captures. Accepts numeric values.
diff --git a/documentation/asciidoc/computers/camera/rpicam_options_libav.adoc b/documentation/asciidoc/computers/camera/rpicam_options_libav.adoc
new file mode 100644
index 000000000..3b1f2ce19
--- /dev/null
+++ b/documentation/asciidoc/computers/camera/rpicam_options_libav.adoc
@@ -0,0 +1,65 @@
+=== `libav` options
+
+The command line options specified in this section apply only to the `libav` video backend.
+
+To enable the `libav` backend, pass the xref:camera_software.adoc#codec[`codec`] option the value `libav`.
+
+To pass one of the following options to an application, prefix the option name with `--`. If the option requires a value, pass the value immediately after the option name, separated by a single space. If the value contains a space, surround the value in quotes.
+
+Some options have shorthand aliases, for example `-h` instead of `--help`. Use these shorthand aliases instead of the full option name to save space and time at the expense of readability.
+
+==== `libav-format`
+
+Sets the `libav` output format. Accepts the following values:
+
+* `mkv` encoding
+* `mp4` encoding
+* `avi` encoding
+* `h264` streaming
+* `mpegts` streaming
+
+If you do not provide this option, the file extension passed to the xref:camera_software.adoc#output[`output`] option determines the file format.
+
+==== `libav-audio`
+
+Enables audio recording. When enabled, you must also specify an xref:camera_software.adoc#audio-codec[`audio-codec`]. Does not accept a value.
+
+==== `audio-codec`
+
+Default value: `aac`
+
+Selects an audio codec for output. For a list of available codecs, run `ffmpeg -codecs`.
+
+==== `audio-bitrate`
+
+Sets the bitrate for audio encoding in bits per second. Accepts numeric input.
+
+Example: `rpicam-vid --codec libav -o test.mp4 --audio-codec mp2 --audio-bitrate 16384` (Records audio at 16 kilobits/sec with the mp2 codec)
+
+==== `audio-samplerate`
+
+Default value: `0`
+
+Sets the audio sampling rate in Hz. Accepts numeric input. `0` uses the input sample rate.
+
+==== `audio-device`
+
+Select an ALSA input device for audio recording. For a list of available devices, run the following command:
+
+[source,console]
+----
+$ pactl list | grep -A2 'Source #' | grep 'Name: '
+----
+
+You should see output similar to the following:
+
+----
+Name: alsa_output.platform-bcm2835_audio.analog-stereo.monitor
+Name: alsa_output.platform-fef00700.hdmi.hdmi-stereo.monitor
+Name: alsa_output.usb-GN_Netcom_A_S_Jabra_EVOLVE_LINK_000736B1214E0A-00.analog-stereo.monitor
+Name: alsa_input.usb-GN_Netcom_A_S_Jabra_EVOLVE_LINK_000736B1214E0A-00.mono-fallback
+----
+
+==== `av-sync`
+
+Shifts the audio sample timestamp by a value in microseconds. Accepts positive and negative numeric values.
diff --git a/documentation/asciidoc/computers/camera/rpicam_options_still.adoc b/documentation/asciidoc/computers/camera/rpicam_options_still.adoc
new file mode 100644
index 000000000..4e20880dc
--- /dev/null
+++ b/documentation/asciidoc/computers/camera/rpicam_options_still.adoc
@@ -0,0 +1,126 @@
+=== Image options
+
+The command line options specified in this section apply only to still image output.
+
+To pass one of the following options to an application, prefix the option name with `--`. If the option requires a value, pass the value immediately after the option name, separated by a single space. If the value contains a space, surround the value in quotes.
+
+Some options have shorthand aliases, for example `-h` instead of `--help`. Use these shorthand aliases instead of the full option name to save space and time at the expense of readability.
+
+==== `quality`
+
+Alias: `-q`
+
+Default value: `93`
+
+Sets the JPEG quality. Accepts a value between `1` and `100`.
+
+==== `exif`
+
+Saves extra EXIF tags in the JPEG output file. Only applies to JPEG output. Because of limitations in the `libexif` library, many tags are currently (incorrectly) formatted as ASCII and print a warning in the terminal.
+
+This option is necessary to add certain EXIF tags related to camera settings. You can add tags unrelated to camera settings to the output JPEG after recording with https://exiftool.org/[ExifTool].
+
+Example: `rpicam-still -o test.jpg --exif IFD0.Artist=Someone`
+
+==== `timelapse`
+
+Records images at the specified interval. Accepts an interval in milliseconds. Combine this setting with xref:camera_software.adoc#timeout[`timeout`] to capture repeated images over time.
+
+You can specify separate filenames for each output file using string formatting, e.g. `--output test%d.jpg`.
+
+Example: `rpicam-still -t 100000 -o test%d.jpg --timelapse 10000` captures an image every 10 seconds for 100 seconds.
+
+==== `framestart`
+
+Configures a starting value for the frame counter accessed in output file names as `%d`. Accepts an integer starting value.
+
+==== `datetime`
+
+Uses the current date and time in the output file name, in the form `MMDDhhmmss.jpg`:
+
+* `MM` = 2-digit month number
+* `DD` = 2-digit day number
+* `hh` = 2-digit 24-hour hour number
+* `mm` = 2-digit minute number
+* `ss` = 2-digit second number
+
+Does not accept a value.
+
+==== `timestamp`
+
+Uses the current system https://en.wikipedia.org/wiki/Unix_time[Unix time] as the output file name. Does not accept a value.
+
+==== `restart`
+
+Default value: `0`
+
+Configures the restart marker interval for JPEG output. JPEG restart markers can help limit the impact of corruption on JPEG images, and additionally enable the use of multi-threaded JPEG encoding and decoding. Accepts an integer value.
+
+==== `immediate`
+
+Captures the image immediately when the application runs.
+
+==== `keypress`
+
+Alias: `-k`
+
+Captures an image when the xref:camera_software.adoc#timeout[`timeout`] expires or on press of the *Enter* key, whichever comes first. Press the `x` key, then *Enter* to exit without capturing. Does not accept a value.
+
+==== `signal`
+
+Captures an image when the xref:camera_software.adoc#timeout[`timeout`] expires or when `SIGUSR1` is received. Use `SIGUSR2` to exit without capturing. Does not accept a value.
+
+==== `thumb`
+
+Default value: `320:240:70`
+
+Configures the dimensions and quality of the thumbnail with the following format: `<width>:<height>:<quality>` (or `none`, which omits the thumbnail).
+
+==== `encoding`
+
+Alias: `-e`
+
+Default value: `jpg`
+
+Sets the encoder to use for image output. Accepts the following values:
+
+* `jpg` - JPEG
+* `png` - PNG
+* `bmp` - BMP
+* `rgb` - binary dump of uncompressed RGB pixels
+* `yuv420` - binary dump of uncompressed YUV420 pixels
+
+This option always determines the encoding, overriding the extension passed to xref:camera_software.adoc#output[`output`].
+
+When using the xref:camera_software.adoc#datetime[`datetime`] and xref:camera_software.adoc#timestamp[`timestamp`] options, this option determines the output file extension.
+
+==== `raw`
+
+Alias: `-r`
+
+Saves a raw Bayer file in DNG format in addition to the output image. Replaces the output file name extension with `.dng`. You can process these standard DNG files with tools like _dcraw_ or _RawTherapee_. Does not accept a value.
+
+The image data in the raw file is exactly what came out of the sensor, with no processing from the ISP or anything else. The EXIF data saved in the file, among other things, includes:
+
+* exposure time
+* analogue gain (the ISO tag is 100 times the analogue gain used)
+* white balance gains (which are the reciprocals of the "as shot neutral" values)
+* the colour matrix used by the ISP
+
+==== `latest`
+
+Creates a symbolic link to the most recently saved file. Accepts a symbolic link name as input.
+
+==== `autofocus-on-capture`
+
+If set, runs an autofocus cycle _just before_ capturing an image. Interacts with the following xref:camera_software.adoc#autofocus-mode[`autofocus-mode`] values:
+
+* `default` or `manual`: only runs the capture-time autofocus cycle.
+
+* `auto`: runs an additional autofocus cycle when the preview window loads.
+
+* `continuous`: ignores this option, instead continually focusing throughout the preview.
+
+Does not require a value, but you can pass `1` to enable and `0` to disable. Not passing a value is equivalent to passing `1`.
+
+Only supported by some camera modules (such as the _Raspberry Pi Camera Module 3_).
diff --git a/documentation/asciidoc/computers/camera/rpicam_options_vid.adoc b/documentation/asciidoc/computers/camera/rpicam_options_vid.adoc
new file mode 100644
index 000000000..00ac1a258
--- /dev/null
+++ b/documentation/asciidoc/computers/camera/rpicam_options_vid.adoc
@@ -0,0 +1,141 @@
+=== Video options
+
+The command line options specified in this section apply only to video output.
+
+To pass one of the following options to an application, prefix the option name with `--`. If the option requires a value, pass the value immediately after the option name, separated by a single space. If the value contains a space, surround the value in quotes.
+
+Some options have shorthand aliases, for example `-h` instead of `--help`. Use these shorthand aliases instead of the full option name to save space and time at the expense of readability.
+
+==== `quality`
+
+Alias: `-q`
+
+Default value: `50`
+
+Accepts an MJPEG quality level between 1 and 100. Only applies to videos encoded in the MJPEG format.
+
+==== `bitrate`
+
+Alias: `-b`
+
+Controls the target bitrate used by the H.264 encoder in bits per second. Only applies to videos encoded in the H.264 format. Impacts the size of the output video.
+
+
+Example: `rpicam-vid -b 10000000 --width 1920 --height 1080 -o test.h264`
+
+==== `intra`
+
+Alias: `-g`
+
+Default value: `60`
+
+Sets the frequency of Iframes (intra frames) in the H.264 bitstream. Accepts a number of frames. Only applies to videos encoded in the H.264 format.
+
+==== `profile`
+
+Sets the H.264 profile. Accepts the following values:
+
+* `baseline`
+* `main`
+* `high`
+
+Only applies to videos encoded in the H.264 format.
+
+==== `level`
+
+Sets the H.264 level. Accepts the following values:
+
+* `4`
+* `4.1`
+* `4.2`
+
+Only applies to videos encoded in the H.264 format.
+
+==== `codec`
+
+Sets the encoder to use for video output. Accepts the following values:
+
+* `h264` - use H.264 encoder (the default)
+* `mjpeg` - use MJPEG encoder
+* `yuv420` - output uncompressed YUV420 frames.
+* `libav` - use the libav backend to encode audio and video (for more information, see xref:camera_software.adoc#libav-integration-with-rpicam-vid[`libav`])
+
+==== `save-pts`
+
+WARNING: Raspberry Pi 5 does not support the `save-pts` option. Use `libav` to automatically generate timestamps for container formats instead.
+
+Enables frame timestamp output, which allows you to convert the bitstream into a container format using a tool like `mkvmerge`. Accepts a plaintext file name for the timestamp output file.
+
+Example: `rpicam-vid -o test.h264 --save-pts timestamps.txt`
+
+You can then use the following command to generate an MKV container file from the bitstream and timestamps file:
+
+[source,console]
+----
+$ mkvmerge -o test.mkv --timecodes 0:timestamps.txt test.h264
+----
+
+==== `keypress`
+
+Alias: `-k`
+
+Allows the CLI to enable and disable video output using the *Enter* key. Always starts in the recording state unless specified otherwise with xref:camera_software.adoc#initial[`initial`]. Type the `x` key and press *Enter* to exit. Does not accept a value.
+
+==== `signal`
+
+Alias: `-s`
+
+Allows the CLI to enable and disable video output using `SIGUSR1`. Use `SIGUSR2` to exit. Always starts in the recording state unless specified otherwise with xref:camera_software.adoc#initial[`initial`]. Does not accept a value.
+
+==== `initial`
+
+Default value: `record`
+
+Specifies whether to start the application with video output enabled or disabled. Accepts the following values:
+
+* `record`: Starts with video output enabled.
+* `pause`: Starts with video output disabled.
+
+Use this option with either xref:camera_software.adoc#keypress[`keypress`] or xref:camera_software.adoc#signal[`signal`] to toggle between the two states.
+
+==== `split`
+
+When toggling recording with xref:camera_software.adoc#keypress[`keypress`] or xref:camera_software.adoc#signal[`signal`], writes the video output from separate recording sessions into separate files. Does not accept a value. Unless combined with xref:camera_software.adoc#output[`output`] to specify unique names for each file, overwrites each time it writes a file.
+
+==== `segment`
+
+Cuts video output into multiple files of the passed duration. Accepts a duration in milliseconds. If passed a very small duration (for instance, `1`), records each frame to a separate output file to simulate burst capture.
+
+You can specify separate filenames for each file using string formatting, e.g. `--output test%04d.h264`.
+
+==== `circular`
+
+Default value: `4`
+
+Writes video recording into a circular buffer in memory. When the application quits, records the circular buffer to disk. Accepts an optional size in megabytes.
+
+==== `inline`
+
+Writes a sequence header in every Iframe (intra frame). This can help clients decode the video sequence from any point in the video, instead of just the beginning. Recommended with xref:camera_software.adoc#segment[`segment`], xref:camera_software.adoc#split[`split`], xref:camera_software.adoc#circular[`circular`], and streaming options.
+
+Only applies to videos encoded in the H.264 format. Does not accept a value.
+
+==== `listen`
+
+Waits for an incoming client connection before encoding video. Intended for network streaming over TCP/IP. Does not accept a value.
+
+==== `frames`
+
+Records exactly the specified number of frames. Any non-zero value overrides xref:camera_software.adoc#timeout[`timeout`]. Accepts a nonzero integer.
+
+==== `framerate`
+
+Records exactly the specified framerate. Accepts a nonzero integer.
+
+==== `low-latency`
+
+On a Pi 5, the `--low-latency` option will reduce the encoding latency, which may be beneficial for real-time streaming applications, in return for (slightly) less good coding efficiency (for example, B frames and arithmetic coding will no longer be used).
+
+==== `sync`
+
+Run the camera in software synchronisation mode, where multiple cameras synchronise frames to the same moment in time. The `sync` mode can be set to either `client` or `server`. For more information, please refer to the detailed explanation of xref:camera_software.adoc#software-camera-synchronisation[how software synchronisation works].
\ No newline at end of file
diff --git a/documentation/asciidoc/computers/camera/rpicam_raw.adoc b/documentation/asciidoc/computers/camera/rpicam_raw.adoc
new file mode 100644
index 000000000..210e0e20a
--- /dev/null
+++ b/documentation/asciidoc/computers/camera/rpicam_raw.adoc
@@ -0,0 +1,26 @@
+=== `rpicam-raw`
+
+`rpicam-raw` records video as raw Bayer frames directly from the sensor. It does not show a preview window. To record a two second raw clip to a file named `test.raw`, run the following command:
+
+[source,console]
+----
+$ rpicam-raw -t 2000 -o test.raw
+----
+
+`rpicam-raw` outputs raw frames with no formatting information at all, one directly after another. The application prints the pixel format and image dimensions to the terminal window to help the user interpret the pixel data.
+
+By default, `rpicam-raw` outputs raw frames in a single, potentially very large, file. Use the xref:camera_software.adoc#segment[`segment`] option to direct each raw frame to a separate file, using the `%05d` xref:camera_software.adoc#output[directive] to make each frame filename unique:
+
+[source,console]
+----
+$ rpicam-raw -t 2000 --segment 1 -o test%05d.raw
+----
+
+With a fast storage device, `rpicam-raw` can write 18MB 12-megapixel HQ camera frames to disk at 10fps. `rpicam-raw` has no capability to format output frames as DNG files; for that functionality, use xref:camera_software.adoc#rpicam-still[`rpicam-still`]. Use the xref:camera_software.adoc#framerate[`framerate`] option at a level beneath 10 to avoid dropping frames:
+
+[source,console]
+----
+$ rpicam-raw -t 5000 --width 4056 --height 3040 -o test.raw --framerate 8
+----
+
+For more information on the raw formats, see the xref:camera_software.adoc#mode[`mode` documentation].
diff --git a/documentation/asciidoc/computers/camera/rpicam_still.adoc b/documentation/asciidoc/computers/camera/rpicam_still.adoc
new file mode 100644
index 000000000..08ec164e0
--- /dev/null
+++ b/documentation/asciidoc/computers/camera/rpicam_still.adoc
@@ -0,0 +1,206 @@
+=== `rpicam-still`
+
+`rpicam-still`, like `rpicam-jpeg`, helps you capture images on Raspberry Pi devices.
+Unlike `rpicam-jpeg`, `rpicam-still` supports many options provided in the legacy `raspistill` application.
+
+To capture a full resolution JPEG image and save it to a file named `test.jpg`, run the following command:
+
+[source,console]
+----
+$ rpicam-still --output test.jpg
+----
+
+==== Encoders
+
+`rpicam-still` can save images in multiple formats, including `png`, `bmp`, and both RGB and YUV binary pixel dumps. To read these binary dumps, any application reading the files must understand the pixel arrangement.
+
+Use the xref:camera_software.adoc#encoding[`encoding`] option to specify an output format. The file name passed to xref:camera_software.adoc#output[`output`] has no impact on the output file type.
+
+To capture a full resolution PNG image and save it to a file named `test.png`, run the following command:
+
+[source,console]
+----
+$ rpicam-still --encoding png --output test.png
+----
+
+For more information about specifying an image format, see the xref:camera_software.adoc#encoding[`encoding` option reference].
+
+==== Capture raw images
+
+Raw images are the images produced directly by the image sensor, before any processing is applied to them either by the Image Signal Processor (ISP) or CPU. Colour image sensors usually use the Bayer format. Use the xref:camera_software.adoc#raw[`raw`] option to capture raw images.
+
+To capture an image, save it to a file named `test.jpg`, and also save a raw version of the image to a file named `test.dng`, run the following command:
+
+[source,console]
+----
+$ rpicam-still --raw --output test.jpg
+----
+
+`rpicam-still` saves raw images in the DNG (Adobe Digital Negative) format. To determine the filename of the raw images, `rpicam-still` uses the same name as the output file, with the extension changed to `.dng`. To work with DNG images, use an application like https://en.wikipedia.org/wiki/Dcraw[Dcraw] or https://en.wikipedia.org/wiki/RawTherapee[RawTherapee].
+
+DNG files contain metadata about the image capture, including black levels, white balance information and the colour matrix used by the ISP to produce the JPEG. Use https://exiftool.org/[ExifTool] to view DNG metadata. The following output shows typical metadata stored in a raw image captured by a Raspberry Pi using the HQ camera:
+
+----
+File Name : test.dng
+Directory : .
+File Size : 24 MB
+File Modification Date/Time : 2021:08:17 16:36:18+01:00
+File Access Date/Time : 2021:08:17 16:36:18+01:00
+File Inode Change Date/Time : 2021:08:17 16:36:18+01:00
+File Permissions : rw-r--r--
+File Type : DNG
+File Type Extension : dng
+MIME Type : image/x-adobe-dng
+Exif Byte Order : Little-endian (Intel, II)
+Make : Raspberry Pi
+Camera Model Name : /base/soc/i2c0mux/i2c@1/imx477@1a
+Orientation : Horizontal (normal)
+Software : rpicam-still
+Subfile Type : Full-resolution Image
+Image Width : 4056
+Image Height : 3040
+Bits Per Sample : 16
+Compression : Uncompressed
+Photometric Interpretation : Color Filter Array
+Samples Per Pixel : 1
+Planar Configuration : Chunky
+CFA Repeat Pattern Dim : 2 2
+CFA Pattern 2 : 2 1 1 0
+Black Level Repeat Dim : 2 2
+Black Level : 256 256 256 256
+White Level : 4095
+DNG Version : 1.1.0.0
+DNG Backward Version : 1.0.0.0
+Unique Camera Model : /base/soc/i2c0mux/i2c@1/imx477@1a
+Color Matrix 1 : 0.8545269369 -0.2382823821 -0.09044229197 -0.1890484985 1.063961506 0.1062747385 -0.01334283455 0.1440163847 0.2593136724
+As Shot Neutral : 0.4754476844 1 0.413686484
+Calibration Illuminant 1 : D65
+Strip Offsets : 0
+Strip Byte Counts : 0
+Exposure Time : 1/20
+ISO : 400
+CFA Pattern : [Blue,Green][Green,Red]
+Image Size : 4056x3040
+Megapixels : 12.3
+Shutter Speed : 1/20
+----
+
+To find the analogue gain, divide the ISO number by 100.
+The Auto White Balance (AWB) algorithm determines a single calibrated illuminant, which is always labelled `D65`.
+
+==== Capture long exposures
+
+To capture very long exposure images, disable the Automatic Exposure/Gain Control (AEC/AGC) and Auto White Balance (AWB). These algorithms will otherwise force the user to wait for a number of frames while they converge.
+
+To disable these algorithms, supply explicit values for gain and AWB. Because long exposures take plenty of time already, it often makes sense to skip the preview phase entirely with the xref:camera_software.adoc#immediate[`immediate`] option.
+
+To perform a 100 second exposure capture, run the following command:
+
+[source,console]
+----
+$ rpicam-still -o long_exposure.jpg --shutter 100000000 --gain 1 --awbgains 1,1 --immediate
+----
+
+To find the maximum exposure times of official Raspberry Pi cameras, see xref:../accessories/camera.adoc#hardware-specification[the camera hardware specification].
+
+==== Create a time lapse video
+
+To create a time lapse video, capture a still image at a regular interval, such as once a minute, then use an application to stitch the pictures together into a video.
+
+[tabs]
+======
+`rpicam-still` time lapse mode::
++
+To use the built-in time lapse mode of `rpicam-still`, use the xref:camera_software.adoc#timelapse[`timelapse`] option. This option accepts a value representing the period of time you want your Raspberry Pi to wait between captures, in milliseconds.
++
+First, create a directory where you can store your time lapse photos:
++
+[source,console]
+----
+$ mkdir timelapse
+----
++
+Run the following command to create a time lapse over 30 seconds, recording a photo every two seconds, saving output into `image0000.jpg` through `image0013.jpg`:
++
+[source,console]
+----
+$ rpicam-still --timeout 30000 --timelapse 2000 -o timelapse/image%04d.jpg
+----
+
+`cron`::
++
+You can also automate time lapses with `cron`. First, create the script, named `timelapse.sh` containing the following commands. Replace the `` placeholder with the name of your user account on your Raspberry Pi:
++
+[source,bash]
+----
+#!/bin/bash
+DATE=$(date +"%Y-%m-%d_%H%M")
+rpicam-still -o /home//timelapse/$DATE.jpg
+----
++
+Then, make the script executable:
++
+[source,console]
+----
+$ chmod +x timelapse.sh
+----
++
+Create the `timelapse` directory into which you'll save time lapse pictures:
++
+[source,console]
+----
+$ mkdir timelapse
+----
++
+Open your crontab for editing:
++
+[source,console]
+----
+$ crontab -e
+----
++
+Once you have the file open in an editor, add the following line to schedule an image capture every minute, replacing the `` placeholder with the username of your primary user account:
++
+----
+* * * * * /home//timelapse.sh 2>&1
+----
++
+Save and exit, and you should see this message:
++
+----
+crontab: installing new crontab
+----
++
+To stop recording images for the time lapse, run `crontab -e` again and remove the above line from your crontab.
+
+======
+
+===== Stitch images together
+
+Once you have a series of time lapse photos, you probably want to combine them into a video. Use `ffmpeg` to do this on a Raspberry Pi.
+
+First, install `ffmpeg`:
+
+[source,console]
+----
+$ sudo apt install ffmpeg
+----
+
+Run the following command from the directory that contains the `timelapse` directory to convert your JPEG files into an mp4 video:
+
+[source,console]
+----
+$ ffmpeg -r 10 -f image2 -pattern_type glob -i 'timelapse/*.jpg' -s 1280x720 -vcodec libx264 timelapse.mp4
+----
+
+The command above uses the following parameters:
+
+* `-r 10`: sets the frame rate (Hz value) to ten frames per second in the output video
+* `-f image2`: sets `ffmpeg` to read from a list of image files specified by a pattern
+* `-pattern_type glob`: use wildcard patterns (globbing) to interpret filename input with `-i`
+* `-i 'timelapse/*.jpg'`: specifies input files to match JPG files in the `timelapse` directory
+* `-s 1280x720`: scales to 720p
+* `-vcodec libx264`: uses the software x264 encoder
+* `timelapse.mp4`: the name of the output video file
+
+For more information about `ffmpeg` options, run `ffmpeg --help` in a terminal.
diff --git a/documentation/asciidoc/computers/camera/rpicam_vid.adoc b/documentation/asciidoc/computers/camera/rpicam_vid.adoc
new file mode 100644
index 000000000..e88c5b762
--- /dev/null
+++ b/documentation/asciidoc/computers/camera/rpicam_vid.adoc
@@ -0,0 +1,98 @@
+=== `rpicam-vid`
+
+`rpicam-vid` helps you capture video on Raspberry Pi devices. `rpicam-vid` displays a preview window and writes an encoded bitstream to the specified output. This produces an unpackaged video bitstream that is not wrapped in any kind of container (such as an mp4 file) format.
+
+NOTE: When available, `rpicam-vid` uses hardware H.264 encoding.
+
+For example, the following command writes a ten-second video to a file named `test.h264`:
+
+[source,console]
+----
+$ rpicam-vid -t 10s -o test.h264
+----
+
+You can play the resulting file with ffplay and other video players:
+
+[source,console]
+----
+$ ffplay test.h264
+----
+
+[WARNING]
+====
+Older versions of vlc were able to play H.264 files correctly, but recent versions do not - displaying only a few, or possibly garbled, frames. You should either use a different media player, or save your files in a more widely supported container format - such as MP4 (see below).
+====
+
+On Raspberry Pi 5, you can output to the MP4 container format directly by specifying the `mp4` file extension for your output file:
+
+[source,console]
+----
+$ rpicam-vid -t 10s -o test.mp4
+----
+
+On Raspberry Pi 4, or earlier devices, you can save MP4 files using:
+
+[source,console]
+----
+$ rpicam-vid -t 10s --codec libav -o test.mp4
+----
+
+==== Encoders
+
+`rpicam-vid` supports motion JPEG as well as both uncompressed and unformatted YUV420:
+
+[source,console]
+----
+$ rpicam-vid -t 10000 --codec mjpeg -o test.mjpeg
+----
+
+[source,console]
+----
+$ rpicam-vid -t 10000 --codec yuv420 -o test.data
+----
+
+The xref:camera_software.adoc#codec[`codec`] option determines the output format, not the extension of the output file.
+
+The xref:camera_software.adoc#segment[`segment`] option breaks output files up into chunks of the segment size (given in milliseconds). This is handy for breaking a motion JPEG stream up into individual JPEG files by specifying very short (1 millisecond) segments. For example, the following command combines segments of 1 millisecond with a counter in the output file name to generate a new filename for each segment:
+
+[source,console]
+----
+$ rpicam-vid -t 10000 --codec mjpeg --segment 1 -o test%05d.jpeg
+----
+
+==== Capture high framerate video
+
+To minimise frame drops for high framerate (> 60fps) video, try the following configuration tweaks:
+
+* Set the https://en.wikipedia.org/wiki/Advanced_Video_Coding#Levels[H.264 target level] to 4.2 with `--level 4.2`.
+* Disable software colour denoise processing by setting the xref:camera_software.adoc#denoise[`denoise`] option to `cdn_off`.
+* Disable the display window with xref:camera_software.adoc#nopreview[`nopreview`] to free up some additional CPU cycles.
+* Set `force_turbo=1` in xref:../computers/config_txt.adoc#what-is-config-txt[`/boot/firmware/config.txt`] to ensure that the CPU clock does not throttle during video capture. For more information, see xref:config_txt.adoc#force_turbo[the `force_turbo` documentation].
+* Adjust the ISP output resolution with `--width 1280 --height 720` or something even lower to achieve your framerate target.
+* On Raspberry Pi 4, you can overclock the GPU to improve performance by adding `gpu_freq=550` or higher in `/boot/firmware/config.txt`. See xref:config_txt.adoc#overclocking[the overclocking documentation] for further details.
+
+The following command demonstrates how you might achieve 1280×720 120fps video:
+
+[source,console]
+----
+$ rpicam-vid --level 4.2 --framerate 120 --width 1280 --height 720 --save-pts timestamp.pts -o video.264 -t 10000 --denoise cdn_off -n
+----
+
+==== `libav` integration with `rpicam-vid`
+
+`rpicam-vid` can use the `ffmpeg`/`libav` codec backend to encode audio and video streams. You can either save these streams to a file or stream them over the network. `libav` uses hardware H.264 video encoding when present.
+
+To enable the `libav` backend, pass `libav` to the xref:camera_software.adoc#codec[`codec`] option:
+
+[source,console]
+----
+$ rpicam-vid --codec libav --libav-format avi --libav-audio --output example.avi
+----
+
+==== Low latency video with the Pi 5
+
+Pi 5 uses software video encoders. These generally output frames with a longer latency than the old hardware encoders, and this can sometimes be an issue for real-time streaming applications.
+
+In this case, please add the option `--low-latency` to the `rpicam-vid` command. This will alter certain encoder options to output the encoded frame more quickly.
+
+The downside is that coding efficiency is (slightly) less good, and that the processor's multiple cores may be used (slightly) less efficiently. The maximum framerate that can be encoded may be slightly reduced (though it will still easily achieve 1080p30).
diff --git a/documentation/asciidoc/computers/camera/streaming.adoc b/documentation/asciidoc/computers/camera/streaming.adoc
new file mode 100644
index 000000000..ffcf9a656
--- /dev/null
+++ b/documentation/asciidoc/computers/camera/streaming.adoc
@@ -0,0 +1,206 @@
+== Stream video over a network with `rpicam-apps`
+
+This section describes how to stream video over a network using `rpicam-vid`. Whilst it's possible to stream very simple formats without using `libav`, for most applications we recommend using the xref:camera_software.adoc#libav-integration-with-rpicam-vid[`libav` backend].
+
+=== UDP
+
+To stream video over UDP using a Raspberry Pi as a server, use the following command, replacing the `` placeholder with the IP address of the client or multicast address and replacing the `` placeholder with the port you would like to use for streaming:
+
+[source,console]
+----
+$ rpicam-vid -t 0 -n --inline -o udp://:
+----
+
+To view video streamed over UDP using a Raspberry Pi as a client, use the following command, replacing the `` placeholder with the port you would like to stream from:
+
+[source,console]
+----
+$ ffplay udp://@: -fflags nobuffer -flags low_delay -framedrop
+----
+As noted previously, `vlc` no longer handles unencapsulated H.264 streams.
+
+In fact, support for unencapsulated H.264 can generally be quite poor so it is often better to send an MPEG-2 Transport Stream instead. Making use of `libav`, this can be accomplished with:
+
+[source,console]
+----
+$ rpicam-vid -t 0 -n --codec libav --libav-format mpegts -o udp://:
+----
+
+In this case, we can also play the stream successfully with `vlc`:
+
+[source,console]
+----
+$ vlc udp://@:
+----
+
+=== TCP
+
+You can also stream video over TCP. As before, we can send an unencapsulated H.264 stream over the network. To use a Raspberry Pi as a server:
+
+[source,console]
+----
+$ rpicam-vid -t 0 -n --inline --listen -o tcp://0.0.0.0:
+----
+
+To view video streamed over TCP using a Raspberry Pi as a client, assuming the server is running at 30 frames per second, use the following command:
+
+[source,console]
+----
+$ ffplay tcp://: -vf "setpts=N/30" -fflags nobuffer -flags low_delay -framedrop
+----
+
+But as with the UDP examples, it is often preferable to send an MPEG-2 Transport Stream as this is generally better supported. To do this, use:
+
+[source,console]
+----
+$ rpicam-vid -t 0 -n --codec libav --libav-format mpegts -o tcp://0.0.0.0:?listen=1
+----
+
+We can now play this back using a variety of media players, including `vlc`:
+
+[source,console]
+----
+$ vlc tcp://:
+----
+
+=== RTSP
+
+We can use VLC as an RTSP server, however, we must send it an MPEG-2 Transport Stream as it no longer understands unencapsulated H.264:
+
+[source,console]
+----
+$ rpicam-vid -t 0 -n --codec libav --libav-format mpegts -o - | cvlc stream:///dev/stdin --sout '#rtp{sdp=rtsp://:8554/stream1}'
+----
+
+To view video streamed over RTSP using a Raspberry Pi as a client, use the following command:
+
+[source,console]
+----
+$ ffplay rtsp://:8554/stream1 -fflags nobuffer -flags low_delay -framedrop
+----
+
+Alternatively, use the following command on a client to stream using VLC:
+
+[source,console]
+----
+$ vlc rtsp://:8554/stream1
+----
+
+If you want to see a preview window on the server, just drop the `-n` option (see xref:camera_software.adoc#nopreview[`nopreview`]).
+
+=== `libav` and Audio
+
+We have already been using `libav` as the backend for network streaming. `libav` allows us to add an audio stream, so long as we're using a format - like the MPEG-2 Transport Stream - that permits audio data.
+
+We can take one of our previous commands, like the one for streaming an MPEG-2 Transport Stream over TCP, and simply add the `--libav-audio` option:
+
+[source,console]
+----
+$ rpicam-vid -t 0 --codec libav --libav-format mpegts --libav-audio -o "tcp://:?listen=1"
+----
+
+You can stream over UDP with a similar command:
+
+[source,console]
+----
+$ rpicam-vid -t 0 --codec libav --libav-format mpegts --libav-audio -o "udp://:"
+----
+
+=== GStreamer
+
+https://gstreamer.freedesktop.org/[GStreamer] is a Linux framework for reading, processing and playing multimedia files. We can also use it in conjunction with `rpicam-vid` for network streaming.
+
+This setup uses `rpicam-vid` to output an H.264 bitstream to stdout, though as we've done previously, we're going to encapsulate it in an MPEG-2 Transport Stream for better downstream compatibility.
+
+Then, we use the GStreamer `fdsrc` element to receive the bitstream, and extra GStreamer elements to send it over the network. On the server, run the following command to start the stream, replacing the `` placeholder with the IP address of the client or multicast address and replacing the `` placeholder with the port you would like to use for streaming:
+
+[source,console]
+----
+$ rpicam-vid -t 0 -n --codec libav --libav-format mpegts -o - | gst-launch-1.0 fdsrc fd=0 ! udpsink host= port=
+----
+
+We could of course use anything (such as vlc) as the client, and the best GStreamer clients for playback are beyond the scope of this document. However, we note that the following pipeline (with the obvious substitutions) would work on a Pi 4 or earlier device:
+
+[source,console]
+----
+$ gst-launch-1.0 udpsrc address= port= ! tsparse ! tsdemux ! h264parse ! queue ! v4l2h264dec ! autovideosink
+----
+
+For a Pi 5, replace `v4l2h264dec` by `avdec_h264`.
+
+TIP: To test this configuration, run the server and client commands in separate terminals on the same device, using `localhost` as the address.
+
+==== `libcamerasrc` GStreamer element
+
+`libcamera` provides a `libcamerasrc` GStreamer element which can be used directly instead of `rpicam-vid`. To use this element, run the following command on the server, replacing the `` placeholder with the IP address of the client or multicast address and replacing the `` placeholder with the port you would like to use for streaming. On a Pi 4 or earlier device, use:
+
+[source,console]
+----
+$ gst-launch-1.0 libcamerasrc ! capsfilter caps=video/x-raw,width=640,height=360,format=NV12,interlace-mode=progressive ! v4l2h264enc extra-controls="controls,repeat_sequence_header=1" ! 'video/x-h264,level=(string)4' ! h264parse ! mpegtsmux ! udpsink host= port=
+----
+On a Pi 5 you would have to replace `v4l2h264enc extra-controls="controls,repeat_sequence_header=1"` by `x264enc speed-preset=1 threads=1`.
+
+On the client we could use the same playback pipeline as we did just above, or other streaming media players.
+
+=== WebRTC
+
+Streaming over WebRTC (for example, to web browsers) is best accomplished using third party software. https://github.com/bluenviron/mediamtx[MediaMTX], for example, includes native Raspberry Pi camera support which makes it easy to use.
+
+To install it, download the latest version from the https://github.com/bluenviron/mediamtx/releases[releases] page. Raspberry Pi OS 64-bit users will want the "linux_arm64v8" compressed tar file (ending `.tar.gz`). Unpack it and you will get a `mediamtx` executable and a configuration file called `mediamtx.yml`.
+
+It's worth backing up the `mediamtx.yml` file because it documents many Raspberry Pi camera options that you may want to investigate later.
+
+To stream the camera, replace the contents of `mediamtx.yml` by:
+----
+paths:
+ cam:
+ source: rpiCamera
+----
+and start the `mediamtx` executable. On a browser, enter `http://:8889/cam` into the address bar.
+
+If you want MediaMTX to acquire the camera only when the stream is requested, add the following line to the previous `mediamtx.yml`:
+----
+ sourceOnDemand: yes
+----
+Consult the original `mediamtx.yml` for additional configuration parameters that let you select the image size, the camera mode, the bitrate and so on - just search for `rpi`.
+
+==== Customised image streams with WebRTC
+
+MediaMTX is great if you want to stream just the camera images. But what if we want to add some extra information or overlay, or do some extra processing on the images?
+
+Before starting, ensure that you've built a version of `rpicam-apps` that includes OpenCV support. Check it by running
+
+[source,console]
+----
+$ rpicam-hello --post-process-file rpicam-apps/assets/annotate_cv.json
+----
+and looking for the overlaid text information at the top of the image.
+
+Next, paste the following into your `mediamtx.yml` file:
+----
+paths:
+ cam:
+ source: udp://127.0.0.1:1234
+----
+
+Now, start `mediamtx` and then, if you're using a Pi 5, in a new terminal window, enter:
+
+[source,console]
+----
+$ rpicam-vid -t 0 -n --codec libav --libav-video-codec-opts "profile=baseline" --libav-format mpegts -o udp://127.0.0.1:1234?pkt_size=1316 --post-process-file rpicam-apps/assets/annotate_cv.json
+----
+(On a Pi 4 or earlier device, leave out the `--libav-video-codec-opts "profile=baseline"` part of the command.)
+
+On another computer, you can now visit the same address as before, namely `http://:8889/cam`.
+
+The reason for specifying "baseline" profile on a Pi 5 is that MediaMTX doesn't support B frames, so we need to stop the encoder from producing them. On earlier devices, with hardware encoders, B frames are never generated so there is no issue. On a Pi 5 you could alternatively remove this option and replace it with `--low-latency` which will also prevent B frames, and produce a (slightly less well compressed) stream with reduced latency.
+
+[NOTE]
+====
+If you notice occasional pauses in the video stream, this may be because the UDP receive buffers on the Pi (passing data from `rpicam-vid` to MediaMTX) are too small. To increase them permanently, add
+----
+net.core.rmem_default=1000000
+net.core.rmem_max=1000000
+----
+to your `/etc/sysctl.conf` file (and reboot or run `sudo sysctl -p`).
+====
\ No newline at end of file
diff --git a/documentation/asciidoc/computers/camera/timelapse.adoc b/documentation/asciidoc/computers/camera/timelapse.adoc
deleted file mode 100644
index fc7060a79..000000000
--- a/documentation/asciidoc/computers/camera/timelapse.adoc
+++ /dev/null
@@ -1,78 +0,0 @@
-== Application Notes
-
-=== Creating Timelapse Video
-
-To create a time-lapse video, you simply configure the Raspberry Pi to take a picture at a regular interval, such as once a minute, then use an application to stitch the pictures together into a video. There are a couple of ways of doing this.
-
-==== Using `libcamera-still` or `raspistill` Timelapse Mode
-
-Both `libcamera-still` and `raspistill` have a built in time-lapse mode, using the `--timelapse` command line switch. The value that follows the switch is the time between shots in milliseconds:
-
-----
-libcamera-still -t 30000 --timelapse 2000 -o image%04d.jpg
-----
-
-or
-
-----
-raspistill -t 30000 --timelapse 2000 -o image%04d.jpg
-----
-
-[NOTE]
-====
-The `%04d` in the output filename: this indicates the point in the filename where you want a frame count number to appear. So, for example, the command above will produce a capture every two seconds (2000ms), over a total period of 30 seconds (30000ms), named image0001.jpg, image0002.jpg, and so on, through to image0015.jpg.
-
-The `%04d` indicates a four-digit number, with leading zeros added to make up the required number of digits. So, for example, `%08d` would result in an eight-digit number. You can miss out the `0` if you don't want leading zeros.
-
-If a timelapse value of 0 is entered, the application will take pictures as fast as possible. Note that there's an minimum enforced pause of approximately 30 milliseconds between captures to ensure that exposure calculations can be made.
-====
-
-==== Automating using `cron` Jobs
-
-A good way to automate taking a picture at a regular interval is using `cron`. Open the cron table for editing:
-
-----
-crontab -e
-----
-
-This will either ask which editor you would like to use, or open in your default editor. Once you have the file open in an editor, add the following line to schedule taking a picture every minute (referring to the Bash script from the xref:camera_software.adoc#raspistill[raspistill page], though you can use `libcamera-still` in exactly the same way):
-
-----
-* * * * * /home/pi/camera.sh 2>&1
-----
-
-Save and exit and you should see the message:
-
-----
-crontab: installing new crontab
-----
-
-Make sure that you use e.g. `%04d` to ensure that each image is written to a new file: if you don't, then each new image will overwrite the previous file.
-
-==== Stitching Images Together
-
-Now you'll need to stitch the photos together into a video. You can do this on the Raspberry Pi using `ffmpeg` but the processing will be slow. You may prefer to transfer the image files to your desktop computer or laptop and produce the video there.
-
-First you will need to install `ffmpeg` if it's not already installed.
-
-----
-sudo apt install ffmpeg
-----
-
-Now you can use the `ffmpeg` tool to convert your JPEG files into an mp4 video:
-
-----
-ffmpeg -r 10 -f image2 -pattern_type glob -i 'image*.jpg' -s 1280x720 -vcodec libx264 timelapse.mp4
-----
-
-On a Raspberry Pi 3, this can encode a little more than two frames per second. The performance of other Raspberry Pi models will vary. The parameters used are:
-
-* `-r 10` Set frame rate (Hz value) to ten frames per second in the output video.
-* `-f image2` Set ffmpeg to read from a list of image files specified by a pattern.
-* `-pattern_type glob` When importing the image files, use wildcard patterns (globbing) to interpret the filename input by `-i`, in this case `image*.jpg`, where `*` would be the image number.
-* `-i 'image*.jpg'` The input file specification (to match the files produced during the capture).
-* `-s 1280x720` Scale to 720p. You can also use 1920x1080, or lower resolutions, depending on your requirements.
-* `-vcodec libx264` Use the software x264 encoder.
-* `timelapse.mp4` The name of the output video file.
-
-`ffmpeg` has a comprehensive parameter set for varying encoding options and other settings. These can be listed using `ffmpeg --help`.
diff --git a/documentation/asciidoc/computers/camera/troubleshooting.adoc b/documentation/asciidoc/computers/camera/troubleshooting.adoc
new file mode 100644
index 000000000..4c94ce12f
--- /dev/null
+++ b/documentation/asciidoc/computers/camera/troubleshooting.adoc
@@ -0,0 +1,16 @@
+== Troubleshooting
+
+If your Camera Module doesn't work like you expect, try some of the following fixes:
+
+* On Raspberry Pi 3 and earlier devices running Raspberry Pi OS _Bullseye_ or earlier:
+** To enable hardware-accelerated camera previews, enable *Glamor*. To enable Glamor, enter `sudo raspi-config` in a terminal, select `Advanced Options` > `Glamor` > `Yes`. Then reboot your Raspberry Pi with `sudo reboot`.
+** If you see an error related to the display driver, add `dtoverlay=vc4-fkms-v3d` or `dtoverlay=vc4-kms-v3d` to `/boot/config.txt`. Then reboot your Raspberry Pi with `sudo reboot`.
+* On Raspberry Pi 3 and earlier, the graphics hardware can only support images up to 2048×2048 pixels, which places a limit on the camera images that can be resized into the preview window. As a result, video encoding of images larger than 2048 pixels wide produces corrupted or missing preview images.
+* On Raspberry Pi 4, the graphics hardware can only support images up to 4096×4096 pixels, which places a limit on the camera images that can be resized into the preview window. As a result, video encoding of images larger than 4096 pixels wide produces corrupted or missing preview images.
+* The preview window may show display tearing in a desktop environment. This is a known, unfixable issue.
+* Check that the FFC (Flat Flexible Cable) is firmly seated, fully inserted, and that the contacts face the correct direction. The FFC should be evenly inserted, not angled.
+* If you use a connector between the camera and your Raspberry Pi, check that the ports on the connector are firmly seated, fully inserted, and that the contacts face the correct direction.
+* Check to make sure that the FFC (Flat Flexible Cable) is attached to the CSI (Camera Serial Interface), _not_ the DSI (Display Serial Interface). The connector fits into either port, but only the CSI port powers and controls the camera. Look for the `CSI` label printed on the board near the port.
+* xref:os.adoc#update-software[Update to the latest software.]
+* Try a different power supply. The Camera Module adds about 200-250mA to the power requirements of your Raspberry Pi. If your power supply is low quality, your Raspberry Pi may not be able to power the Camera module.
+* If you've checked all the above issues and your Camera Module still doesn't work like you expect, try posting on our forums for more help.
diff --git a/documentation/asciidoc/computers/camera/v4l2.adoc b/documentation/asciidoc/computers/camera/v4l2.adoc
index 8238cf11f..7cc2ceabc 100644
--- a/documentation/asciidoc/computers/camera/v4l2.adoc
+++ b/documentation/asciidoc/computers/camera/v4l2.adoc
@@ -1,44 +1,44 @@
-== V4L2 Drivers
+== V4L2 drivers
-V4L2 drivers provide a standard Linux interface for accessing camera and codec features. They are loaded automatically when the system is started, though in some non-standard situations you may need to xref:camera_software.adoc#if-you-do-need-to-alter-the-configuration[load camera drivers explicitly].
+V4L2 drivers provide a standard Linux interface for accessing camera and codec features. Normally, Linux loads drivers automatically during boot. But in some situations you may need to xref:camera_software.adoc#configuration[load camera drivers explicitly].
-=== Driver differences when using `libcamera` or the legacy stack
-
-On systems using `libcamera`, `/dev/video0` and `/dev/video1` are V4L2 drivers for Unicam, the Raspberry Pi's CSI-2 receiver. The Raspberry Pi has two CSI-2 receivers, each managed by one of these device nodes.
-
-On systems using the legacy stack, `/dev/video0` is a V4L2 driver that gives access to the full camera system using the proprietary Broadcom driver on the GPU. There is no `/dev/video1`. There are no Unicam drivers, though there is a legacy _MMAL Rawcam_ component.
-
-The other device nodes are always the same, and are listed in the table below.
+=== Device nodes when using `libcamera`
[cols="1,^3"]
|===
-| /dev/videoX | Default Action
+| /dev/videoX | Default action
+
+| `video0`
+| Unicam driver for the first CSI-2 receiver
+
+| `video1`
+| Unicam driver for the second CSI-2 receiver
-| video10
-| Video decode.
+| `video10`
+| Video decode
-| video11
-| Video encode.
+| `video11`
+| Video encode
-| video12
-| Simple ISP. Can perform conversion and resizing between RGB/YUV formats, and also Bayer to RGB/YUV conversion.
+| `video12`
+| Simple ISP, can perform conversion and resizing between RGB/YUV formats in addition to Bayer to RGB/YUV conversion
-| video13
-| Input to fully programmable ISP.
+| `video13`
+| Input to fully programmable ISP
-| video14
-| High resolution output from fully programmable ISP.
+| `video14`
+| High resolution output from fully programmable ISP
-| video15
-| Low result output from fully programmable ISP.
+| `video15`
+| Low result output from fully programmable ISP
-| video16
-| Image statistics from fully programmable ISP.
+| `video16`
+| Image statistics from fully programmable ISP
-| video19
-| HEVC Decode
+| `video19`
+| HEVC decode
|===
-=== Using the Driver
+=== Use the V4L2 drivers
-Please see the https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/v4l2.html[V4L2 documentation] for details on using this driver.
+For more information on how to use the V4L2 drivers, see the https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/v4l2.html[V4L2 documentation].
diff --git a/documentation/asciidoc/computers/camera/webcams.adoc b/documentation/asciidoc/computers/camera/webcams.adoc
new file mode 100644
index 000000000..dbfe0c8e4
--- /dev/null
+++ b/documentation/asciidoc/computers/camera/webcams.adoc
@@ -0,0 +1,169 @@
+== Use a USB webcam
+
+Most Raspberry Pi devices have dedicated ports for camera modules. Camera modules are high-quality, highly-configurable cameras popular with Raspberry Pi users.
+
+However, for many purposes a USB webcam has everything you need to record pictures and videos from your Raspberry Pi. This section explains how to use a USB webcam with your Raspberry Pi.
+
+=== Install dependencies
+
+First, install the `fswebcam` package:
+
+[source,console]
+----
+$ sudo apt install fswebcam
+----
+
+Next, add your username to the `video` group, otherwise you may see 'permission denied' errors:
+
+[source,console]
+----
+$ sudo usermod -a -G video <username>
+----
+
+To check that the user has been added to the group correctly, use the `groups` command.
+
+=== Take a photo
+
+Run the following command to take a picture using the webcam and save the image to a file named `image.jpg`:
+
+[source,console]
+----
+$ fswebcam image.jpg
+----
+
+You should see output similar to the following:
+
+----
+--- Opening /dev/video0...
+Trying source module v4l2...
+/dev/video0 opened.
+No input was specified, using the first.
+Adjusting resolution from 384x288 to 352x288.
+--- Capturing frame...
+Corrupt JPEG data: 2 extraneous bytes before marker 0xd4
+Captured frame in 0.00 seconds.
+--- Processing captured image...
+Writing JPEG image to 'image.jpg'.
+----
+
+.By default, `fswebcam` uses a low resolution and adds a banner displaying a timestamp.
+image::images/webcam-image.jpg[By default, `fswebcam` uses a low resolution and adds a banner displaying a timestamp]
+
+To specify a different resolution for the captured image, use the `-r` flag, passing a width and height as two numbers separated by an `x`:
+
+[source,console]
+----
+$ fswebcam -r 1280x720 image2.jpg
+----
+
+You should see output similar to the following:
+
+----
+--- Opening /dev/video0...
+Trying source module v4l2...
+/dev/video0 opened.
+No input was specified, using the first.
+--- Capturing frame...
+Corrupt JPEG data: 1 extraneous bytes before marker 0xd5
+Captured frame in 0.00 seconds.
+--- Processing captured image...
+Writing JPEG image to 'image2.jpg'.
+----
+
+.Specify a resolution to capture a higher quality image.
+image::images/webcam-image-high-resolution.jpg[Specify a resolution to capture a higher quality image]
+
+==== Remove the banner
+
+To remove the banner from the captured image, use the `--no-banner` flag:
+
+[source,console]
+----
+$ fswebcam --no-banner image3.jpg
+----
+
+You should see output similar to the following:
+
+----
+--- Opening /dev/video0...
+Trying source module v4l2...
+/dev/video0 opened.
+No input was specified, using the first.
+--- Capturing frame...
+Corrupt JPEG data: 2 extraneous bytes before marker 0xd6
+Captured frame in 0.00 seconds.
+--- Processing captured image...
+Disabling banner.
+Writing JPEG image to 'image3.jpg'.
+----
+
+.Specify `--no-banner` to save the image without the timestamp banner.
+image::images/webcam-image-no-banner.jpg[Specify `--no-banner` to save the image without the timestamp banner]
+
+=== Automate image capture
+
+Unlike xref:camera_software.adoc#rpicam-apps[`rpicam-apps`], `fswebcam` doesn't have any built-in functionality to substitute timestamps and numbers in output image names. This can be useful when capturing multiple images, since manually editing the file name every time you record an image can be tedious. Instead, use a Bash script to implement this functionality yourself.
+
+Create a new file named `webcam.sh` in your home folder. Add the following example code, which uses the `bash` programming language to save images to files with a file name containing the year, month, day, hour, minute, and second:
+
+[,bash]
+----
+#!/bin/bash
+
+DATE=$(date +"%Y-%m-%d_%H-%M-%S")
+
+fswebcam -r 1280x720 --no-banner $DATE.jpg
+----
+
+Then, make the bash script executable by running the following command:
+
+[source,console]
+----
+$ chmod +x webcam.sh
+----
+
+Run the script with the following command to capture an image and save it to a file with a timestamp for a name, similar to `2024-05-10_12-06-33.jpg`:
+
+[source,console]
+----
+$ ./webcam.sh
+----
+
+You should see output similar to the following:
+
+----
+--- Opening /dev/video0...
+Trying source module v4l2...
+/dev/video0 opened.
+No input was specified, using the first.
+--- Capturing frame...
+Corrupt JPEG data: 2 extraneous bytes before marker 0xd6
+Captured frame in 0.00 seconds.
+--- Processing captured image...
+Disabling banner.
+Writing JPEG image to '2024-05-10_12-06-33.jpg'.
+----
+
+=== Capture a time lapse
+
+Use `cron` to schedule photo capture at a given interval. With the right interval, such as once a minute, you can capture a time lapse.
+
+First, open the cron table for editing:
+
+[source,console]
+----
+$ crontab -e
+----
+
+Once you have the file open in an editor, add the following line to the schedule to take a picture every minute, replacing `<username>` with your username:
+
+[,bash]
+----
+* * * * * /home/<username>/webcam.sh 2>&1
+----
+
+Save and exit, and you should see the following message:
+
+----
+crontab: installing new crontab
+----
diff --git a/documentation/asciidoc/computers/camera_software.adoc b/documentation/asciidoc/computers/camera_software.adoc
index ecc66032f..a234811a7 100644
--- a/documentation/asciidoc/computers/camera_software.adoc
+++ b/documentation/asciidoc/computers/camera_software.adoc
@@ -1,63 +1,61 @@
include::camera/camera_usage.adoc[]
-include::camera/libcamera_apps_intro.adoc[]
+include::camera/rpicam_apps_intro.adoc[]
-include::camera/libcamera_apps_getting_started.adoc[]
+include::camera/rpicam_hello.adoc[]
-include::camera/libcamera_hello.adoc[]
+include::camera/rpicam_jpeg.adoc[]
-include::camera/libcamera_jpeg.adoc[]
+include::camera/rpicam_still.adoc[]
-include::camera/libcamera_still.adoc[]
+include::camera/rpicam_vid.adoc[]
-include::camera/libcamera_vid.adoc[]
+include::camera/rpicam_raw.adoc[]
-include::camera/libcamera_apps_libav.adoc[]
+include::camera/rpicam_detect.adoc[]
-include::camera/libcamera_raw.adoc[]
+include::camera/rpicam_configuration.adoc[]
-include::camera/libcamera_detect.adoc[]
+include::camera/rpicam_apps_multicam.adoc[]
-include::camera/libcamera_options_common.adoc[]
+include::camera/rpicam_apps_packages.adoc[]
-include::camera/libcamera_options_still.adoc[]
+include::camera/streaming.adoc[]
-include::camera/libcamera_options_vid.adoc[]
+include::camera/rpicam_options_common.adoc[]
-include::camera/libcamera_differences.adoc[]
-
-include::camera/libcamera_apps_post_processing.adoc[]
-
-include::camera/libcamera_apps_post_processing_opencv.adoc[]
+include::camera/rpicam_options_still.adoc[]
-include::camera/libcamera_apps_post_processing_tflite.adoc[]
+include::camera/rpicam_options_vid.adoc[]
-include::camera/libcamera_apps_post_processing_writing.adoc[]
+include::camera/rpicam_options_libav.adoc[]
-include::camera/libcamera_apps_multicam.adoc[]
+include::camera/rpicam_options_detect.adoc[]
-include::camera/libcamera_apps_packages.adoc[]
+include::camera/rpicam_apps_post_processing.adoc[]
-include::camera/libcamera_apps_building.adoc[]
+include::camera/rpicam_apps_post_processing_opencv.adoc[]
-include::camera/libcamera_apps_writing.adoc[]
+include::camera/rpicam_apps_post_processing_tflite.adoc[]
-include::camera/libcamera_python.adoc[]
-
-include::camera/libcamera_3rd_party_tuning.adoc[]
+include::camera/rpicam_apps_post_processing_writing.adoc[]
-include::camera/libcamera_known_issues.adoc[]
+include::camera/rpicam_apps_building.adoc[]
-include::camera/libcamera_apps_getting_help.adoc[]
+include::camera/rpicam_apps_writing.adoc[]
-include::camera/timelapse.adoc[]
+include::camera/qt.adoc[]
-include::camera/gstreamer.adoc[]
+include::camera/libcamera_python.adoc[]
-include::camera/qt.adoc[]
+include::camera/webcams.adoc[]
include::camera/v4l2.adoc[]
include::camera/csi-2-usage.adoc[]
-include::camera/raspicam.adoc[]
+include::camera/libcamera_differences.adoc[]
+
+include::camera/troubleshooting.adoc[]
+
+include::camera/rpicam_apps_getting_help.adoc[]
diff --git a/documentation/asciidoc/computers/compute-module.adoc b/documentation/asciidoc/computers/compute-module.adoc
index 05c090516..97810c8bc 100644
--- a/documentation/asciidoc/computers/compute-module.adoc
+++ b/documentation/asciidoc/computers/compute-module.adoc
@@ -1,15 +1,13 @@
-include::compute-module/datasheet.adoc[]
-
-include::compute-module/designfiles.adoc[]
+include::compute-module/introduction.adoc[]
include::compute-module/cm-emmc-flashing.adoc[]
+include::compute-module/cm-bootloader.adoc[]
+
include::compute-module/cm-peri-sw-guide.adoc[]
include::compute-module/cmio-camera.adoc[]
include::compute-module/cmio-display.adoc[]
-
-
-
+include::compute-module/datasheet.adoc[]
diff --git a/documentation/asciidoc/computers/compute-module/cm-bootloader.adoc b/documentation/asciidoc/computers/compute-module/cm-bootloader.adoc
new file mode 100644
index 000000000..aea936e1a
--- /dev/null
+++ b/documentation/asciidoc/computers/compute-module/cm-bootloader.adoc
@@ -0,0 +1,55 @@
+== Compute Module EEPROM bootloader
+
+Since Compute Module 4, Compute Modules use an EEPROM bootloader. This bootloader lives in a small segment of on-board storage instead of the boot partition. As a result, it requires different procedures to update. Before using a Compute Module with an EEPROM bootloader in production, always follow these best practices:
+
+* Select a specific bootloader release. Verify that every Compute Module you use has that release. The version in the `usbboot` repo is always a recent stable release.
+* Configure the boot device by xref:raspberry-pi.adoc#raspberry-pi-bootloader-configuration[setting the `BOOT_ORDER`].
+* Enable hardware write-protection on the bootloader EEPROM to ensure that the bootloader can't be modified on inaccessible products (such as remote or embedded devices).
+
+=== Flash Compute Module bootloader EEPROM
+
+To flash the bootloader EEPROM:
+
+. Set up the hardware as you would when xref:../computers/compute-module.adoc#flash-compute-module-emmc[flashing the eMMC], but ensure `EEPROM_nWP` is _not_ pulled low.
+. Run the following command to write `recovery/pieeprom.bin` to the bootloader EEPROM:
++
+[source,console]
+----
+$ ./rpiboot -d recovery
+----
+. Once complete, `EEPROM_nWP` may be pulled low again.
+
+=== Flash storage devices other than SD cards
+
+The Linux-based https://github.com/raspberrypi/usbboot/blob/master/mass-storage-gadget/README.md[`mass-storage-gadget`] supports flashing of NVMe, eMMC and USB block devices. `mass-storage-gadget` writes devices faster than the firmware-based `rpiboot` mechanism, and also provides a UART console to the device for debugging.
+
+`usbboot` also includes a number of https://github.com/raspberrypi/usbboot/blob/master/Readme.md#compute-module-4-extensions[extensions] that enable you to interact with the EEPROM bootloader on a Compute Module.
+
+=== Update the Compute Module bootloader
+
+On Compute Modules with an EEPROM bootloader, ROM never runs `recovery.bin` from SD/eMMC. These Compute Modules disable the `rpi-eeprom-update` service by default, because eMMC is not removable and an invalid `recovery.bin` file could prevent the system from booting.
+
+You can override this behaviour with `self-update` mode. In `self-update` mode, you can update the bootloader from USB MSD or network boot.
+
+WARNING: `self-update` mode does not update the bootloader atomically. If a power failure occurs during an EEPROM update, you could corrupt the EEPROM.
+
+=== Modify the bootloader configuration
+
+To modify the Compute Module EEPROM bootloader configuration:
+
+. Navigate to the `usbboot/recovery` directory.
+. If you require a specific bootloader release, replace `pieeprom.original.bin` with the equivalent from your bootloader release.
+. Edit the default `boot.conf` bootloader configuration file to define a xref:../computers/raspberry-pi.adoc#BOOT_ORDER[`BOOT_ORDER`]:
+ * For network boot, use `BOOT_ORDER=0xf2`.
+ * For SD/eMMC boot, use `BOOT_ORDER=0xf1`.
+ * For USB boot failing over to eMMC, use `BOOT_ORDER=0xf15`.
+ * For NVMe boot, use `BOOT_ORDER=0xf6`.
+. Run `./update-pieeprom.sh` to generate a new EEPROM image `pieeprom.bin` image file.
+. If you require EEPROM write-protection, add `eeprom_write_protect=1` to `/boot/firmware/config.txt`.
+ * Once enabled in software, you can lock hardware write-protection by pulling the `EEPROM_nWP` pin low.
+. Run the following command to write the updated `pieeprom.bin` image to EEPROM:
++
+[source,console]
+----
+$ ../rpiboot -d .
+----
diff --git a/documentation/asciidoc/computers/compute-module/cm-emmc-flashing.adoc b/documentation/asciidoc/computers/compute-module/cm-emmc-flashing.adoc
index e93b31503..664dd97c0 100644
--- a/documentation/asciidoc/computers/compute-module/cm-emmc-flashing.adoc
+++ b/documentation/asciidoc/computers/compute-module/cm-emmc-flashing.adoc
@@ -1,137 +1,164 @@
-== Flashing the Compute Module eMMC
+[[flash-compute-module-emmc]]
+== Flash an image to a Compute Module
-The Compute Module has an on-board eMMC device connected to the primary SD card interface. This guide explains how to write data to the eMMC storage using a Compute Module IO board.
+TIP: To flash the same image to multiple Compute Modules, use the https://github.com/raspberrypi/rpi-sb-provisioner[Raspberry Pi Secure Boot Provisioner]. To customise an OS image to flash onto those devices, use https://github.com/RPi-Distro/pi-gen[pi-gen].
-Please also read the section in the xref:compute-module.adoc#datasheets-and-schematics[Compute Module Datasheets]
+[[flashing-the-compute-module-emmc]]
-IMPORTANT: For mass provisioning of CM3, CM3+ and CM4 the https://github.com/raspberrypi/cmprovision[Raspberry Pi Compute Module Provisioning System] is recommended.
+The Compute Module has an on-board eMMC device connected to the primary SD card interface. This guide explains how to flash (write) an operating system image to the eMMC storage of a single Compute Module.
-=== Steps to Flash the eMMC
+**Lite** variants of Compute Modules do not have on-board eMMC. Instead, follow the procedure to flash a storage device for other Raspberry Pi devices at xref:../computers/getting-started.adoc#installing-the-operating-system[Install an operating system].
-To flash the Compute Module eMMC, you either need a Linux system (a Raspberry Pi is recommended, or Ubuntu on a PC) or a Windows system (Windows 10 is recommended). For BCM2837 (CM3), a bug which affected the Mac has been fixed, so this will also work.
+=== Prerequisites
-NOTE: There is a bug in the BCM2835 (CM1) bootloader which returns a slightly incorrect USB packet to the host. Most USB hosts seem to ignore this benign bug and work fine; we do, however, see some USB ports that don't work due to this bug. We don't quite understand why some ports fail, as it doesn't seem to be correlated with whether they are USB2 or USB3 (we have seen both types working), but it's likely to be specific to the host controller and driver. This bug has been fixed in BCM2837.
+To flash the Compute Module eMMC, you need the following:
-=== Setting up the CMIO board
+* Another computer, referred to in this guide as the *host device*. You can use Linux (we recommend Raspberry Pi OS or Ubuntu), Windows 11, or macOS.
+* The Compute Module IO Board xref:compute-module.adoc#io-board-compatibility[that corresponds to your Compute Module model].
+* A micro USB cable, or a USB-C cable for Compute Module models since CM5IO.
-==== Compute Module 4
+TIP: In some cases, USB hubs can prevent the host device from recognising the Compute Module. If your host device does not recognise the Compute Module, try connecting the Compute Module directly to the host device. For more diagnostic tips, see https://github.com/raspberrypi/usbboot?tab=readme-ov-file#troubleshooting[the usbboot troubleshooting guide].
-Ensure the Compute Module is fitted correctly installed on the IO board. It should lie flat on the IO board.
+=== Set up the IO Board
-* Make sure that `nRPI_BOOT` which is on J2 (`disable eMMC Boot`) on the IO board jumper is fitted
-* Use a micro USB cable to connect the micro USB slave port J11 on IO board to the host device.
-* Do not power up yet.
+To begin, physically set up your IO Board. This includes connecting the Compute Module and host device to the IO Board.
-==== Compute Module 1 and 3
+[tabs]
+======
+Compute Module 5 IO Board::
++
+To set up the Compute Module 5 IO Board:
++
+. Connect the Compute Module to the IO board. When connected, the Compute Module should lie flat.
+. Fit `nRPI_BOOT` to J2 (`disable eMMC Boot`) on the IO board jumper.
+. Connect a cable from USB-C slave port J11 on the IO board to the host device.
-Ensure the Compute Module itself is correctly installed on the IO board. It should lie parallel with the board, with the engagement clips clicked into place.
+Compute Module 4 IO Board::
++
+To set up the Compute Module 4 IO Board:
++
+. Connect the Compute Module to the IO board. When connected, the Compute Module should lie flat.
+. Fit `nRPI_BOOT` to J2 (`disable eMMC Boot`) on the IO board jumper.
+. Connect a cable from micro USB slave port J11 on the IO board to the host device.
-* Make sure that J4 (USB SLAVE BOOT ENABLE) is set to the 'EN' position.
-* Use a micro USB cable to connect the micro USB slave port J15 on IO board to the host device.
-* Do not power up yet.
+Compute Module IO Board::
++
+To set up the Compute Module IO Board:
++
+. Connect the Compute Module to the IO board. When connected, the Compute Module should lie parallel to the board, with the engagement clips firmly clicked into place.
+. Set J4 (`USB SLAVE BOOT ENABLE`) to 1-2 (`USB BOOT ENABLED`).
+. Connect a cable from micro USB slave port J15 on the IO board to the host device.
+======
-==== For Windows Users
+=== Set up the host device
-Under Windows, an installer is available to install the required drivers and boot tool automatically. Alternatively, a user can compile and run it using Cygwin and/or install the drivers manually.
+Next, let's set up software on the host device.
-==== Windows Installer
+TIP: For a host device, we recommend a Raspberry Pi 4 or newer running 64-bit Raspberry Pi OS.
-For those who just want to enable the Compute Module eMMC as a mass storage device under Windows, the stand-alone installer is the recommended option. This installer has been tested on Windows 10 64-bit.
-
-Please ensure you are not writing to any USB devices whilst the installer is running.
-
-. Download and run the https://github.com/raspberrypi/usbboot/raw/master/win32/rpiboot_setup.exe[Windows installer] to install the drivers and boot tool.
-. Plug your host PC USB into the USB SLAVE port, making sure you have setup the board as described above.
-. Apply power to the board; Windows should now find the hardware and install the driver.
-. Once the driver installation is complete, run the `RPiBoot.exe` tool that was previously installed.
-. After a few seconds, the Compute Module eMMC will pop up under Windows as a disk (USB mass storage device).
-
-==== Building `rpiboot` on your host system.
-
-Instructions for building and running the latest release of `rpiboot` are documented in the https://github.com/raspberrypi/usbboot/blob/master/Readme.md#building[usbboot readme] on Github.
+[tabs]
+======
+Linux::
++
+To set up software on a Linux host device:
++
+. Run the following command to install `rpiboot` (or, alternatively, https://github.com/raspberrypi/usbboot[build `rpiboot` from source]):
++
+[source,console]
+----
+$ sudo apt install rpiboot
+----
+. Connect the IO Board to power.
+. Then, run `rpiboot`:
++
+[source,console]
+----
+$ sudo rpiboot
+----
+. After a few seconds, the Compute Module should appear as a mass storage device. Check the `/dev/` directory, likely `/dev/sda` or `/dev/sdb`, for the device. Alternatively, run `lsblk` and search for a device with a storage capacity that matches the capacity of your Compute Module.
+
+macOS::
++
+To set up software on a macOS host device:
++
+. First, https://github.com/raspberrypi/usbboot?tab=readme-ov-file#macos[build `rpiboot` from source].
+. Connect the IO Board to power.
+. Then, run the `rpiboot` executable with the following command:
++
+[source,console]
+----
+$ rpiboot -d mass-storage-gadget64
+----
+. When the command finishes running, you should see a message stating "The disk you inserted was not readable by this computer." Click **Ignore**. Your Compute Module should now appear as a mass storage device.
-==== Writing to the eMMC (Windows)
+Windows::
++
+To set up software on a Windows 11 host device:
++
+. Download the https://github.com/raspberrypi/usbboot/raw/master/win32/rpiboot_setup.exe[Windows installer] or https://github.com/raspberrypi/usbboot[build `rpiboot` from source].
+. Double-click on the installer to run it. This installs the drivers and boot tool. Do not close any driver installation windows which appear during the installation process.
+. Reboot the host device.
+. Connect the IO Board to power. Windows should discover the hardware and configure the required drivers.
+. On CM4 and later devices, select **Raspberry Pi - Mass Storage Gadget - 64-bit** from the start menu. After a few seconds, the Compute Module eMMC or NVMe will appear as USB mass storage devices. This also provides a debug console as a serial port gadget.
+. On CM3 and older devices, select **rpiboot**. Double-click on `RPiBoot.exe` to run it. After a few seconds, the Compute Module eMMC should appear as a USB mass storage device.
-After `rpiboot` completes, a new USB mass storage drive will appear in Windows. We recommend using https://www.raspberrypi.com/software/[Raspberry Pi Imager] to write images to the drive.
+======
-Make sure J4 (USB SLAVE BOOT ENABLE) / J2 (nRPI_BOOT) is set to the disabled position and/or nothing is plugged into the USB slave port. Power cycling the IO board should now result in the Compute Module booting from eMMC.
-==== Writing to the eMMC (Linux)
+=== Flash the eMMC
-After `rpiboot` completes, you will see a new device appear; this is commonly `/dev/sda` on a Raspberry Pi but it could be another location such as `/dev/sdb`, so check in `/dev/` or run `lsblk` before running `rpiboot` so you can see what changes.
+You can use xref:../computers/getting-started.adoc#raspberry-pi-imager[Raspberry Pi Imager] to flash an operating system image to a Compute Module.
-You now need to write a raw OS image (such as https://www.raspberrypi.com/software/operating-systems/#raspberry-pi-os-32-bit[Raspberry Pi OS]) to the device. Note the following command may take some time to complete, depending on the size of the image: (Change `/dev/sdX` to the appropriate device.)
+Alternatively, use `dd` to write a raw OS image (such as xref:../computers/os.adoc#introduction[Raspberry Pi OS]) to your Compute Module. Run the following command, replacing `/dev/sdX` with the path to the mass storage device representation of your Compute Module and `raw_os_image.img` with the path to your raw OS image:
-[,bash]
+[source,console]
----
-sudo dd if=raw_os_image_of_your_choice.img of=/dev/sdX bs=4MiB
+$ sudo dd if=raw_os_image.img of=/dev/sdX bs=4MiB
----
-Once the image has been written, unplug and re-plug the USB; you should see two partitions appear (for Raspberry Pi OS) in `/dev`. In total, you should see something similar to this:
+Once the image has been written, disconnect and reconnect the Compute Module. You should now see two partitions (for Raspberry Pi OS):
-[,bash]
+[source,console]
----
/dev/sdX <- Device
/dev/sdX1 <- First partition (FAT)
/dev/sdX2 <- Second partition (Linux filesystem)
----
-The `/dev/sdX1` and `/dev/sdX2` partitions can now be mounted normally.
-
-Make sure J4 (USB SLAVE BOOT ENABLE) / J2 (nRPI_BOOT) is set to the disabled position and/or nothing is plugged into the USB slave port. Power cycling the IO board should now result in the Compute Module booting from eMMC.
+You can mount the `/dev/sdX1` and `/dev/sdX2` partitions normally.
-[[cm4bootloader]]
-=== Compute Module 4 Bootloader
+=== Boot from eMMC
-The default bootloader configuration on CM4 is designed to support bringup and development on a https://www.raspberrypi.com/products/compute-module-4-io-board/[Compute Module 4 IO board] and the software version flashed at manufacture may be older than the latest release. For final products please consider:-
+[tabs]
+======
+Compute Module 5 IO Board::
++
+Disconnect `nRPI_BOOT` from J2 (`disable eMMC Boot`) on the IO board jumper.
-* Selecting and verifying a specific bootloader release. The version in the `usbboot` repo is always a recent stable release.
-* Configuring the boot device (e.g. network boot). See `BOOT_ORDER` section in the xref:raspberry-pi.adoc#raspberry-pi-4-bootloader-configuration[bootloader configuration] guide.
-* Enabling hardware write protection on the bootloader EEPROM to ensure that the bootloader can't be modified on remote/inaccessible products.
+Compute Module 4 IO Board::
++
+Disconnect `nRPI_BOOT` from J2 (`disable eMMC Boot`) on the IO board jumper.
-N.B. The Compute Module 4 ROM never runs `recovery.bin` from SD/EMMC and the `rpi-eeprom-update` service is not enabled by default. This is necessary because the EMMC is not removable and an invalid `recovery.bin` file would prevent the system from booting. This can be overridden and used with `self-update` mode where the bootloader can be updated from USB MSD or Network boot. However, `self-update` mode is not an atomic update and therefore not safe in the event of a power failure whilst the EEPROM was being updated.
+Compute Module IO Board::
++
+Set J4 (`USB SLAVE BOOT ENABLE`) to 2-3 (`USB BOOT DISABLED`).
+======
-==== Flashing NVMe / other storage devices.
-The new Linux-based https://github.com/raspberrypi/usbboot/blob/master/mass-storage-gadget/README.md[mass-storage gadget] supports flashing of NVMe, EMMC and USB block devices.
-This is normally faster than using the `rpiboot` firmware driver and also provides a UART console to the device for easier debug.
+==== Boot
-See also: https://github.com/raspberrypi/usbboot/blob/master/Readme.md#compute-module-4-extensions[CM4 rpiboot extensions]
+Disconnect the USB slave port. Power-cycle the IO board to boot the Compute Module from the new image you just wrote to eMMC.
-==== Modifying the bootloader configuration
+=== Known issues
-To modify the CM4 bootloader configuration:-
-
-* cd `usbboot/recovery`
-* Replace `pieeprom.original.bin` if a specific bootloader release is required.
-* Edit the default `boot.conf` bootloader configuration file. Typically, at least the BOOT_ORDER must be updated:-
- ** For network boot `BOOT_ORDER=0xf2`
- ** For SD/EMMC boot `BOOT_ORDER=0xf1`
- ** For USB boot failing over to EMMC `BOOT_ORDER=0xf15`
-* Run `./update-pieeprom.sh` to update the EEPROM image `pieeprom.bin` image file.
-* If EEPROM write protection is required then edit `config.txt` and add `eeprom_write_protect=1`. Hardware write-protection must be enabled via software and then locked by pulling the `EEPROM_nWP` pin low.
-* Run `../rpiboot -d .` to update the bootloader using the updated EEPROM image `pieeprom.bin`
-
-The pieeprom.bin file is now ready to be flashed to the Compute Module 4.
-
-==== Flashing the bootloader EEPROM - Compute Module 4
-
-To flash the bootloader EEPROM follow the same hardware setup as for flashing the EMMC but also ensure EEPROM_nWP is NOT pulled low. Once complete `EEPROM_nWP` may be pulled low again.
-
-[,bash]
-----
-# Writes recovery/pieeprom.bin to the bootloader EEPROM.
-./rpiboot -d recovery
+* A small percentage of CM3 devices may experience problems booting. We have traced these back to the method used to create the FAT32 partition; we believe the problem is due to a difference in timing between the CPU and eMMC. If you have trouble booting your CM3, create the partitions manually with the following commands:
++
+[source,console]
----
-
-=== Troubleshooting
-
-For a small percentage of Raspberry Pi Compute Module 3s, booting problems have been reported. We have traced these back to the method used to create the FAT32 partition; we believe the problem is due to a difference in timing between the BCM2835/6/7 and the newer eMMC devices. The following method of creating the partition is a reliable solution in our hands.
-
-[,bash]
-----
-sudo parted /dev/<device>
+$ sudo parted /dev/<device>
(parted) mkpart primary fat32 4MiB 64MiB
(parted) q
-sudo mkfs.vfat -F32 /dev/<partition>
-sudo cp -r <files>/* <mountpoint>
+$ sudo mkfs.vfat -F32 /dev/<partition>
+$ sudo cp -r <files>/* <mountpoint>
----
+
+* The CM1 bootloader returns a slightly incorrect USB packet to the host. Most USB hosts ignore it, but some USB ports don't work due to this bug. CM3 fixed this bug.
diff --git a/documentation/asciidoc/computers/compute-module/cm-peri-sw-guide.adoc b/documentation/asciidoc/computers/compute-module/cm-peri-sw-guide.adoc
index 6cd5b3d5b..cb1beac88 100644
--- a/documentation/asciidoc/computers/compute-module/cm-peri-sw-guide.adoc
+++ b/documentation/asciidoc/computers/compute-module/cm-peri-sw-guide.adoc
@@ -1,80 +1,47 @@
-== Attaching and Enabling Peripherals
+== Wire peripherals
-NOTE: Unless explicitly stated otherwise, these instructions will work identically on Compute Module 1 and Compute Module 3 and their CMIO board(s).
+This guide helps developers wire up peripherals to the Compute Module pins, and explains how to enable these peripherals in software.
-This guide is designed to help developers using the Compute Module 1 (and Compute Module 3) get to grips with how to wire up peripherals to the Compute Module pins, and how to make changes to the software to enable these peripherals to work correctly.
+Most of the pins of the SoC, including the GPIO, two CSI camera interfaces, two DSI display interfaces, and HDMI are available for wiring. You can usually leave unused pins disconnected.
-The Compute Module 1 (CM1) and Compute Module 3 (CM3) contain the Raspberry Pi BCM2835 (or BCM2837 for CM3) system on a chip (SoC) or 'processor', memory, and eMMC. The eMMC is similar to an SD card but is soldered onto the board. Unlike SD cards, the eMMC is specifically designed to be used as a disk and has extra features that make it more reliable in this use case. Most of the pins of the SoC (GPIO, two CSI camera interfaces, two DSI display interfaces, HDMI etc) are freely available and can be wired up as the user sees fit (or, if unused, can usually be left unconnected). The Compute Module is a DDR2 SODIMM form-factor-compatible module, so any DDR2 SODIMM socket should be able to be used
+Compute Modules that come in the DDR2 SODIMM form factor are physically compatible with any DDR2 SODIMM socket. However, the pinout is **not** the same as SODIMM memory modules.
-NOTE: The pinout is NOT the same as an actual SODIMM memory module.
+To use a Compute Module, a user must design a motherboard that:
-To use the Compute Module, a user needs to design a (relatively simple) 'motherboard' which can provide power to the Compute Module (3.3V and 1.8V at minimum), and which connects the pins to the required peripherals for the user's application.
+* provides power to the Compute Module (3.3V and 1.8V at minimum)
+* connects the pins to the required peripherals for the user's application
-Raspberry Pi provides a minimal motherboard for the Compute Module (called the Compute Module IO Board, or CMIO Board) which powers the module, brings out the GPIO to pin headers, and brings the camera and display interfaces out to FFC connectors. It also provides HDMI, USB, and an 'ACT' LED, as well as the ability to program the eMMC of a module via USB from a PC or Raspberry Pi.
+This guide first explains the boot process and how Device Tree describes attached hardware.
-This guide first explains the boot process and how Device Tree is used to describe attached hardware; these are essential things to understand when designing with the Compute Module. It then provides a worked example of attaching an I2C and an SPI peripheral to a CMIO (or CMIO V3 for CM3) Board and creating the Device Tree files necessary to make both peripherals work under Linux, starting from a vanilla Raspberry Pi OS image.
+Then, we'll explain how to attach an I2C and an SPI peripheral to an IO Board. Finally, we'll create the Device Tree files necessary to use both peripherals with Raspberry Pi OS.
=== BCM283x GPIOs
-BCM283x has three banks of General-Purpose Input/Output (GPIO) pins: 28 pins on Bank 0, 18 pins on Bank 1, and 8 pins on Bank 2, making 54 pins in total. These pins can be used as true GPIO pins, i.e. software can set them as inputs or outputs, read and/or set state, and use them as interrupts. They also can be set to 'alternate functions' such as I2C, SPI, I2S, UART, SD card, and others.
+BCM283x has three banks of general-purpose input/output (GPIO) pins: 28 pins on Bank 0, 18 pins on Bank 1, and 8 pins on Bank 2, for a total of 54 pins. These pins can be used as true GPIO pins: software can set them as inputs or outputs, read and/or set state, and use them as interrupts. They also can run alternate functions such as I2C, SPI, I2S, UART, SD card, and others.
-On a Compute Module, both Bank 0 and Bank 1 are free to use. Bank 2 is used for eMMC and HDMI hot plug detect and ACT LED / USB boot control.
+You can use Bank 0 or Bank 1 on any Compute Module. Don't use Bank 2: it controls eMMC, HDMI hot plug detect, and ACT LED/USB boot control.
-It is useful on a running system to look at the state of each of the GPIO pins (what function they are set to, and the voltage level at the pin) so that you can see if the system is set up as expected. This is particularly helpful if you want to see if a Device Tree is working as expected, or to get a look at the pin states during hardware debug.
+Use `pinctrl` to check the voltage and function of the GPIO pins to see if your Device Tree is working as expected.
-Raspberry Pi provides the `raspi-gpio` package which is a tool for hacking and debugging GPIO
+=== BCM283x boot process
-NOTE: You need to run `raspi-gpio` as root.
+BCM283x devices have a VideoCore GPU and Arm CPU cores. The GPU consists of a DSP processor and hardware accelerators for imaging, video encode and decode, 3D graphics, and image compositing.
-To install `raspi-gpio`:
+In BCM283x devices, the DSP core in the GPU boots first. It handles setup before booting up the main Arm processors.
-----
-sudo apt install raspi-gpio
-----
-
-If `apt` can't find the `raspi-gpio` package, you will need to do an update first:
-
-----
-sudo apt update
-----
+Raspberry Pi BCM283x devices have a three-stage boot process:
-To get help on `raspi-gpio`, run it with the `help` argument:
-
-----
-sudo raspi-gpio help
-----
-
-For example, to see the current function and level of all GPIO pins use:
-
-----
-sudo raspi-gpio get
-----
-
-NOTE: `raspi-gpio` can be used with the `funcs` argument to get a list of all supported GPIO functions per pin. It will print out a table in CSV format. The idea is to pipe the table to a `.csv` file and then load this file using e.g. Excel:
-
-----
-sudo raspi-gpio funcs > gpio-funcs.csv
-----
-
-=== BCM283x Boot Process
-
-BCM283x devices consist of a VideoCore GPU and ARM CPU cores. The GPU is in fact a system consisting of a DSP processor and hardware accelerators for imaging, video encode and decode, 3D graphics, and image compositing.
-
-In BCM283x devices, it is the DSP core in the GPU that boots first. It is responsible for general setup and housekeeping before booting up the main ARM processor(s).
-
-The BCM283x devices as used on Raspberry Pi and Compute Module boards have a three-stage boot process:
-
-. The GPU DSP comes out of reset and executes code from a small internal ROM (the boot ROM). The sole purpose of this code is to load a second stage boot loader via one of the external interfaces. On a Raspberry Pi or Compute Module, this code first looks for a second stage boot loader on the SD card (eMMC); it expects this to be called `bootcode.bin` and to be on the first partition (which must be FAT32). If no SD card is found or `bootcode.bin` is not found, the Boot ROM sits and waits in 'USB boot' mode, waiting for a host to give it a second stage boot loader via the USB interface.
-. The second stage boot loader (`bootcode.bin` on the sdcard or `usbbootcode.bin` for usb boot) is responsible for setting up the LPDDR2 SDRAM interface and various other critical system functions and then loading and executing the main GPU firmware (called `start.elf`, again on the primary SD card partition).
-. `start.elf` takes over and is responsible for further system setup and booting up the ARM processor subsystem, and contains the firmware that runs on the various parts of the GPU. It first reads `dt-blob.bin` to determine initial GPIO pin states and GPU-specific interfaces and clocks, then parses `config.txt`. It then loads an ARM device tree file (e.g. `bcm2708-rpi-cm.dtb` for a Compute Module 1) and any device tree overlays specified in `config.txt` before starting the ARM subsystem and passing the device tree data to the booting Linux kernel.
+* The GPU DSP comes out of reset and executes code from the small internal boot ROM. This code loads a second-stage bootloader via an external interface. This code first looks for a second-stage boot loader on the boot device called `bootcode.bin` on the boot partition. If no boot device is found or `bootcode.bin` is not found, the boot ROM waits in USB boot mode for a host to provide a second-stage boot loader (`usbbootcode.bin`).
+* The second-stage boot loader is responsible for setting up the LPDDR2 SDRAM interface and other critical system functions. Once set up, the second-stage boot loader loads and executes the main GPU firmware (`start.elf`).
+* `start.elf` handles additional system setup and boots up the Arm processor subsystem. It contains the GPU firmware. The GPU firmware first reads `dt-blob.bin` to determine initial GPIO pin states and GPU-specific interfaces and clocks, then parses `config.txt`. It then loads a model-specific Arm device tree file and any Device Tree overlays specified in `config.txt` before starting the Arm subsystem and passing the Device Tree data to the booting Linux kernel.
=== Device Tree
-http://www.devicetree.org/[Device Tree] is a special way of encoding all the information about the hardware attached to a system (and consequently required drivers).
+xref:configuration.adoc#device-trees-overlays-and-parameters[Linux Device Tree for Raspberry Pi] encodes information about hardware attached to a system as well as the drivers used to communicate with that hardware.
-On a Raspberry Pi or Compute Module there are several files in the first FAT partition of the SD/eMMC that are binary 'Device Tree' files. These binary files (usually with extension `.dtb`) are compiled from human-readable text descriptions (usually files with extension `.dts`) by the Device Tree compiler.
+The boot partition contains several binary Device Tree (`.dtb`) files. The Device Tree compiler creates these binary files using human-readable Device Tree descriptions (`.dts`).
-On a standard Raspberry Pi OS image in the first (FAT) partition you will find two different types of device tree files, one is used by the GPU only and the rest are standard ARM device tree files for each of the BCM283x based Raspberry Pi products:
+The boot partition contains two different types of Device Tree files. One is used by the GPU only; the rest are standard Arm Device Tree files for each of the BCM283x-based Raspberry Pi products:
* `dt-blob.bin` (used by the GPU)
* `bcm2708-rpi-b.dtb` (Used for Raspberry Pi 1 Models A and B)
@@ -84,186 +51,185 @@ On a standard Raspberry Pi OS image in the first (FAT) partition you will find t
* `bcm2708-rpi-cm.dtb` (Used for Raspberry Pi Compute Module 1)
* `bcm2710-rpi-cm3.dtb` (Used for Raspberry Pi Compute Module 3)
-NOTE: `dt-blob.bin` by default does not exist as there is a 'default' version compiled into `start.elf`, but for Compute Module projects it will often be necessary to provide a `dt-blob.bin` (which overrides the default built-in file).
+During boot, the user can specify a specific Arm Device Tree to use via the `device_tree` parameter in `config.txt`. For example, the line `device_tree=mydt.dtb` in `config.txt` specifies an Arm Device Tree in a file named `mydt.dtb`.
+
+You can create a full Device Tree for a Compute Module product, but we recommend using **overlays** instead. Overlays add descriptions of non-board-specific hardware to the base Device Tree. This includes GPIO pins used and their function, as well as the devices attached, so that the correct drivers can be loaded. The bootloader merges overlays with the base Device Tree before passing the Device Tree to the Linux kernel. Occasionally the base Device Tree changes, usually in a way that will not break overlays.
-NOTE: `dt-blob.bin` is in compiled device tree format, but is only read by the GPU firmware to set up functions exclusive to the GPU - see below.
+Use the `dtoverlay` parameter in `config.txt` to load Device Tree overlays. Raspberry Pi OS assumes that all overlays are located in the `/overlays` directory and use the suffix `-overlay.dtb`. For example, the line `dtoverlay=myoverlay` loads the overlay `/overlays/myoverlay-overlay.dtb`.
-* A guide to xref:configuration.adoc#changing-the-default-pin-configuration[creating `dt-blob.bin`].
-* A guide to the xref:configuration.adoc#device-trees-overlays-and-parameters[Linux Device Tree for Raspberry Pi].
+To wire peripherals to a Compute Module, describe all hardware attached to the Bank 0 and Bank 1 GPIOs in an overlay. This allows you to use standard Raspberry Pi OS images, since the overlay is merged into the standard base Device Tree. Alternatively, you can define a custom Device Tree for your application, but you won't be able to use standard Raspberry Pi OS images. Instead, you must create a modified Raspberry Pi OS image that includes your custom device tree for every OS update you wish to distribute. If the base overlay changes, you might need to update your customised Device Tree.
-During boot, the user can specify a specific ARM device tree to use via the `device_tree` parameter in `config.txt`, for example adding the line `device_tree=mydt.dtb` to `config.txt` where `mydt.dtb` is the dtb file to load instead of one of the standard ARM dtb files. While a user can create a full device tree for their Compute Module product, the recommended way to add hardware is to use overlays (see next section).
+=== `dt-blob.bin`
-In addition to loading an ARM dtb, `start.elf` supports loading additional Device Tree 'overlays' via the `dtoverlay` parameter in `config.txt`, for example adding as many `dtoverlay=myoverlay` lines as required as overlays to `config.txt`, noting that overlays live in `/overlays` and are suffixed `-overlay.dtb` e.g. `/overlays/myoverlay-overlay.dtb`. Overlays are merged with the base dtb file before the data is passed to the Linux kernel when it starts.
+When `start.elf` runs, it first reads `dt-blob.bin`. This is a special form of Device Tree blob which tells the GPU how to set up the GPIO pin states.
-Overlays are used to add data to the base dtb that (nominally) describes non-board-specific hardware. This includes GPIO pins used and their function, as well as the device(s) attached, so that the correct drivers can be loaded. The convention is that on a Raspberry Pi, all hardware attached to the Bank0 GPIOs (the GPIO header) should be described using an overlay. On a Compute Module all hardware attached to the Bank0 and Bank1 GPIOs should be described in an overlay file. You don't have to follow these conventions: you can roll all the information into one single dtb file, as previously described, replacing `bcm2708-rpi-cm.dtb`. However, following the conventions means that you can use a 'standard' Raspberry Pi OS release, with its standard base dtb and all the product-specific information contained in a separate overlay. Occasionally the base dtb might change - usually in a way that will not break overlays - which is why using an overlay is suggested.
+`dt-blob.bin` contains information about GPIOs and peripherals controlled by the GPU, instead of the SoC. For example, the GPU manages Camera Modules. The GPU needs exclusive access to an I2C interface and a couple of pins to talk to a Camera Module.
-=== dt-blob.bin
+On most Raspberry Pi models, I2C0 is reserved for exclusive GPU use. `dt-blob.bin` defines the GPIO pins used for I2C0.
-When `start.elf` runs, it first reads something called `dt-blob.bin`. This is a special form of Device Tree blob which tells the GPU how to (initially) set up the GPIO pin states, and also any information about GPIOs/peripherals that are controlled (owned) by the GPU, rather than being used via Linux on the ARM. For example, the Raspberry Pi Camera peripheral is managed by the GPU, and the GPU needs exclusive access to an I2C interface to talk to it, as well as a couple of control pins. I2C0 on most Raspberry Pi Boards and Compute Modules is nominally reserved for exclusive GPU use. The information on which GPIO pins the GPU should use for I2C0, and to control the camera functions, comes from `dt-blob.bin`.
+By default, `dt-blob.bin` does not exist. Instead, `start.elf` includes a built-in version of the file. Many Compute Module projects provide a custom `dt-blob.bin` which overrides the default built-in file.
-NOTE: The `start.elf` firmware has a xref:configuration.adoc#changing-the-default-pin-configuration['built-in' default] `dt-blob.bin` which is used if no `dt-blob.bin` is found on the root of the first FAT partition. Most Compute Module projects will want to provide their own custom `dt-blob.bin`. Note that `dt-blob.bin` specifies which pin is for HDMI hot plug detect, although this should never change on Compute Module. It can also be used to set up a GPIO as a GPCLK output, and specify an ACT LED that the GPU can use while booting. Other functions may be added in future.
+`dt-blob.bin` specifies:
-https://datasheets.raspberrypi.com/cm/minimal-cm-dt-blob.dts[minimal-cm-dt-blob.dts] is an example `.dts` device tree file that sets up the HDMI hot plug detect and ACT LED and sets all other GPIOs to be inputs with default pulls.
+* the pin used for HDMI hot plug detect
+* GPIO pins used as a GPCLK output
+* an ACT LED that the GPU can use while booting
-To compile the `minimal-cm-dt-blob.dts` to `dt-blob.bin` use the Device Tree Compiler `dtc`:
+https://datasheets.raspberrypi.com/cm/minimal-cm-dt-blob.dts[`minimal-cm-dt-blob.dts`] is an example `.dts` device tree file. It sets up HDMI hot plug detection, an ACT LED, and sets all other GPIOs as inputs with default pulls.
+To compile `minimal-cm-dt-blob.dts` to `dt-blob.bin`, use the xref:configuration.adoc#device-trees-overlays-and-parameters[Device Tree compiler] `dtc`.
+To install `dtc` on a Raspberry Pi, run the following command:
+
+[source,console]
----
-dtc -I dts -O dtb -o dt-blob.bin minimal-cm-dt-blob.dts
+$ sudo apt install device-tree-compiler
----
-=== ARM Linux Device Tree
+Then, run the following command to compile `minimal-cm-dt-blob.dts` into `dt-blob.bin`:
-After `start.elf` has read `dt-blob.bin` and set up the initial pin states and clocks, it reads xref:config_txt.adoc[`config.txt`] which contains many other options for system setup.
+[source,console]
+----
+$ dtc -I dts -O dtb -o dt-blob.bin minimal-cm-dt-blob.dts
+----
-After reading `config.txt` another device tree file specific to the board the hardware is running on is read: this is `bcm2708-rpi-cm.dtb` for a Compute Module 1, or `bcm2710-rpi-cm.dtb` for Compute Module 3. This file is a standard ARM Linux device tree file, which details how hardware is attached to the processor: what peripheral devices exist in the SoC and where, which GPIOs are used, what functions those GPIOs have, and what physical devices are connected. This file will set up the GPIOs appropriately, overwriting the pin state set up in `dt-blob.bin` if it is different. It will also try to load driver(s) for the specific device(s).
+For more information, see our xref:configuration.adoc#change-the-default-pin-configuration[guide to creating `dt-blob.bin`].
-Although the `bcm2708-rpi-cm.dtb` file can be used to load all attached devices, the recommendation for Compute Module users is to leave this file alone. Instead, use the one supplied in the standard Raspberry Pi OS software image, and add devices using a custom 'overlay' file as previously described. The `bcm2708-rpi-cm.dtb` file contains (disabled) entries for the various peripherals (I2C, SPI, I2S etc.) and no GPIO pin definitions, apart from the eMMC/SD Card peripheral which has GPIO defs and is enabled, because it is always on the same pins. The idea is that the separate overlay file will enable the required interfaces, describe the pins used, and also describe the required drivers. The `start.elf` firmware will read and merge the `bcm2708-rpi-cm.dtb` with the overlay data before giving the merged device tree to the Linux kernel as it boots up.
+=== Arm Linux Device Tree
-=== Device Tree Source and Compilation
+After `start.elf` reads `dt-blob.bin` and sets up the initial pin states and clocks, it reads xref:config_txt.adoc[`config.txt`], which contains many other options for system setup.
-The Raspberry Pi OS image provides compiled dtb files, but where are the source dts files? They live in the Raspberry Pi Linux kernel branch, on https://github.com/raspberrypi/linux[GitHub]. Look in the `arch/arm/boot/dts` folder.
+After reading `config.txt`, `start.elf` reads a model-specific Device Tree file. For instance, Compute Module 3 uses `bcm2710-rpi-cm.dtb`. This file is a standard Arm Linux Device Tree file that details hardware attached to the processor. It enumerates:
-Some default overlay dts files live in `arch/arm/boot/dts/overlays`. Corresponding overlays for standard hardware that can be attached to a *Raspberry Pi* in the Raspberry Pi OS image are on the FAT partition in the `/overlays` directory. Note that these assume certain pins on BANK0, as they are for use on a Raspberry Pi. In general, use the source of these standard overlays as a guide to creating your own, unless you are using the same GPIO pins as you would be using if the hardware was plugged into the GPIO header of a Raspberry Pi.
+* what and where peripheral devices exist
+* which GPIOs are used
+* what functions those GPIOs have
+* what physical devices are connected
-Compiling these dts files to dtb files requires an up-to-date version of the xref:configuration.adoc#device-trees-overlays-and-parameters[Device Tree compiler] `dtc`. The way to install an appropriate version on Raspberry Pi is to run:
+This file sets up the GPIOs by overwriting the pin state in `dt-blob.bin` if it is different. It will also try to load drivers for the specific devices.
-----
-sudo apt install device-tree-compiler
-----
+The model-specific Device Tree file contains disabled entries for peripherals. It contains no GPIO pin definitions other than the eMMC/SD Card peripheral which has GPIO defs and always uses the same pins.
-If you are building your own kernel then the build host also gets a version in `scripts/dtc`. You can arrange for your overlays to be built automatically by adding them to `Makefile` in `arch/arm/boot/dts/overlays`, and using the 'dtbs' make target.
+=== Device Tree source and compilation
-=== Device Tree Debugging
+The Raspberry Pi OS image provides compiled `dtb` files, but the source `dts` files live in the https://github.com/raspberrypi/linux/tree/rpi-6.6.y/arch/arm/boot/dts/broadcom[Raspberry Pi Linux kernel branch]. Look for `rpi` in the file names.
-When the Linux kernel is booted on the ARM core(s), the GPU provides it with a fully assembled device tree, assembled from the base dts and any overlays. This full tree is available via the Linux proc interface in `/proc/device-tree`, where nodes become directories and properties become files.
+Default overlay `dts` files live at https://github.com/raspberrypi/linux/tree/rpi-6.6.y/arch/arm/boot/dts/overlays[`arch/arm/boot/dts/overlays`]. These overlay files are a good starting point for creating your own overlays. To compile these `dts` files to `dtb` files, use the xref:configuration.adoc#device-trees-overlays-and-parameters[Device Tree compiler] `dtc`.
-You can use `dtc` to write this out as a human readable dts file for debugging. You can see the fully assembled device tree, which is often very useful:
+When building your own kernel, the build host requires the Device Tree compiler in `scripts/dtc`. To build your overlays automatically, add them to the `dtbs` make target in `arch/arm/boot/dts/overlays/Makefile`.
-----
-dtc -I fs -O dts -o proc-dt.dts /proc/device-tree
-----
+=== Device Tree debugging
+
+When booting the Linux kernel, the GPU provides a fully assembled Device Tree created using the base `dts` and any overlays. This full tree is available via the Linux `proc` interface in `/proc/device-tree`. Nodes become directories and properties become files.
-As previously explained in the GPIO section, it is also very useful to use `raspi-gpio` to look at the setup of the GPIO pins to check that they are as you expect:
+You can use `dtc` to write this out as a human readable `dts` file for debugging. To see the fully assembled device tree, run the following command:
+[source,console]
----
-raspi-gpio get
+$ dtc -I fs -O dts -o proc-dt.dts /proc/device-tree
----
-If something seems to be going awry, useful information can also be found by dumping the GPU log messages:
+`pinctrl` provides the status of the GPIO pins. If something seems to be going awry, try dumping the GPU log messages:
+[source,console]
----
-sudo vcdbg log msg
+$ sudo vclog --msg
----
-You can include more diagnostics in the output by adding `dtdebug=1` to `config.txt`.
-
-=== Examples
-
-NOTE: Please use the https://forums.raspberrypi.com/viewforum.php?f=107[Device Tree subforum] on the Raspberry Pi forums to ask Device Tree related questions.
+TIP: To include even more diagnostics in the output, add `dtdebug=1` to `config.txt`.
-For these simple examples I used a CMIO board with peripherals attached via jumper wires.
+Use the https://forums.raspberrypi.com/viewforum.php?f=107[Device Tree Raspberry Pi forum] to ask Device Tree-related questions or report an issue.
-For each of the examples we assume a CM1+CMIO or CM3+CMIO3 board with a clean install of the latest Raspberry Pi OS Lite version on the Compute Module.
-
-The examples here require internet connectivity, so a USB hub plus keyboard plus wireless LAN or Ethernet dongle plugged into the CMIO USB port is recommended.
+=== Examples
-Please post any issues, bugs or questions on the Raspberry Pi https://forums.raspberrypi.com/viewforum.php?f=107[Device Tree subforum].
+The following examples use an IO Board with peripherals attached via jumper wires. We assume a CM1+CMIO or CM3+CMIO3, running a clean install of Raspberry Pi OS Lite. The examples here require internet connectivity, so we recommend a USB hub, keyboard, and wireless LAN or Ethernet dongle plugged into the IO Board USB port.
-[discrete]
-=== Example 1 - attaching an I2C RTC to BANK1 pins
+==== Attach an I2C RTC to Bank 1 pins
-In this simple example we wire an NXP PCF8523 real time clock (RTC) to the CMIO board BANK1 GPIO pins: 3V3, GND, I2C1_SDA on GPIO44 and I2C1_SCL on GPIO45.
+In this example, we wire an NXP PCF8523 real time clock (RTC) to the IO Board Bank 1 GPIO pins: 3V3, GND, I2C1_SDA on GPIO44 and I2C1_SCL on GPIO45.
-Download https://datasheets.raspberrypi.com/cm/minimal-cm-dt-blob.dts[minimal-cm-dt-blob.dts] and copy it to the SD card FAT partition, located in `/boot` when the Compute Module has booted.
+Download https://datasheets.raspberrypi.com/cm/minimal-cm-dt-blob.dts[`minimal-cm-dt-blob.dts`] and copy it to the boot partition in `/boot/firmware/`.
Edit `minimal-cm-dt-blob.dts` and change the pin states of GPIO44 and 45 to be I2C1 with pull-ups:
+[source,console]
----
-sudo nano /boot/minimal-cm-dt-blob.dts
+$ sudo nano /boot/firmware/minimal-cm-dt-blob.dts
----
-Change lines:
+Replace the following lines:
+[source,kotlin]
----
pin@p44 { function = "input"; termination = "pull_down"; }; // DEFAULT STATE WAS INPUT NO PULL
pin@p45 { function = "input"; termination = "pull_down"; }; // DEFAULT STATE WAS INPUT NO PULL
----
-to:
+With the following pull-up definitions:
+[source,kotlin]
----
pin@p44 { function = "i2c1"; termination = "pull_up"; }; // SDA1
pin@p45 { function = "i2c1"; termination = "pull_up"; }; // SCL1
----
-NOTE: We could use this `dt-blob.dts` with no changes The Linux Device Tree will (re)configure these pins during Linux kernel boot when the specific drivers are loaded, so it is up to you whether you modify `dt-blob.dts`. I like to configure `dt-blob.dts` to what I expect the final GPIOs to be, as they are then set to their final state as soon as possible during the GPU boot stage, but this is not strictly necessary. You may find that in some cases you do need pins to be configured at GPU boot time, so they are in a specific state when Linux drivers are loaded. For example, a reset line may need to be held in the correct orientation.
+We could use this `dt-blob.dts` with no changes, because the Linux Device Tree re-configures these pins during Linux kernel boot when the specific drivers load. However, if you configure `dt-blob.dts`, the GPIOs reach their final state as soon as possible during the GPU boot stage. In some cases, pins must be configured at GPU boot time so they are in a specific state when Linux drivers are loaded. For example, a reset line may need to be held in the correct orientation.
-Compile `dt-blob.bin`:
+Run the following command to compile `dt-blob.bin`:
+[source,console]
----
-sudo dtc -I dts -O dtb -o /boot/dt-blob.bin /boot/minimal-cm-dt-blob.dts
+$ sudo dtc -I dts -O dtb -o /boot/firmware/dt-blob.bin /boot/firmware/minimal-cm-dt-blob.dts
----
-Grab https://datasheets.raspberrypi.com/cm/example1-overlay.dts[example1-overlay.dts] and put it in `/boot` then compile it:
+Download https://datasheets.raspberrypi.com/cm/example1-overlay.dts[`example1-overlay.dts`], copy it to the boot partition in `/boot/firmware/`, then compile it with the following command:
+[source,console]
----
-sudo dtc -@ -I dts -O dtb -o /boot/overlays/example1.dtbo /boot/example1-overlay.dts
+$ sudo dtc -@ -I dts -O dtb -o /boot/firmware/overlays/example1.dtbo /boot/firmware/example1-overlay.dts
----
-NOTE: The '-@' in the `dtc` command line. This is necessary if you are compiling dts files with external references, as overlays tend to be.
+The `-@` flag compiles `dts` files with external references. It is usually necessary.
-Edit `/boot/config.txt` and add the line:
+Add the following line to xref:../computers/config_txt.adoc#what-is-config-txt[`/boot/firmware/config.txt`]:
+[source,ini]
----
dtoverlay=example1
----
-Now save and reboot.
+Finally, reboot with `sudo reboot`.
-Once rebooted, you should see an rtc0 entry in /dev. Running:
+Once rebooted, you should see an `rtc0` entry in `/dev`. Run the following command to view the hardware clock time:
+[source,console]
----
-sudo hwclock
+$ sudo hwclock
----
-will return with the hardware clock time, and not an error.
-
-[discrete]
-=== Example 2 - Attaching an ENC28J60 SPI Ethernet Controller on BANK0
+==== Attach an ENC28J60 SPI Ethernet controller on Bank 0
-In this example we use one of the already available overlays in /boot/overlays to add an ENC28J60 SPI Ethernet controller to BANK0. The Ethernet controller is connected to SPI pins CE0, MISO, MOSI and SCLK (GPIO8-11 respectively), as well as GPIO25 for a falling edge interrupt, and of course GND and 3V3.
+In this example, we use an overlay already defined in `/boot/firmware/overlays` to add an ENC28J60 SPI Ethernet controller to Bank 0. The Ethernet controller uses SPI pins CE0, MISO, MOSI and SCLK (GPIO8-11 respectively), GPIO25 for a falling edge interrupt, in addition to GND and 3.3V.
-In this example we won't change `dt-blob.bin`, although of course you can if you wish. We should see that Linux Device Tree correctly sets up the pins.
-
-Edit `/boot/config.txt` and add the line:
+In this example, we won't change `dt-blob.bin`. Instead, add the following line to `/boot/firmware/config.txt`:
+[source,ini]
----
dtoverlay=enc28j60
----
-Now save and reboot.
+Reboot with `sudo reboot`.
-Once rebooted you should see, as before, an rtc0 entry in /dev. Running:
+If you now run `ifconfig` you should see an additional `eth` entry for the ENC28J60 NIC. You should also have Ethernet connectivity. Run the following command to test your connectivity:
+[source,console]
----
-sudo hwclock
+$ ping 8.8.8.8
----
-will return with the hardware clock time, and not an error.
-
-You should also have Ethernet connectivity:
+Run the following command to show GPIO functions; GPIO8-11 should now provide ALT0 (SPI) functions:
+[source,console]
----
-ping 8.8.8.8
+$ pinctrl
----
-should work.
-
-finally running:
-
-----
-sudo raspi-gpio get
-----
-
-should show that GPIO8-11 have changed to ALT0 (SPI) functions.
-
diff --git a/documentation/asciidoc/computers/compute-module/cmio-camera.adoc b/documentation/asciidoc/computers/compute-module/cmio-camera.adoc
index 01274a2d7..a29dbbd82 100644
--- a/documentation/asciidoc/computers/compute-module/cmio-camera.adoc
+++ b/documentation/asciidoc/computers/compute-module/cmio-camera.adoc
@@ -1,165 +1,294 @@
-== Attaching a Raspberry Pi Camera Module
+== Attach a Camera Module
-[NOTE]
-====
-These instructions are intended for advanced users, if anything is unclear please use the https://forums.raspberrypi.com/viewforum.php?f=43[Raspberry Pi Camera forums] for technical help.
+The Compute Module has two CSI-2 camera interfaces: CAM1 and CAM0. This section explains how to connect one or two Raspberry Pi Cameras to a Compute Module using the CAM1 and CAM0 interfaces with a Compute Module I/O Board.
-Unless explicitly stated otherwise, these instructions will work identically on both the Compute Module 1 and Compute Module 3, attached to a Compute Module IO Board. Compute Module 4 is slightly different, so please refer to the appropriate section.
-====
+=== Update your system
-The Compute Module has two CSI-2 camera interfaces. CAM0 has two CSI-2 data lanes, whilst CAM1 has four data lanes. The Compute Module IO board exposes both of these interfaces. Note that the standard Raspberry Pi devices use CAM1, but only expose two data lanes.
-
-Please note that the camera modules are *not* designed to be hot pluggable. They should always be connected or disconnected with the power off.
-
-=== Updating your System
-
-The camera software is under constant development. Please ensure your system is up to date prior to using these instructions.
+Before configuring a camera, xref:../computers/raspberry-pi.adoc#update-the-bootloader-configuration[ensure that your Raspberry Pi firmware is up-to-date]:
+[source,console]
----
-sudo apt update
-sudo apt full-upgrade
+$ sudo apt update
+$ sudo apt full-upgrade
----
-=== Crypto Chip
+=== Connect one camera
+
+To connect a single camera to a Compute Module, complete the following steps:
-When using the Compute Module to drive cameras, it is NOT necessary to incorporate the crypto chip used on the Raspberry Pi--designed camera boards when attaching the OM5647, IMX219 or HQ Camera Modules directly to the Compute Module carrier board. The Raspberry Pi firmware will automatically detect the Compute Module and allow communications with the Camera Module to proceed without the crypto chip being present.
+. Disconnect the Compute Module from power.
+. Connect the Camera Module to the CAM1 port using an RPI-CAMERA board or a Raspberry Pi Zero camera cable.
++
+image::images/CMIO-Cam-Adapter.jpg[alt="Connecting the adapter board", width="60%"]
+
+. _(CM1, CM3, CM3+, and CM4S only)_: Connect the following GPIO pins with jumper cables:
+ * `0` to `CD1_SDA`
+ * `1` to `CD1_SCL`
+ * `2` to `CAM1_I01`
+ * `3` to `CAM1_I00`
++
+image::images/CMIO-Cam-GPIO.jpg[alt="GPIO connection for a single camera", width="60%"]
-=== Quickstart Guide
+. Reconnect the Compute Module to power.
-To connect a single camera:
+. Remove (or comment out with the prefix `#`) the following lines, if they exist, in `/boot/firmware/config.txt`:
++
+[source,ini]
+----
+camera_auto_detect=1
+----
++
+[source,ini]
+----
+dtparam=i2c_arm=on
+----
-. Power the Compute Module down.
-. On the Compute Module, run `sudo raspi-config` and enable the camera.
-. Connect the RPI-CAMERA board and Camera Module to the CAM1 port. As an alternative, the Raspberry Pi Zero camera cable can be used.
+. _(CM1, CM3, CM3+, and CM4S only)_: Add the following directive to `/boot/firmware/config.txt` to accommodate the swapped GPIO pin assignment on the I/O board:
+
-image::images/CMIO-Cam-Adapter.jpg[Connecting the adapter board]
+[source,ini]
+----
+dtoverlay=cm-swap-i2c0
+----
-. (CM1 & CM3 only) Connect GPIO pins together as shown below.
+. _(CM1, CM3, CM3+, and CM4S only)_: Add the following directive to `/boot/firmware/config.txt` to assign GPIO 3 as the CAM1 regulator:
+
-image::images/CMIO-Cam-GPIO.jpg[GPIO connection for a single camera]
+[source,ini]
+----
+dtparam=cam1_reg
+----
-. Power the Compute Module up and run `+sudo wget https://datasheets.raspberrypi.com/cmio/dt-blob-cam1.bin -O /boot/dt-blob.bin+`
-. Finally, reboot for the dt-blob.bin file to be read.
+. Add the appropriate directive to `/boot/firmware/config.txt` to manually configure the driver for your camera model:
++
+[%header,cols="1,1"]
+|===
+| camera model
+| directive
-To connect two cameras, follow the steps as for a single camera and then also:
+| v1 camera
+| `dtoverlay=ov5647`
-. Whilst powered down, repeat step 3 with CAM0.
-. (CM1 and CM3 only) Connect the GPIO pins for the second camera.
- image:images/CMIO-Cam-GPIO2.jpg[GPIO connection with additional camera]
-. (CM4 only) Add jumpers to J6.
-. Power up and run `+sudo wget https://datasheets.raspberrypi.com/cmio/dt-blob-dualcam.bin -O /boot/dt-blob.bin+`
-. Reboot for the dt-blob.bin file to be read.
+| v2 camera
+| `dtoverlay=imx219`
-NOTE: The default wiring uses GPIOs 2&3 to control the primary camera. These GPIOs can also be used for I2C, but doing so will result in a conflict, and the camera is unlikely to work.
-*Do not enable I2C via `dtparam=i2c_arm=on` if you wish to use the camera with the default wiring*
+| v3 camera
+| `dtoverlay=imx708`
-==== Software Support
+| HQ camera
+| `dtoverlay=imx477`
-The supplied camera applications `raspivid` and `raspistill` have the -cs (--camselect) option to specify which camera should be used.
+| GS camera
+| `dtoverlay=imx296`
+|===
-If you are writing your own camera application based on the MMAL API you can use the MMAL_PARAMETER_CAMERA_NUM parameter to set the current camera. E.g.
+. Reboot your Compute Module with `sudo reboot`.
+. Run the following command to check the list of detected cameras:
++
+[source,console]
----
-MMAL_PARAMETER_INT32_T camera_num = {{MMAL_PARAMETER_CAMERA_NUM, sizeof(camera_num)}, CAMERA_NUMBER};
-status = mmal_port_parameter_set(camera->control, &camera_num.hdr);
+$ rpicam-hello --list
----
+You should see your camera model, referred to by the driver directive in the table above, in the output.
+
+=== Connect two cameras
-=== Advanced Issues
+To connect two cameras to a Compute Module, complete the following steps:
-The Compute Module IO board has a 22-way 0.5mm FFC for each camera port, with CAM0 being a two-lane interface and CAM1 being the full four-lane interface. The standard Raspberry Pi uses a 15-way 1mm FFC cable, so you will need either an adapter (part# RPI-CAMERA) or a Raspberry Pi Zero camera cable.
+. Follow the single camera instructions above.
+. Disconnect the Compute Module from power.
+. Connect the Camera Module to the CAM0 port using an RPI-CAMERA board or a Raspberry Pi Zero camera cable.
++
+image::images/CMIO-Cam-Adapter.jpg[alt="Connect the adapter board", width="60%"]
+. _(CM1, CM3, CM3+, and CM4S only)_: Connect the following GPIO pins with jumper cables:
+ * `28` to `CD0_SDA`
+ * `29` to `CD0_SCL`
+ * `30` to `CAM0_I01`
+ * `31` to `CAM0_I00`
++
+image:images/CMIO-Cam-GPIO2.jpg[alt="GPIO connection with additional camera", width="60%"]
-The CMIO board for Compute Modules 1 & 3 differ slightly in approach to that for Compute Module 4. They will be considered separately.
+. _(CM4 and CM5 only)_: Connect the J6 GPIO pins with two vertical-orientation jumpers.
++
+image:images/j6_vertical.jpg[alt="Connect the J6 GPIO pins in vertical orientation", width="60%"]
-==== Compute Module 1 & 3
+. Reconnect the Compute Module to power.
-On the Compute Module IO board it is necessary to bridge the GPIOs and I2C interface required by the Raspberry Pi OS to the CAM1 connector. This is done by connecting the GPIOs from the J6 GPIO connector to the CD1_SDA/SCL and CAM1_IO0/1 pins on the J5 connector using jumper wires.
+. _(CM1, CM3, CM3+, and CM4S only)_: Add the following directive to `/boot/firmware/config.txt` to assign GPIO 31 as the CAM0 regulator:
++
+[source,ini]
+----
+dtparam=cam0_reg
+----
-NOTE: The pin numbers below are provided only as an example. LED and SHUTDOWN pins can be shared by both cameras, if required.
+. Add the appropriate directive to `/boot/firmware/config.txt` to manually configure the driver for your camera model:
++
+[%header,cols="1,1"]
+|===
+| camera model
+| directive
-The SDA and SCL pins must be either GPIOs 0 and 1, GPIOs 28 and 29, or GPIOs 44 and 45, and must be individual to each camera.
+| v1 camera
+| `dtoverlay=ov5647,cam0`
-===== Steps to attach a Raspberry Pi Camera (to CAM1)
+| v2 camera
+| `dtoverlay=imx219,cam0`
-. Attach the 0.5mm 22W FFC flexi (included with the RPI-CAMERA board) to the CAM1 connector (flex contacts face down). As an alternative, the Raspberry Pi Zero camera cable can be used.
-. Attach the RPI-CAMERA adaptor board to the other end of the 0.5mm flex (flex contacts face down).
-. Attach a Raspberry Pi Camera to the other, larger 15W 1mm FFC on the RPI-CAMERA adaptor board (*contacts on the Raspberry Pi Camera flex must face up*).
-. Attach CD1_SDA (J6 pin 37) to GPIO0 (J5 pin 1).
-. Attach CD1_SCL (J6 pin 39) to GPIO1 (J5 pin 3).
-. Attach CAM1_IO1 (J6 pin 41) to GPIO2 (J5 pin 5).
-. Attach CAM1_IO0 (J6 pin 43) to GPIO3 (J5 pin 7).
+| v3 camera
+| `dtoverlay=imx708,cam0`
-Note, the numbers in brackets are conventional, physical pin numbers, numbered from left to right, top to bottom. The numbers on the silkscreen correspond to the Broadcom SoC GPIO numbers.
+| HQ camera
+| `dtoverlay=imx477,cam0`
-===== Steps to attach a second Raspberry Pi Camera (to CAM0)
+| GS camera
+| `dtoverlay=imx296,cam0`
+|===
-Attach the second camera to the (CAM0) connector as before.
+. Reboot your Compute Module with `sudo reboot`.
-Connect up the I2C and GPIO lines.
+. Run the following command to check the list of detected cameras:
++
+[source,console]
+----
+$ rpicam-hello --list
+----
++
+You should see both camera models, referred to by the driver directives in the table above, in the output.
-. Attach CD0_SDA (J6 pin 45) to GPIO28 (J6 pin 1).
-. Attach CD0_SCL (J6 pin 47) to GPIO29 (J6 pin 3).
-. Attach CAM0_IO1 (J6 pin 49) to GPIO30 (J6 pin 5).
-. Attach CAM0_IO0 (J6 pin 51) to GPIO31 (J6 pin 7).
+=== Software
-==== Compute Module 4
+Raspberry Pi OS includes the `libcamera` library to help you take images with your Raspberry Pi.
-On the Compute Module 4 IO board the CAM1 connector is already wired to the I2C on GPIOs 44 & 45, and the shutdown line is connected to GPIO 5 on the GPIO expander. There is no LED signal wired through. No hardware changes are required to use CAM1 other than connecting the 22pin FFC to the CAM1 connector (flex contacts face down).
+==== Take a picture
-To connect a second Raspberry Pi camera (to CAM0), two jumpers must be added to J6 in a vertical orientation. The CAM0 connector shares the shutdown line with CAM1.
+Use the following command to immediately take a picture and save it to a file in PNG encoding using the `MMDDhhmmss` date format as a filename:
-==== Configuring default pin states (all CM variants)
+[source,console]
+----
+$ rpicam-still --datetime -e png
+----
-The GPIOs that we are using for the camera default to input mode on the Compute Module. To xref:configuration.adoc#changing-the-default-pin-configuration[override these default settings] and also tell the system that these are the pins to be used by the camera, we need to create a `dt-blob.bin` that is loaded by the firmware when the system boots up. This file is built from a source dts file that contains the required settings, and placed on the boot partition.
+Use the `-t` option to add a delay in milliseconds.
+Use the `--width` and `--height` options to specify a width and height for the image.
-<> are provided at the bottom of this document. These use the default wiring as described in this page.
+==== Take a video
-The `pin_config` section in the `pins_cm { }` (Compute Module 1), `pins_cm3 { }` (Compute Module 3), or `pins_cm4 { }` (Compute Module 4) section of the source dts needs the camera's LED and power enable pins set to outputs:
+Use the following command to immediately start recording a ten-second long video and save it to a file with the h264 codec named `video.h264`:
+[source,console]
----
-pin@p2 { function = "output"; termination = "no_pulling"; };
-pin@p3 { function = "output"; termination = "no_pulling"; };
+$ rpicam-vid -t 10000 -o video.h264
----
-To tell the firmware which pins to use and how many cameras to look for, add the following to the `pin_defines` section:
+==== Specify which camera to use
+
+By default, `libcamera` always uses the camera with index `0` in the `--list-cameras` list.
+To specify a camera option, get an index value for each camera from the following command:
+[source,console]
----
-pin_define@CAMERA_0_LED { type = "internal"; number = <2>; };
-pin_define@CAMERA_0_SHUTDOWN { type = "internal"; number = <3>; };
-pin_define@CAMERA_0_UNICAM_PORT { type = "internal"; number = <1>; };
-pin_define@CAMERA_0_I2C_PORT { type = "internal"; number = <0>; };
-pin_define@CAMERA_0_SDA_PIN { type = "internal"; number = <0>; };
-pin_define@CAMERA_0_SCL_PIN { type = "internal"; number = <1>; };
+$ rpicam-hello --list-cameras
+Available cameras
+-----------------
+0 : imx477 [4056x3040] (/base/soc/i2c0mux/i2c@1/imx477@1a)
+ Modes: 'SRGGB10_CSI2P' : 1332x990 [120.05 fps - (696, 528)/2664x1980 crop]
+ 'SRGGB12_CSI2P' : 2028x1080 [50.03 fps - (0, 440)/4056x2160 crop]
+ 2028x1520 [40.01 fps - (0, 0)/4056x3040 crop]
+ 4056x3040 [10.00 fps - (0, 0)/4056x3040 crop]
+
+1 : imx708 [4608x2592] (/base/soc/i2c0mux/i2c@0/imx708@1a)
+ Modes: 'SRGGB10_CSI2P' : 1536x864 [120.13 fps - (768, 432)/3072x1728 crop]
+ 2304x1296 [56.03 fps - (0, 0)/4608x2592 crop]
+ 4608x2592 [14.35 fps - (0, 0)/4608x2592 crop]
----
-Indentation and line breaks are not critical, so the example files expand these blocks out for readability.
+In the above output:
+
+* `imx477` refers to a HQ camera with an index of `0`
+* `imx708` refers to a v3 camera with an index of `1`
-The Compute Module's *pin_config* section needs the second camera's LED and power enable pins configured:
+To use the HQ camera, pass its index (`0`) to the `--camera` `libcamera` option:
+[source,console]
----
-pin@p30 { function = "output"; termination = "no_pulling"; };
-pin@p31 { function = "output"; termination = "no_pulling"; };
+$ rpicam-hello --camera 0
----
-In the Compute Module's *pin_defines* section of the dts file, change the *NUM_CAMERAS* parameter to 2 and add the following:
+To use the v3 camera, pass its index (`1`) to the `--camera` `libcamera` option:
+
+[source,console]
+----
+$ rpicam-hello --camera 1
+----
+
+
+=== I2C mapping of GPIO pins
+
+By default, the supplied camera drivers assume that CAM1 uses `i2c-10` and CAM0 uses `i2c-0`. Compute module I/O boards map the following GPIO pins to `i2c-10` and `i2c-0`:
+
+[%header,cols="1,1,1"]
+|===
+| I/O Board Model
+| `i2c-10` pins
+| `i2c-0` pins
+| CM4 I/O Board
+| GPIOs 44,45
+| GPIOs 0,1
+
+| CM1, CM3, CM3+, CM4S I/O Board
+| GPIOs 0,1
+| GPIOs 28,29
+|===
+
+To connect a camera to the CM1, CM3, CM3+ and CM4S I/O Board, add the following directive to `/boot/firmware/config.txt` to accommodate the swapped pin assignment:
+
+[source,ini]
----
-pin_define@CAMERA_1_LED { type = "internal"; number = <30>; };
-pin_define@CAMERA_1_SHUTDOWN { type = "internal"; number = <31>; };
-pin_define@CAMERA_1_UNICAM_PORT { type = "internal"; number = <0>; };
-pin_define@CAMERA_1_I2C_PORT { type = "internal"; number = <0>; };
-pin_define@CAMERA_1_SDA_PIN { type = "internal"; number = <28>; };
-pin_define@CAMERA_1_SCL_PIN { type = "internal"; number = <29>; };
+dtoverlay=cm-swap-i2c0
----
-[[sample-device-tree-source-files]]
-==== Sample device tree source files
+Alternative boards may use other pin assignments. Check the documentation for your board and use the following alternate overrides depending on your layout:
+
+[%header,cols="1,1"]
+|===
+| Swap
+| Override
+
+| Use GPIOs 0,1 for i2c0
+| `i2c0-gpio0`
+
+| Use GPIOs 28,29 for i2c0 (default)
+| `i2c0-gpio28`
+
+| Use GPIOs 44,45 for i2c0
+| `i2c0-gpio44`
+
+| Use GPIOs 0,1 for i2c10 (default)
+| `i2c10-gpio0`
+
+| Use GPIOs 28,29 for i2c10
+| `i2c10-gpio28`
+
+| Use GPIOs 44,45 for i2c10
+| `i2c10-gpio44`
+|===
+
+==== GPIO pins for shutdown
+
+For camera shutdown, Device Tree uses the pins assigned by the `cam1_reg` and `cam0_reg` overlays.
+
+The CM4 IO board provides a single GPIO pin for both aliases, so both cameras share the same regulator.
+
+The CM1, CM3, CM3+, and CM4S I/O boards provide no GPIO pin for `cam1_reg` and `cam0_reg`, so the regulators are disabled on those boards. However, you can enable them with the following directives in `/boot/firmware/config.txt`:
-https://datasheets.raspberrypi.com/cmio/dt-blob-cam1.dts[Enable CAM1 only]
+* `dtparam=cam1_reg`
+* `dtparam=cam0_reg`
-https://datasheets.raspberrypi.com/cmio/dt-blob-dualcam.dts[Enable CAM1 and CAM0]
+To assign `cam1_reg` and `cam0_reg` to a specific pin on a custom board, use the following directives in `/boot/firmware/config.txt`:
-==== Compiling a DTS file to a device tree blob
+* `dtparam=cam1_reg_gpio=`
+* `dtparam=cam0_reg_gpio=`
-Once all the required changes have been made to the `dts` file, it needs to be compiled and placed on the boot partition of the device.
+For example, to use pin 42 as the regulator for CAM1, add the directive `dtparam=cam1_reg_gpio=42` to `/boot/firmware/config.txt`.
-Instructions for doing this can be found on the xref:configuration.adoc#changing-the-default-pin-configuration[Pin Configuration] page.
+These directives only work for GPIO pins connected directly to the SoC, not for expander GPIO pins.
diff --git a/documentation/asciidoc/computers/compute-module/cmio-display.adoc b/documentation/asciidoc/computers/compute-module/cmio-display.adoc
index d99f1e253..c8f9b4c63 100644
--- a/documentation/asciidoc/computers/compute-module/cmio-display.adoc
+++ b/documentation/asciidoc/computers/compute-module/cmio-display.adoc
@@ -1,125 +1,71 @@
-== Attaching the Official 7-inch Display
+== Attaching the Touch Display LCD panel
-NOTE: These instructions are intended for advanced users, if anything is unclear please use the https://forums.raspberrypi.com/viewforum.php?f=98[Raspberry Pi Compute Module forums] for technical help.
+Update your system software and firmware to the latest version before starting. Compute Modules mostly use the same process, but sometimes physical differences force changes for a particular model.
-Please ensure your system software is updated before starting. Largely speaking the approach taken for Compute Modules 1, 3, and 4 is the same, but there are minor differences in physical setup required. It will be indicated where a step applies only to a specific platform.
+=== Connect a display to DISP1/DSI1
-WARNING: The Raspberry Pi Zero camera cable cannot be used as an alternative to the RPI-DISPLAY adaptor, because its wiring is different.
+NOTE: The Raspberry Pi Zero camera cable can't be used as an alternative to the RPI-DISPLAY adapter. The two cables have distinct wiring.
-WARNING: Please note that the display is *not* designed to be hot pluggable. It (and camera modules) should always be connected or disconnected with the power off.
+To connect a display to `DISP1/DSI1`:
-=== Quickstart Guide (Display Only)
+. Disconnect the Compute Module from power.
+. Connect the display to the `DISP1/DSI1` port on the Compute Module IO board through the 22W to 15W display adapter.
+. Complete the appropriate jumper connections:
+ - For *CM1*, *CM3*, *CM3+*, and *CM4S*, connect the following GPIO pins with jumper cables:
+ * `0` to `CD1_SDA`
+ * `1` to `CD1_SCL`
+ - For *CM5*, on the Compute Module 5 IO board, add the appropriate jumpers to J6, as indicated on the silkscreen.
+. Reconnect the Compute Module to power.
+. Add `dtoverlay=vc4-kms-dsi-7inch` to xref:../computers/config_txt.adoc#what-is-config-txt[`/boot/firmware/config.txt`].
+. Reboot your Compute Module with `sudo reboot`. Your device should detect and begin displaying output to your display.
-Connecting to DISP1
+=== Connect a display to DISP0/DSI0
-. Connect the display to the DISP1 port on the Compute Module IO board through the 22W to 15W display adaptor.
-. (CM1 and CM3 only) Connect these pins together with jumper wires:
-+
-----
- GPIO0 - CD1_SDA
- GPIO1 - CD1_SCL
-----
-
-. Power up the Compute Module and run:
-+
-`+sudo wget https://datasheets.raspberrypi.com/cmio/dt-blob-disp1-only.bin -O /boot/dt-blob.bin+`
-
-. Reboot for the `dt-blob.bin` file to be read.
-
-
-Connecting to DISP0
-
-. Connect the display to the DISP0 port on the Compute Module IO board through the 22W to 15W display adaptor.
-. (CM1 and CM3 only) Connect these pins together with jumper wires:
-+
-----
- GPIO28 - CD0_SDA
- GPIO29 - CD0_SCL
-----
+To connect a display to `DISP0/DSI0` on CM1, CM3, and CM4 IO boards:
-. Power up the Compute Module and run:
-+
-`+sudo wget https://datasheets.raspberrypi.com/cmio/dt-blob-disp0-only.bin -O /boot/dt-blob.bin+`
+. Connect the display to the `DISP0/DSI0` port on the Compute Module IO board through the 22W to 15W display adapter.
+. Complete the appropriate jumper connections:
+ - For *CM1*, *CM3*, *CM3+*, and *CM4S*, connect the following GPIO pins with jumper cables:
+ * `28` to `CD0_SDA`
+ * `29` to `CD0_SCL`
+ - For *CM4*, on the Compute Module 4 IO board, add the appropriate jumpers to J6, as indicated on the silkscreen.
+. Reconnect the Compute Module to power.
+. Add `dtoverlay=vc4-kms-dsi-7inch,dsi0` to `/boot/firmware/config.txt`.
+. Reboot your Compute Module with `sudo reboot`. Your device should detect and begin displaying output to your display.
-. Reboot for the `dt-blob.bin` file to be read.
+=== Disable touchscreen
-=== Quickstart Guide (Display and Cameras)
+The touchscreen requires no additional configuration. Connect it to your Compute Module; both the touchscreen element and display work when successfully detected.
-==== To enable the display and one camera:*
+To disable the touchscreen element, but still use the display, add the following line to `/boot/firmware/config.txt`:
-. Connect the display to the DISP1 port on the Compute Module IO board through the 22W to 15W display adaptor, called RPI-DISPLAY.
-. Connect the Camera Module to the CAM1 port on the Compute Module IO board through the 22W to 15W adaptor called RPI-CAMERA. Alternatively, the Raspberry Pi Zero camera cable can be used.
-. (CM1 and CM3 only) Connect these pins together with jumper wires:
-+
+[source,ini]
----
- GPIO0 - CD1_SDA
- GPIO1 - CD1_SCL
- GPIO2 - CAM1_IO1
- GPIO3 - CAM1_IO0
+disable_touchscreen=1
----
-+
-image:images/CMIO-Cam-Disp-GPIO.jpg[GPIO connection for a single display and Camera Modules]
- (Please note this image needs to be updated to have the extra jumper leads removed and use the standard wiring (2&3 not 4&5))
-. Power up the Compute Module and run:
-+
-`+sudo wget https://datasheets.raspberrypi.com/cmio/dt-blob-disp1-cam1.bin -O /boot/dt-blob.bin+`
+=== Disable display
-. Reboot for the `dt-blob.bin` file to be read.
+To entirely ignore the display when connected, add the following line to `/boot/firmware/config.txt`:
-==== To enable the display and both cameras:*
-
-. Follow the steps for connecting the display and one camera above.
-. Connect the Camera Module to the CAM0 port on the Compute Module IO board through the 22W to 15W adaptor called RPI-CAMERA. Alternatively, the Raspberry Pi Zero camera cable can be used.
-. (CM1 and CM3 only) Add links:
-+
+[source,ini]
----
- GPIO28 - CD0_SDA
- GPIO29 - CD0_SCL
- GPIO30 - CAM0_IO1
- GPIO31 - CAM0_IO0
+ignore_lcd=1
----
-. (CM4 only) Add jumpers to J6.
-. Power up the Compute Module and run:
-+
-`+sudo wget https://datasheets.raspberrypi.com/cmio/dt-blob-disp1-cam2.bin -O /boot/dt-blob.bin+`
-
-. Reboot for the `dt-blob.bin` file to be read.
-+
-image:images/CMIO-Cam-Disp-Example.jpg[Camera Preview on the 7 inch display]
- (Please note this image needs to be updated to show two Camera Modules and the standard wiring)
-
-=== Software Support
-
-There is no additional configuration required to enable the touchscreen. The touch interface should work out of the box once the screen is successfully detected.
-
-If you wish to disable the touchscreen element and only use the display side, you can add the command `disable_touchscreen=1` to /boot/config.txt to do so.
+== Attaching the Touch Display 2 LCD panel
-To make the firmware to ignore the display even if connected, then add `ignore_lcd=1` to /boot/config.txt.
+Touch Display 2 is an LCD display designed for Raspberry Pi devices (see https://www.raspberrypi.com/products/touch-display-2/). It's available in two sizes: 5 inches or 7 inches (diagonally). For more information about these options, see *Specifications* in xref:../accessories/touch-display-2.adoc[Touch Display 2].
-=== Firmware Configuration
-
-The firmware looks at the dt-blob.bin file for the relevant configuration to use
-for the screen. It looks at the pin_number@ defines for
-
-----
-DISPLAY_I2C_PORT
-DISPLAY_SDA
-DISPLAY_SCL
-DISPLAY_DSI_PORT
-----
+Regardless of the size that you use, Touch Display 2 connects in the same way as the original Touch Display, but the software setup on Compute Modules is slightly different because it uses a different display driver. For connection details, see *Connectors* in xref:../accessories/touch-display-2.adoc[Touch Display 2].
-The I2C port, SDA and SCL pin numbers are self explanatory. DISPLAY_DSI_PORT
-selects between DSI1 (the default) and DSI0.
+To enable Touch Display 2 on `DISP1/DSI1`, edit the `/boot/firmware/config.txt` file to add the following. You must also add jumpers to J6 as indicated on the silkscreen.
-Once all the required changes have been made to the `dts` file, it needs to be compiled and placed on the boot partition of the device.
+- For the *5-inch* display: `dtoverlay=vc4-kms-dsi-ili9881-5inch`
+- For the *7-inch* display: `dtoverlay=vc4-kms-dsi-ili9881-7inch`
-Instructions for doing this can be found on the xref:configuration.adoc#changing-the-default-pin-configuration[Pin Configuration] page.
+To use `DISP0/DSI0`, append `,dsi0` to the overlay name.
-==== Sources
+- For the *5-inch* display: `dtoverlay=vc4-kms-dsi-ili9881-5inch,dsi0`
+- For the *7-inch* display: `dtoverlay=vc4-kms-dsi-ili9881-7inch,dsi0`
-* https://datasheets.raspberrypi.com/cmio/dt-blob-disp1-only.dts[dt-blob-disp1-only.dts]
-* https://datasheets.raspberrypi.com/cmio/dt-blob-disp1-cam1.dts[dt-blob-disp1-cam1.dts]
-* https://datasheets.raspberrypi.com/cmio/dt-blob-disp1-cam2.dts[dt-blob-disp1-cam2.dts]
-* https://datasheets.raspberrypi.com/cmio/dt-blob-disp0-only.dts[dt-blob-disp0-only.dts] (Uses wiring as for CAM0)
diff --git a/documentation/asciidoc/computers/compute-module/datasheet.adoc b/documentation/asciidoc/computers/compute-module/datasheet.adoc
index f5b57b92c..11d52ccb8 100644
--- a/documentation/asciidoc/computers/compute-module/datasheet.adoc
+++ b/documentation/asciidoc/computers/compute-module/datasheet.adoc
@@ -1,43 +1,84 @@
-== Datasheets and Schematics
+== Specifications
-=== Compute Module 4
+=== Compute Module 5 datasheet
-The latest version of the Compute Module is the Compute Module 4 (CM4). It is the recommended Compute Module for all current and future development.
+To learn more about Compute Module 5 (CM5) and its corresponding IO Board, see the following documents:
-* https://datasheets.raspberrypi.com/cm4/cm4-datasheet.pdf[Compute Module 4 Datasheet]
-* https://datasheets.raspberrypi.com/cm4io/cm4io-datasheet.pdf[Compute Module 4 IO Board Datasheet]
+* https://datasheets.raspberrypi.com/cm5/cm5-datasheet.pdf[CM5 datasheet]
+* https://rpltd.co/cm5-design-files[CM5 design files]
-NOTE: Schematics are not available for the Compute Module 4, but are available for the IO board. Schematics for the CMIO4 board are included in the datasheet.
+=== Compute Module 5 IO Board datasheet
-There is also a KiCAD PCB design set available:
+Design data for the Compute Module 5 IO Board (CM5IO) can be found in its datasheet:
-* https://datasheets.raspberrypi.com/cm4io/CM4IO-KiCAD.zip[Compute Module 4 IO Board KiCAD files]
+* https://datasheets.raspberrypi.com/cm5/cm5io-datasheet.pdf[CM5IO datasheet]
+* https://rpltd.co/cm5io-design-files[CM5IO design files]
-=== Older Products
+=== Compute Module 4 datasheet
-Raspberry Pi CM1, CM3 and CM3L are supported products with an End-of-Life (EOL) date no earlier than January 2026. The Compute Module 3+ offers improved thermal performance, and a wider range of Flash memory options.
+To learn more about Compute Module 4 (CM4) and its corresponding IO Board, see the following documents:
-* https://datasheets.raspberrypi.com/cm/cm1-and-cm3-datasheet.pdf[Compute Module 1 and Compute Module 3]
+* https://datasheets.raspberrypi.com/cm4/cm4-datasheet.pdf[CM4 datasheet]
-Raspberry Pi CM3+ and CM3+ Lite are supported prodicts with an End-of-Life (EOL) date no earlier than January 2026.
+[.whitepaper, title="Configure the Compute Module 4", subtitle="", link=https://pip.raspberrypi.com/categories/685-whitepapers-app-notes/documents/RP-003470-WP/Configuring-the-Compute-Module-4.pdf]
+****
+The Compute Module 4 is available in a number of different hardware configurations. Some use cases disable certain features that aren't required.
-* https://datasheets.raspberrypi.com/cm/cm3-plus-datasheet.pdf[Compute Module 3+]
+This document describes how to disable various hardware and software interfaces.
+****
-Schematics for the Compute Module 1, 3 and 3L
+=== Compute Module 4 IO Board datasheet
-* https://datasheets.raspberrypi.com/cm/cm1-schematics.pdf[CM1 Rev 1.1]
-* https://datasheets.raspberrypi.com/cm/cm3-schematics.pdf[CM3 and CM3L Rev 1.0]
+Design data for the Compute Module 4 IO Board (CM4IO) can be found in its datasheet:
-Schematics for the Compute Module IO board (CMIO):
+* https://datasheets.raspberrypi.com/cm4io/cm4io-datasheet.pdf[CM4IO datasheet]
-* https://datasheets.raspberrypi.com/cmio/cmio-schematics.pdf[CMIO Rev 3.0] (Supports CM1, CM3, CM3L, CM3+ and CM3+L)
+We also provide a KiCad PCB design set for the CM4 IO Board:
-Schematics for the Compute Module camera/display adapter board (CMCDA):
+* https://datasheets.raspberrypi.com/cm4io/CM4IO-KiCAD.zip[CM4IO KiCad files]
-* https://datasheets.raspberrypi.com/cmcda/cmcda-schematics.pdf[CMCDA Rev 1.1]
+=== Compute Module 4S datasheet
-==== Under Voltage Detection
+Compute Module 4S (CM4S) offers the internals of CM4 in the DDR2-SODIMM form factor of CM1, CM3, and CM3+. To learn more about CM4S, see the following documents:
-Schematic for an under-voltage detection circuit, as used in older models of Raspberry Pi:
+* https://datasheets.raspberrypi.com/cm4s/cm4s-datasheet.pdf[CM4S datasheet]
+
+=== Compute Module 3+ datasheet
+
+Compute Module 3+ (CM3+) is a supported product with an end-of-life (EOL) date no earlier than January 2028. To learn more about CM3+ and its corresponding IO Board, see the following documents:
+
+* https://datasheets.raspberrypi.com/cm/cm3-plus-datasheet.pdf[CM3+ datasheet]
+
+=== Compute Module 1 and Compute Module 3 datasheet
+
+Raspberry Pi Compute Module 1 (CM1) and Compute Module 3 (CM3) are supported products with an end-of-life (EOL) date no earlier than January 2026. To learn more about CM1 and CM3, see the following documents:
+
+* https://datasheets.raspberrypi.com/cm/cm1-and-cm3-datasheet.pdf[CM1 and CM3 datasheet]
+* https://datasheets.raspberrypi.com/cm/cm1-schematics.pdf[Schematics for CM1]
+* https://datasheets.raspberrypi.com/cm/cm3-schematics.pdf[Schematics for CM3]
+
+[.whitepaper, title="Transition from Compute Module 1 or Compute Module 3 to Compute Module 4", subtitle="", link=https://pip.raspberrypi.com/categories/685-whitepapers-app-notes/documents/RP-003469-WP/Transitioning-from-CM3-to-CM4.pdf]
+****
+This white paper helps developers migrate from Compute Module 1 or Compute Module 3 to Compute Module 4.
+****
+
+=== Compute Module IO Board schematics
+
+The Compute Module IO Board (CMIO) provides a variety of interfaces for CM1, CM3, CM3+, and CM4S. The Compute Module IO Board comes in two variants: Version 1 and Version 3. Version 1 is only compatible with CM1. Version 3 is compatible with CM1, CM3, CM3+, and CM4S. Compute Module IO Board Version 3 is sometimes written as the shorthand CMIO3. To learn more about CMIO1 and CMIO3, see the following documents:
+
+* https://datasheets.raspberrypi.com/cmio/cmio-schematics.pdf[Schematics for CMIO]
+* https://datasheets.raspberrypi.com/cmio/RPi-CMIO-R1P2.zip[Design documents for CMIO Version 1.2 (CMIO/CMIO1)]
+* https://datasheets.raspberrypi.com/cmio/RPi-CMIO-R3P0.zip[Design documents for CMIO Version 3.0 (CMIO3)]
+
+=== Compute Module Camera/Display Adapter Board schematics
+
+The Compute Module Camera/Display Adapter Board (CMCDA) provides camera and display interfaces for Compute Modules. To learn more about the CMCDA, see the following documents:
+
+* https://datasheets.raspberrypi.com/cmcda/cmcda-schematics.pdf[Schematics for the CMCDA]
+* https://datasheets.raspberrypi.com/cmcda/RPi-CMCDA-1P1.zip[Design documents for CMCDA Version 1.1]
+
+=== Under-voltage detection
+
+The following schematic describes an under-voltage detection circuit, as used in older models of Raspberry Pi:
image::images/under_voltage_detect.png[Under-voltage detect]
diff --git a/documentation/asciidoc/computers/compute-module/designfiles.adoc b/documentation/asciidoc/computers/compute-module/designfiles.adoc
deleted file mode 100644
index 0392878bf..000000000
--- a/documentation/asciidoc/computers/compute-module/designfiles.adoc
+++ /dev/null
@@ -1,22 +0,0 @@
-== Design Files for CMIO Boards
-
-[discrete]
-=== Compute Module IO board for CM4
-
-Design data for the Compute Module 4 IO board can be found in its datasheet:
-
-* https://datasheets.raspberrypi.com/cm4io/cm4io-datasheet.pdf[Compute Module 4 IO Board datasheet]
-
-There is also a KiCAD PCB design set available:
-
-* https://datasheets.raspberrypi.com/cm4io/CM4IO-KiCAD.zip[Compute Module 4 IO Board KiCAD files]
-
-[discrete]
-=== Older Products
-
-* https://datasheets.raspberrypi.com/cmio/RPi-CMIO-R1P2.zip[CMIO Rev 1.2]
-* https://datasheets.raspberrypi.com/cmio/RPi-CMIO-R3P0.zip[CMIO Rev 3.0]
-
-Design data for the Compute Module camera/display adapter board (CMCDA):
-
-* https://datasheets.raspberrypi.com/cmcda/RPi-CMCDA-1P1.zip[CMCDA Rev 1.1]
diff --git a/documentation/asciidoc/computers/compute-module/images/CMIO-Cam-Disp-Example.jpg b/documentation/asciidoc/computers/compute-module/images/CMIO-Cam-Disp-Example.jpg
deleted file mode 100644
index c7c8a60c2..000000000
Binary files a/documentation/asciidoc/computers/compute-module/images/CMIO-Cam-Disp-Example.jpg and /dev/null differ
diff --git a/documentation/asciidoc/computers/compute-module/images/CMIO-Cam-Disp-GPIO.jpg b/documentation/asciidoc/computers/compute-module/images/CMIO-Cam-Disp-GPIO.jpg
deleted file mode 100644
index e5cbdd81f..000000000
Binary files a/documentation/asciidoc/computers/compute-module/images/CMIO-Cam-Disp-GPIO.jpg and /dev/null differ
diff --git a/documentation/asciidoc/computers/compute-module/images/cm1.jpg b/documentation/asciidoc/computers/compute-module/images/cm1.jpg
new file mode 100644
index 000000000..caa01fec3
Binary files /dev/null and b/documentation/asciidoc/computers/compute-module/images/cm1.jpg differ
diff --git a/documentation/asciidoc/computers/compute-module/images/cm3-plus.jpg b/documentation/asciidoc/computers/compute-module/images/cm3-plus.jpg
new file mode 100644
index 000000000..dc266211b
Binary files /dev/null and b/documentation/asciidoc/computers/compute-module/images/cm3-plus.jpg differ
diff --git a/documentation/asciidoc/computers/compute-module/images/cm3.jpg b/documentation/asciidoc/computers/compute-module/images/cm3.jpg
new file mode 100644
index 000000000..c82500604
Binary files /dev/null and b/documentation/asciidoc/computers/compute-module/images/cm3.jpg differ
diff --git a/documentation/asciidoc/computers/compute-module/images/cm4-cm5-antenna-assembly.svg b/documentation/asciidoc/computers/compute-module/images/cm4-cm5-antenna-assembly.svg
new file mode 100644
index 000000000..596cda012
--- /dev/null
+++ b/documentation/asciidoc/computers/compute-module/images/cm4-cm5-antenna-assembly.svg
@@ -0,0 +1,297 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 5
+
+
+
+
+ 3
+
+
+
+
+
+ 1
+
+
+
+
+
+ 2
+
+
+
+
+ 4
+
\ No newline at end of file
diff --git a/documentation/asciidoc/computers/compute-module/images/cm4-cm5-antenna-physical.png b/documentation/asciidoc/computers/compute-module/images/cm4-cm5-antenna-physical.png
new file mode 100644
index 000000000..7fcd0da44
Binary files /dev/null and b/documentation/asciidoc/computers/compute-module/images/cm4-cm5-antenna-physical.png differ
diff --git a/documentation/asciidoc/computers/compute-module/images/cm4-cm5-antenna-physical.svg b/documentation/asciidoc/computers/compute-module/images/cm4-cm5-antenna-physical.svg
new file mode 100644
index 000000000..232dc6e76
--- /dev/null
+++ b/documentation/asciidoc/computers/compute-module/images/cm4-cm5-antenna-physical.svg
@@ -0,0 +1,4711 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 8
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 10
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 87.5 ± 1
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 1/4–36UNS–2B
+ 1/4–36UNS–2A
+ 11
+
+ Milling unilateral 5.85 ± 0.02
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 2.0
+ 205 ± 1
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ S=8
+
+
+ 6.25
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Note: All dimensions in mm All dimensions are app ro ximate and for reference purposes only. The dimensions shown should not be used for p r oducing p r oduction data The dimensions are subject t o pa r t and manufacturing t ole r ances Dimensions may be subject t o change
+
diff --git a/documentation/asciidoc/computers/compute-module/images/cm4-cm5-antenna.jpg b/documentation/asciidoc/computers/compute-module/images/cm4-cm5-antenna.jpg
new file mode 100644
index 000000000..2dd3fbcd7
Binary files /dev/null and b/documentation/asciidoc/computers/compute-module/images/cm4-cm5-antenna.jpg differ
diff --git a/documentation/asciidoc/computers/compute-module/images/cm4.jpg b/documentation/asciidoc/computers/compute-module/images/cm4.jpg
new file mode 100644
index 000000000..a60f5b73b
Binary files /dev/null and b/documentation/asciidoc/computers/compute-module/images/cm4.jpg differ
diff --git a/documentation/asciidoc/computers/compute-module/images/cm4io.jpg b/documentation/asciidoc/computers/compute-module/images/cm4io.jpg
new file mode 100644
index 000000000..fe4ccab2b
Binary files /dev/null and b/documentation/asciidoc/computers/compute-module/images/cm4io.jpg differ
diff --git a/documentation/asciidoc/computers/compute-module/images/cm4s.jpg b/documentation/asciidoc/computers/compute-module/images/cm4s.jpg
new file mode 100644
index 000000000..7119617d8
Binary files /dev/null and b/documentation/asciidoc/computers/compute-module/images/cm4s.jpg differ
diff --git a/documentation/asciidoc/computers/compute-module/images/cm5-case-physical.png b/documentation/asciidoc/computers/compute-module/images/cm5-case-physical.png
new file mode 100644
index 000000000..05323596a
Binary files /dev/null and b/documentation/asciidoc/computers/compute-module/images/cm5-case-physical.png differ
diff --git a/documentation/asciidoc/computers/compute-module/images/cm5-case-physical.svg b/documentation/asciidoc/computers/compute-module/images/cm5-case-physical.svg
new file mode 100644
index 000000000..4ddf6308f
--- /dev/null
+++ b/documentation/asciidoc/computers/compute-module/images/cm5-case-physical.svg
@@ -0,0 +1,12074 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ SSD
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Power In
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+S TA TUS
+Power
+HDMI0
+HDMI1
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+94
+
+170
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+28
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Note: All dimensions in mm All dimensions are approximate and for reference purposes only. The dimensions shown should not be used for producing production data The dimensions are subject to part and manufacturing tolerances Dimensions may be subject to change
+
diff --git a/documentation/asciidoc/computers/compute-module/images/cm5-cooler-physical.png b/documentation/asciidoc/computers/compute-module/images/cm5-cooler-physical.png
new file mode 100644
index 000000000..521410178
Binary files /dev/null and b/documentation/asciidoc/computers/compute-module/images/cm5-cooler-physical.png differ
diff --git a/documentation/asciidoc/computers/compute-module/images/cm5-cooler-physical.svg b/documentation/asciidoc/computers/compute-module/images/cm5-cooler-physical.svg
new file mode 100644
index 000000000..5abb017d8
--- /dev/null
+++ b/documentation/asciidoc/computers/compute-module/images/cm5-cooler-physical.svg
@@ -0,0 +1,9616 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 41
+ 56
+
+
+
+
+
+
+
+
+
+
+ 33
+ 4 × M2.5
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 10
+ 2.7
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 48
+
+
+
+
+
+
+
+
+
+
+ Note:
+ All dimensions in mm
+ All dimensions are app
+ ro
+ ximate and for
+ reference purposes only.
+
+ The dimensions
+ shown should not be used for p
+ r
+ oducing
+ p
+ r
+ oduction data
+ The dimensions are subject
+ t
+ o pa
+ r
+ t and
+ manufacturing
+ t
+ ole
+ r
+ ances
+ Dimensions may be subject
+ t
+ o change
+
+
diff --git a/documentation/asciidoc/computers/compute-module/images/cm5-cooler.jpg b/documentation/asciidoc/computers/compute-module/images/cm5-cooler.jpg
new file mode 100644
index 000000000..d4781a5cd
Binary files /dev/null and b/documentation/asciidoc/computers/compute-module/images/cm5-cooler.jpg differ
diff --git a/documentation/asciidoc/computers/compute-module/images/cm5.png b/documentation/asciidoc/computers/compute-module/images/cm5.png
new file mode 100644
index 000000000..0431e3e2d
Binary files /dev/null and b/documentation/asciidoc/computers/compute-module/images/cm5.png differ
diff --git a/documentation/asciidoc/computers/compute-module/images/cm5io-case-front.png b/documentation/asciidoc/computers/compute-module/images/cm5io-case-front.png
new file mode 100644
index 000000000..055875438
Binary files /dev/null and b/documentation/asciidoc/computers/compute-module/images/cm5io-case-front.png differ
diff --git a/documentation/asciidoc/computers/compute-module/images/cm5io-case.png b/documentation/asciidoc/computers/compute-module/images/cm5io-case.png
new file mode 100644
index 000000000..074e802b6
Binary files /dev/null and b/documentation/asciidoc/computers/compute-module/images/cm5io-case.png differ
diff --git a/documentation/asciidoc/computers/compute-module/images/cm5io.png b/documentation/asciidoc/computers/compute-module/images/cm5io.png
new file mode 100644
index 000000000..382ae0b2c
Binary files /dev/null and b/documentation/asciidoc/computers/compute-module/images/cm5io.png differ
diff --git a/documentation/asciidoc/computers/compute-module/images/cmio.jpg b/documentation/asciidoc/computers/compute-module/images/cmio.jpg
new file mode 100644
index 000000000..347f27f28
Binary files /dev/null and b/documentation/asciidoc/computers/compute-module/images/cmio.jpg differ
diff --git a/documentation/asciidoc/computers/compute-module/images/j6_vertical.jpg b/documentation/asciidoc/computers/compute-module/images/j6_vertical.jpg
new file mode 100644
index 000000000..90858661a
Binary files /dev/null and b/documentation/asciidoc/computers/compute-module/images/j6_vertical.jpg differ
diff --git a/documentation/asciidoc/computers/compute-module/introduction.adoc b/documentation/asciidoc/computers/compute-module/introduction.adoc
new file mode 100644
index 000000000..aa74d7bd5
--- /dev/null
+++ b/documentation/asciidoc/computers/compute-module/introduction.adoc
@@ -0,0 +1,232 @@
+== Compute Modules
+
+Raspberry Pi Compute Modules are **system-on-module** variants of the flagship Raspberry Pi models. Compute Modules are especially popular for industrial and commercial applications, including digital signage, thin clients, and process automation. Some of these applications use the flagship Raspberry Pi design, but many users want a more compact design or on-board eMMC storage.
+
+Compute Modules come in multiple variants, varying both in memory and soldered-on embedded MultiMediaCard (eMMC) flash storage capacity. Like SD cards, eMMC provides persistent storage with minimal energy impact. Unlike SD cards, eMMC is specifically designed to be used as a disk and includes extra features to improve reliability. **Lite** models have no on-board storage, and are sometimes referred to with the shorthand suffix **L**, e.g. "CM3L".
+
+Compute Modules use the following Raspberry Pi SoCs:
+
+* BCM2835 for CM1
+* BCM2837 for CM3, CM3+
+* BCM2711 for CM4, CM4S
+* BCM2712 for CM5
+
+=== Compute Module 5
+
+.Compute Module 5
+image::images/cm5.png[alt="Compute Module 5", width="60%"]
+
+The Compute Module 5 (CM5) combines the internals of a Raspberry Pi 5 (the BCM2712 processor and 2GB, 4GB, 8GB, or 16GB of RAM) with optional 0GB (Lite), 16GB, 32GB or 64GB of eMMC flash storage.
+
+CM5 uses the same form factor as CM4, featuring two 100-pin high density connectors.
+
+=== Compute Module 4
+
+.Compute Module 4
+image::images/cm4.jpg[alt="Compute Module 4", width="60%"]
+
+The Compute Module 4 (CM4) combines the internals of a Raspberry Pi 4 (the BCM2711 processor and 1GB, 2GB, 4GB, or 8GB of RAM) with an optional 0GB (Lite), 8GB, 16GB or 32GB of eMMC flash storage.
+
+Unlike CM1, CM3, and CM3+, CM4 does not use the DDR2 SO-DIMM form factor. Instead, CM4 uses two 100-pin high density connectors in a smaller physical footprint. This change helped add the following interfaces:
+
+* an additional second HDMI port
+* PCIe
+* Ethernet
+
+The previous form factor could not have supported these interfaces.
+
+=== Compute Module 4S
+
+.Compute Module 4S
+image::images/cm4s.jpg[alt="Compute Module 4S", width="60%"]
+
+The Compute Module 4S (CM4S) combines the internals of a Raspberry Pi 4 (the BCM2711 processor and 1GB, 2GB, 4GB, or 8GB of RAM) with an optional 0GB (Lite), 8GB, 16GB or 32GB of eMMC flash storage. Unlike CM4, CM4S comes in the same DDR2 SO-DIMM form factor as CM1, CM3, and CM3+.
+
+[[compute-module-3-plus]]
+=== Compute Module 3+
+
+.Compute Module 3+
+image::images/cm3-plus.jpg[alt="Compute Module 3+", width="60%"]
+
+The Compute Module 3+ (CM3+) combines the internals of a Raspberry Pi 3 Model B+ (the BCM2837 processor and 1GB of RAM) with an optional 0GB (Lite), 8GB, 16GB or 32GB of eMMC flash storage. CM3+ comes in the DDR2 SO-DIMM form factor.
+
+=== Compute Module 3
+
+.Compute Module 3
+image::images/cm3.jpg[alt="Compute Module 3", width="60%"]
+
+The Compute Module 3 (CM3) combines the internals of a Raspberry Pi 3 (the BCM2837 processor and 1GB of RAM) with an optional 4GB of eMMC flash storage. CM3 comes in the DDR2 SO-DIMM form factor.
+
+=== Compute Module 1
+
+.Compute Module 1
+image::images/cm1.jpg[alt="Compute Module 1", width="60%"]
+
+The Compute Module 1 (CM1) contains the internals of a Raspberry Pi (the BCM2835 processor and 512MB of RAM) as well as an optional 4GB of eMMC flash storage. CM1 comes in the DDR2 SO-DIMM form factor.
+
+== IO Boards
+
+Raspberry Pi IO Boards provide a way to connect a single Compute Module to a variety of I/O (input/output) interfaces. Compute Modules are small, lacking ports and connectors. IO Boards provide a way to connect Compute Modules to a variety of peripherals.
+
+Raspberry Pi IO Boards provide the following functionality:
+
+* powers the module
+* connects the GPIO to pin headers
+* connects the camera and display interfaces to FFC connectors
+* connects HDMI to HDMI ports
+* connects USB to USB ports
+* connects activity monitoring to LEDs
+* supports eMMC programming over USB
+* connects PCIe to connectors used to physically connect storage or peripherals
+
+IO Boards are breakout boards intended for development or personal use; in production, you should use a smaller, potentially custom board that provides only the ports and peripherals required for your use-case.
+
+=== Compute Module 5 IO Board
+
+.Compute Module 5 IO Board
+image::images/cm5io.png[alt="Compute Module 5 IO Board", width="60%"]
+
+Compute Module 5 IO Board provides the following interfaces:
+
+* HAT footprint with 40-pin GPIO connector
+* PoE header
+* 2× HDMI ports
+* 2× USB 3.0 ports
+* Gigabit Ethernet RJ45 with PoE support
+* M.2 M key PCIe socket compatible with the 2230, 2242, 2260, and 2280 form factors
+* microSD card slot (only for use with Lite variants with no eMMC; other variants ignore the slot)
+* 2× MIPI DSI/CSI-2 combined display/camera FPC connectors (22-pin 0.5 mm pitch cable)
+* Real-time clock with battery socket
+* four-pin JST-SH PWM fan connector
+* USB-C power using the same standard as Raspberry Pi 5 (5V, 5A (25W) or 5V, 3A (15W) with a 600mA peripheral limit)
+* Jumpers to disable features such as eMMC boot, EEPROM write, and the USB OTG connection
+
+=== Compute Module 4 IO Board
+
+.Compute Module 4 IO Board
+image::images/cm4io.jpg[alt="Compute Module 4 IO Board", width="60%"]
+
+Compute Module 4 IO Board provides the following interfaces:
+
+* HAT footprint with 40-pin GPIO connector and PoE header
+* 2× HDMI ports
+* 2× USB 2.0 ports
+* Gigabit Ethernet RJ45 with PoE support
+* microSD card slot (only for use with Lite variants with no eMMC; other variants ignore the slot)
+* PCIe Gen 2 socket
+* micro USB upstream port
+* 2× MIPI DSI display FPC connectors (22-pin 0.5 mm pitch cable)
+* 2× MIPI CSI-2 camera FPC connectors (22-pin 0.5 mm pitch cable)
+* Real-time clock with battery socket
+* 12V input via barrel jack (supports up to 26V if PCIe unused)
+
+=== Compute Module IO Board
+
+.Compute Module IO Board
+image::images/cmio.jpg[alt="Compute Module IO Board", width="60%"]
+
+Compute Module IO Board provides the following interfaces:
+
+* 120 GPIO pins
+* HDMI port
+* USB-A port
+* 2× MIPI DSI display FPC connectors (22-pin 0.5 mm pitch cable)
+* 2× MIPI CSI-2 camera FPC connectors (22-pin 0.5 mm pitch cable)
+
+The Compute Module IO Board comes in two variants: Version 1 and Version 3. Version 1 is only compatible with CM1. Version 3 is compatible with CM1, CM3, CM3+, and CM4S. Compute Module IO Board Version 3 is sometimes written as the shorthand CMIO3.
+
+Compute Module IO Board Version 3 added a microSD card slot that did not exist in Compute Module IO Board Version 1.
+
+=== IO Board compatibility
+
+Not all Compute Module IO Boards work with all Compute Module models. The following table shows which Compute Modules work with each IO Board:
+
+[cols="1,1"]
+|===
+| IO Board | Compatible Compute Modules
+
+| Compute Module IO Board Version 1 (CMIO)/(CMIO1)
+a|
+* CM1
+| Compute Module IO Board Version 3 (CMIO)/(CMIO3)
+a|
+* CM1
+* CM3
+* CM3+
+* CM4S
+| Compute Module 4 IO Board (CM4IO)
+a|
+* CM4
+* CM5 (with reduced functionality)
+| Compute Module 5 IO Board (CM5IO)
+a|
+* CM5
+* CM4 (with reduced functionality)
+|===
+
+== CM5 Accessories
+
+=== IO Case
+
+The Compute Module 5 IO Board Case provides physical protection for a CM5IO Board.
+
+.Compute Module 5 IO Board Case
+image::images/cm5io-case.png[alt="Compute Module 5 IO Board Case", width="60%"]
+
+The Case provides cut-outs for all externally-facing ports and LEDs on the CM5IO Board, and an attachment point for a Raspberry Pi Antenna Kit.
+
+.Compute Module 5 IO Board Case ports
+image::images/cm5io-case-front.png[alt="the port selection on the Compute Module 5 IO Board Case", width="60%"]
+
+To mount a CM5IO Board within your Case, position your Board in the bottom section of the case, aligning the four mounting points inset slightly from each corner of the Board. Fasten four screws into the mounting points. Take care not to over-tighten the screws.
+
+To use the Case fan, connect the fan cable to the FAN (J14) port on the Board.
+
+To close the case, put the top section of the case on top of the bottom section of the case. Facing the front of the case, which has port pass-throughs, carefully align the screw holes on the left and right side of the case and the power button on the back of the case. Tighten four screws into the screw holes. Take care not to over-tighten the screws.
+
+TIP: The Case comes with a fan pre-installed. To close the case with the passive Cooler attached to your Compute Module, remove the fan. To remove the fan, remove the four screws positioned in the corners of the fan from the bottom of the top case.
+
+.CM5 Case physical specification
+image::images/cm5-case-physical.png[alt="CM5 Case physical specification", width="80%"]
+
+=== Antenna
+
+The Raspberry Pi Antenna Kit provides a certified external antenna to boost wireless reception on a CM4 or CM5.
+
+.CM4 and CM5 Antenna
+image::images/cm4-cm5-antenna.jpg[alt="The Antenna, connected to CM4", width="60%"]
+
+To attach the Antenna to your Compute Module and Case, complete the following steps:
+
+. Connect the https://en.wikipedia.org/wiki/Hirose_U.FL[U.FL connector] on the cable to the U.FL-compatible connector on your Compute Module.
+. Secure the toothed washer onto the male SMA connector at the end of the cable, then insert the SMA connector, with the antenna facing outward, through the hole in the Case.
+. Fasten the SMA connector into place with the retaining hexagonal nut and washer.
+. Tighten the female SMA connector on the Antenna onto the male SMA connector.
+. Adjust the Antenna to its final position by turning it up to 90°.
+
+.CM4 and CM5 Antenna assembly diagram
+image::images/cm4-cm5-antenna-assembly.svg[alt="CM4 and CM5 antenna assembly diagram", width="60%"]
+
+To **use** the Antenna with your Compute Module, add a `dtparam` instruction in xref:../computers/config_txt.adoc[`/boot/firmware/config.txt`]. Add the following line to the end of `config.txt`:
+
+[source,ini]
+----
+dtparam=ant2
+----
+
+.CM4 and CM5 Antenna physical specification
+image::images/cm4-cm5-antenna-physical.png[alt="CM4 and CM5 antenna physical specification", width="80%"]
+
+=== Cooler
+
+The CM5 Cooler helps dissipate heat from your CM5, improving CPU performance and longevity.
+
+.CM5 Cooler
+image::images/cm5-cooler.jpg[alt="CM5 Cooler", width="60%"]
+
+To mount the Cooler to your CM5, attach the thermally conductive silicone at the bottom of the Cooler to the top of your CM5. Align the cut-out in the heatsink with the antenna https://en.wikipedia.org/wiki/Hirose_U.FL[U.FL connector]. Optionally, fasten screws in the mounting points found in each corner to secure the Cooler. If you omit the screws, the thermally conductive silicone pad alone holds the Cooler in place.
+
+.CM5 Cooler physical specification
+image::images/cm5-cooler-physical.png[alt="CM5 Cooler physical specification", width="80%"]
+
+NOTE: The CM5 Cooler is only compatible with the CM5IO Case if you remove the fan from the case.
diff --git a/documentation/asciidoc/computers/config_txt.adoc b/documentation/asciidoc/computers/config_txt.adoc
index 4b9aaa6f1..500831113 100644
--- a/documentation/asciidoc/computers/config_txt.adoc
+++ b/documentation/asciidoc/computers/config_txt.adoc
@@ -14,17 +14,11 @@ include::config_txt/overclocking.adoc[]
include::config_txt/conditional.adoc[]
-include::config_txt/legacy.adoc[]
-
include::config_txt/memory.adoc[]
include::config_txt/codeclicence.adoc[]
include::config_txt/video.adoc[]
-include::config_txt/pi4-hdmi.adoc[]
-
include::config_txt/camera.adoc[]
-include::config_txt/misc.adoc[]
-
diff --git a/documentation/asciidoc/computers/config_txt/audio.adoc b/documentation/asciidoc/computers/config_txt/audio.adoc
index 31f361306..7ba0b541d 100644
--- a/documentation/asciidoc/computers/config_txt/audio.adoc
+++ b/documentation/asciidoc/computers/config_txt/audio.adoc
@@ -1,4 +1,4 @@
-== Onboard Analogue Audio (3.5mm Jack)
+== Onboard analogue audio (3.5mm jack)
The onboard audio output uses config options to change the way the analogue audio is driven, and whether some firmware features are enabled or not.
@@ -8,11 +8,11 @@ The onboard audio output uses config options to change the way the analogue audi
`audio_pwm_mode=2` (the default) selects high quality analogue audio using an advanced modulation scheme.
-NOTE: This option uses more GPU compute resources and can interfere with some use cases.
+NOTE: This option uses more GPU compute resources and can interfere with some use cases on some models.
=== `disable_audio_dither`
-By default, a 1.0LSB dither is applied to the audio stream if it is routed to the analogue audio output. This can create audible background "hiss" in some situations, for example when the ALSA volume is set to a low level. Set `disable_audio_dither` to `1` to disable dither application.
+By default, a 1.0LSB dither is applied to the audio stream if it is routed to the analogue audio output. This can create audible background hiss in some situations, for example when the ALSA volume is set to a low level. Set `disable_audio_dither` to `1` to disable dither application.
=== `enable_audio_dither`
@@ -21,3 +21,15 @@ Audio dither (see disable_audio_dither above) is normally disabled when the audi
=== `pwm_sample_bits`
The `pwm_sample_bits` command adjusts the bit depth of the analogue audio output. The default bit depth is `11`. Selecting bit depths below `8` will result in nonfunctional audio, as settings below `8` result in a PLL frequency too low to support. This is generally only useful as a demonstration of how bit depth affects quantisation noise.
+
+== HDMI audio
+
+By default, HDMI audio output is enabled on all Raspberry Pi models with HDMI output.
+
+To disable HDMI audio output, append `,noaudio` to the end of the `dtoverlay=vc4-kms-v3d` line in xref:../computers/config_txt.adoc#what-is-config-txt[`/boot/firmware/config.txt`]:
+
+[source,ini]
+----
+dtoverlay=vc4-kms-v3d,noaudio
+----
+
diff --git a/documentation/asciidoc/computers/config_txt/autoboot.adoc b/documentation/asciidoc/computers/config_txt/autoboot.adoc
index 5e1c4ee7a..fa37c855e 100644
--- a/documentation/asciidoc/computers/config_txt/autoboot.adoc
+++ b/documentation/asciidoc/computers/config_txt/autoboot.adoc
@@ -1,8 +1,6 @@
== `autoboot.txt`
-`autoboot.txt` is an optional configuration file that can be used to specify the
-`boot_partition` number. This is sometimes used with NOOBS to bypass the boot menu
-selection and boot a specific partition.
+`autoboot.txt` is an optional configuration file that can be used to specify the `boot_partition` number.
This can also be used in conjunction with the `tryboot` feature to implement A/B booting for OS upgrades.
@@ -11,24 +9,32 @@ This can also be used in conjunction with the `tryboot` feature to implement A/B
See also xref:raspberry-pi.adoc#fail-safe-os-updates-tryboot[TRYBOOT] boot flow.
=== `boot_partition`
-Specifies the partition number for booting unless the partition number was already specified as parameter to the `reboot` command (e.g. `sudo reboot 2`).
+Specifies the partition number for booting unless the partition number was already specified as a parameter to the `reboot` command (e.g. `sudo reboot 2`).
+
+Partition numbers start at `1` and the MBR partitions are `1` to `4`. Specifying partition `0` means boot from the `default` partition which is the first bootable FAT partition.
+
+Bootable partitions must be formatted as FAT12, FAT16 or FAT32 and contain a `start.elf` file (or `config.txt` file on Raspberry Pi 5) in order to be classed as bootable by the bootloader.
=== The `[tryboot]` filter
This filter passes if the system was booted with the `tryboot` flag set.
+
+[source,console]
----
-sudo reboot "0 tryboot"
+$ sudo reboot "0 tryboot"
----
=== `tryboot_a_b`
Set this property to `1` to load the normal `config.txt` and `boot.img` files instead of `tryboot.txt` and `tryboot.img` when the `tryboot` flag is set.
-This enables the `tryboot` switch to be made at the partition level rather than the file-level without having to modify configuration files in the A/B partitions.
+This enables the `tryboot` switch to be made at the partition level rather than the file-level without having to modify configuration files in the A/B partitions.
=== Example update flow for A/B booting
-The following pseudo code shows how an hypothetical OS `Update Service` could use `tryboot` + `autoboot.txt` to perform an fail-safe OS upgrade.
+The following pseudo-code shows how a hypothetical OS `Update service` could use `tryboot` in `autoboot.txt` to perform a fail-safe OS upgrade.
+
+Initial `autoboot.txt`:
-Initial `autoboot.txt`
+[source,ini]
----
[all]
tryboot_a_b=1
@@ -39,27 +45,29 @@ boot_partition=3
**Installing the update**
-* System is powered on and boots to partition 2 by default.
-* An `Update Service` downloads the next version of the OS to partition 3.
-* The update is tested by rebooting to `tryboot` mode `reboot "0 tryboot"` where `0` means the default partition.
+* System is powered on and boots to partition 2 by default
+* An `Update service` downloads the next version of the OS to partition 3
+* The update is tested by rebooting to `tryboot` mode `reboot "0 tryboot"` where `0` means the default partition
**Committing or cancelling the update**
-* System boots from partition 3 because the `[tryboot]` filter evaluates to true in `tryboot mode`.
+* System boots from partition 3 because the `[tryboot]` filter evaluates to true in `tryboot mode`
* If tryboot is active (`/proc/device-tree/chosen/bootloader/tryboot == 1`)
** If the current boot partition (`/proc/device-tree/chosen/bootloader/partition`) matches the `boot_partition` in the `[tryboot]` section of `autoboot.txt`
- *** The `Update Service` validates the system to verify that the update was successful.
+ *** The `Update Service` validates the system to verify that the update was successful
*** If the update was successful
- **** Replace `autoboot.txt` swapping the `boot_partition` configuration.
- **** Normal reboot - partition 3 is now the default boot partition.
+ **** Replace `autoboot.txt` swapping the `boot_partition` configuration
+ **** Normal reboot - partition 3 is now the default boot partition
*** Else
**** `Update Service` marks the update as failed e.g. it removes the update files.
- **** Normal reboot - partition 2 is still the default boot partition because the `tryboot` flag is automatically cleared.
+ **** Normal reboot - partition 2 is still the default boot partition because the `tryboot` flag is automatically cleared
*** End if
** End If
* End If
-Updated `autoboot.txt`
+Updated `autoboot.txt`:
+
+[source,ini]
----
[all]
tryboot_a_b=1
@@ -68,6 +76,7 @@ boot_partition=3
boot_partition=2
----
-**Notes**
-* It's not mandatory to reboot after updating `autoboot.txt`. However, the `Update Service` must be careful to avoid overwriting the current partition since `autoboot.txt` has already been modified to commit the last update..
-* See also: xref:configuration.adoc#device-trees-overlays-and-parameters[Device-tree parameters].
+[NOTE]
+======
+It's not mandatory to reboot after updating `autoboot.txt`. However, the `Update Service` must be careful to avoid overwriting the current partition since `autoboot.txt` has already been modified to commit the last update. For more information, see xref:configuration.adoc#device-trees-overlays-and-parameters[Device Tree parameters].
+======
diff --git a/documentation/asciidoc/computers/config_txt/boot.adoc b/documentation/asciidoc/computers/config_txt/boot.adoc
index 469793326..50aca0aa5 100644
--- a/documentation/asciidoc/computers/config_txt/boot.adoc
+++ b/documentation/asciidoc/computers/config_txt/boot.adoc
@@ -5,31 +5,13 @@
These options specify the firmware files transferred to the VideoCore GPU prior to booting.
`start_file` specifies the VideoCore firmware file to use.
-`fixup_file` specifies the file used to fix up memory locations used in the `start_file` to match the GPU memory split. Note that the `start_file` and the `fixup_file` are a matched pair - using unmatched files will stop the board from booting. This is an advanced option, so we advise that you use `start_x` and `start_debug` rather than this option.
+`fixup_file` specifies the file used to fix up memory locations used in the `start_file` to match the GPU memory split.
-=== `start_x`, `start_debug`
+The `start_file` and the `fixup_file` are a matched pair - using unmatched files will stop the board from booting. This is an advanced option, so we advise that you use `start_x` and `start_debug` rather than this option.
-These provide a shortcut to some alternative `start_file` and `fixup_file` settings, and are the recommended methods for selecting firmware configurations.
+NOTE: Cut-down firmware (`start*cd.elf` and `fixup*cd.dat`) cannot be selected this way - the system will fail to boot. The only way to enable the cut-down firmware is to specify `gpu_mem=16`. The cut-down firmware removes support for codecs, 3D and debug logging as well as limiting the initial early-boot framebuffer to 1080p @16bpp - although KMS can replace this with up to 32bpp 4K framebuffer(s) at a later stage as with any firmware.
-`start_x=1` implies
-----
- start_file=start_x.elf
- fixup_file=fixup_x.dat
-----
-
-On the Raspberry Pi 4, if the files `start4x.elf` and `fixup4x.dat` are present, these files will be used instead.
-
-`start_debug=1` implies
-----
- start_file=start_db.elf
- fixup_file=fixup_db.dat
-----
-
-`start_x=1` should be specified when using the camera module. Enabling the camera via `raspi-config` will set this automatically.
-
-=== `disable_commandline_tags`
-
-Set the `disable_commandline_tags` command to `1` to stop `start.elf` from filling in ATAGS (memory from `0x100`) before launching the kernel.
+NOTE: The Raspberry Pi 5, Compute Module 5, and Raspberry Pi 500 firmware is self-contained in the bootloader EEPROM.
=== `cmdline`
@@ -37,41 +19,40 @@ Set the `disable_commandline_tags` command to `1` to stop `start.elf` from filli
=== `kernel`
-`kernel` is the alternative filename on the boot partition to use when loading the kernel. The default value on the Raspberry Pi 1, Zero and Zero W, and Raspberry Pi Compute Module 1 is `kernel.img`. The default value on the Raspberry Pi 2, 3, 3+ and Zero 2 W, and Raspberry Pi Compute Modules 3 and 3+ is `kernel7.img`. The default value on the Raspberry Pi 4 and 400, and Raspberry Pi Compute Module 4 is `kernel7l.img`.
-
-=== `arm_64bit`
-
-If set to non-zero, forces the kernel loading system to assume a 64-bit kernel, starts the processors up in 64-bit mode, and sets `kernel8.img` to be the kernel image loaded, unless there is an explicit `kernel` option defined in which case that is used instead. Defaults to 0 on all platforms.
-
-NOTE: 64-bit kernels may be uncompressed image files or a gzip archive of an image (which can still be called kernel8.img; the bootloader will recognize the archive from the signature bytes at the beginning).
-
-NOTE: The 64-bit kernel will only work on the Raspberry Pi 3, 3+, 4, 400, Zero 2 W and 2B rev 1.2, and Raspberry Pi Compute Modules 3, 3+ and 4.
-
-=== `arm_control`
+`kernel` is the alternative filename on the boot partition for loading the kernel. The default value on the Raspberry Pi 1, Zero and Zero W, and Raspberry Pi Compute Module 1 is `kernel.img`. The default value on the Raspberry Pi 2, 3, 3+ and Zero 2 W, and Raspberry Pi Compute Modules 3 and 3+ is `kernel7.img`. The default value on the Raspberry Pi 4 and 400, and Raspberry Pi Compute Module 4 is `kernel8.img`, or `kernel7l.img` if `arm_64bit` is set to 0.
-WARNING: This setting is *DEPRECATED*, use `arm_64bit` instead to enable 64-bit kernels.
+The Raspberry Pi 5, Compute Module 5, and Raspberry Pi 500 firmware defaults to loading `kernel_2712.img` because this image contains optimisations specific to those models (e.g. 16K page-size). If this file is not present, then the common 64-bit kernel (`kernel8.img`) will be loaded instead.
-Sets board-specific control bits.
-
-=== `armstub`
+=== `arm_64bit`
-`armstub` is the filename on the boot partition from which to load the ARM stub. The default ARM stub is stored in firmware and is selected automatically based on the Raspberry Pi model and various settings.
+If set to 1, the kernel will be started in 64-bit mode. Setting to 0 selects 32-bit mode.
-The stub is a small piece of ARM code that is run before the kernel. Its job is to set up low-level hardware like the interrupt controller before passing control to the kernel.
+In 64-bit mode, the firmware will choose an appropriate kernel (e.g. `kernel8.img`), unless there is an explicit `kernel` option defined, in which case that is used instead.
-=== `arm_peri_high`
+Defaults to 1 on Raspberry Pi 4, 400 and Compute Module 4, 4S platforms. Defaults to 0 on all other platforms. However, if the name given in an explicit `kernel` option matches one of the known kernels then `arm_64bit` will be set accordingly.
-Set `arm_peri_high` to `1` to enable "High Peripheral" mode on the Raspberry Pi 4. It is set automatically if a suitable DTB is loaded.
+64-bit kernels come in the following forms:
-NOTE: Enabling "High Peripheral" mode without a compatible device tree will make your system fail to boot. Currently ARM stub support is missing, so you will also need to load a suitable file using `armstub`.
+* uncompressed image files
+* gzip archives of an image
-=== `kernel_address`
+Both forms may use the `img` file extension; the bootloader recognises archives using signature bytes at the start of the file.
-`kernel_address` is the memory address to which the kernel image should be loaded. 32-bit kernels are loaded to address `0x8000` by default, and 64-bit kernels to address `0x200000`. If `kernel_old` is set, kernels are loaded to the address `0x0`.
+The following Raspberry Pi models support this flag:
-=== `kernel_old`
+* 2B rev 1.2
+* 3B
+* 3A+
+* 3B+
+* 4B
+* 400
+* Zero 2 W
+* Compute Module 3
+* Compute Module 3+
+* Compute Module 4
+* Compute Module 4S
-Set `kernel_old` to `1` to load the kernel to the memory address `0x0`.
+Flagship models since Raspberry Pi 5, Compute Modules since CM5, and Keyboard models since Pi 500 _only_ support the 64-bit kernel. Models that only support a 64-bit kernel ignore this flag.
=== `ramfsfile`
@@ -83,51 +64,30 @@ NOTE: Newer firmware supports the loading of multiple `ramfs` files. You should
`ramfsaddr` is the memory address to which the `ramfsfile` should be loaded.
+[[initramfs]]
=== `initramfs`
The `initramfs` command specifies both the ramfs filename *and* the memory address to which to load it. It performs the actions of both `ramfsfile` and `ramfsaddr` in one parameter. The address can also be `followkernel` (or `0`) to place it in memory after the kernel image. Example values are: `initramfs initramf.gz 0x00800000` or `initramfs init.gz followkernel`. As with `ramfsfile`, newer firmwares allow the loading of multiple files by comma-separating their names.
-NOTE: This option uses different syntax from all the other options, and you should not use a `=` character here.
-
-=== `init_uart_baud`
+NOTE: This option uses different syntax from all the other options, and you should not use the `=` character here.
-`init_uart_baud` is the initial UART baud rate. The default value is `115200`.
-
-=== `init_uart_clock`
-
-`init_uart_clock` is the initial UART clock frequency. The default value is `48000000` (48MHz). Note that this clock only applies to UART0 (ttyAMA0 in Linux), and that the maximum baudrate for the UART is limited to 1/16th of the clock. The default UART on the Raspberry Pi 3 and Raspberry Pi Zero is UART1 (ttyS0 in Linux), and its clock is the core VPU clock - at least 250MHz.
-
-=== `bootcode_delay`
-
-The `bootcode_delay` command delays for a given number of seconds in `bootcode.bin` before loading `start.elf`: the default value is `0`.
-
-This is particularly useful to insert a delay before reading the EDID of the monitor, for example if the Raspberry Pi and monitor are powered from the same source, but the monitor takes longer to start up than the Raspberry Pi. Try setting this value if the display detection is wrong on initial boot, but is correct if you soft-reboot the Raspberry Pi without removing power from the monitor.
-
-=== `boot_delay`
-
-The `boot_delay` command instructs to wait for a given number of seconds in `start.elf` before loading the kernel: the default value is `1`. The total delay in milliseconds is calculated as `(1000 x boot_delay) + boot_delay_ms`. This can be useful if your SD card needs a while to get ready before Linux is able to boot from it.
-
-=== `boot_delay_ms`
-
-The `boot_delay_ms` command means wait for a given number of milliseconds in `start.elf`, together with `boot_delay`, before loading the kernel. The default value is `0`.
+[[auto_initramfs]]
+=== `auto_initramfs`
+If `auto_initramfs` is set to `1`, the firmware looks for an `initramfs` file to match the kernel. The file must be in the same location as the kernel image, and the name is derived from the name of the kernel by replacing the `kernel` prefix with `initramfs`, and removing any extension such as `.img`, e.g. `kernel8.img` requires `initramfs8`. You can make use of `auto_initramfs` with custom kernel names provided the names begin with `kernel` and `initramfs` respectively and everything else matches (except for the absence of the file extension on the initramfs). Otherwise, an explicit xref:config_txt.adoc#initramfs[`initramfs`] statement is required.
[[disable_poe_fan]]
=== `disable_poe_fan`
-By default, a probe on the I2C bus will happen at startup, even when a PoE HAT is not attached. Setting this option to 1 disables control of a PoE HAT fan through I2C (on pins ID_SD & ID_SC). If you are not intending to use a PoE HAT doing this is useful if you need to minimise boot time.
+By default, a probe on the I2C bus will happen at startup, even when a PoE HAT is not attached. Setting this option to 1 disables control of a PoE HAT fan through I2C (on pins ID_SD & ID_SC). If you are not intending to use a PoE HAT, this is a helpful way to minimise boot time.
=== `disable_splash`
If `disable_splash` is set to `1`, the rainbow splash screen will not be shown on boot. The default value is `0`.
-=== `enable_gic` (Raspberry Pi 4 Only)
-
-On the Raspberry Pi 4B, if this value is set to `0` then the interrupts will be routed to the ARM cores using the legacy interrupt controller, rather than via the GIC-400. The default value is `1`.
-
=== `enable_uart`
-`enable_uart=1` (in conjunction with `console=serial0` in `cmdline.txt`) requests that the kernel creates a serial console, accessible using GPIOs 14 and 15 (pins 8 and 10 on the 40-pin header). Editing `cmdline.txt` to remove the line `quiet` enables boot messages from the kernel to also appear there. See also `uart_2ndstage`.
+`enable_uart=1` (in conjunction with `console=serial0,115200` in `cmdline.txt`) requests that the kernel creates a serial console, accessible using GPIOs 14 and 15 (pins 8 and 10 on the 40-pin header). Editing `cmdline.txt` to remove the line `quiet` enables boot messages from the kernel to also appear there. See also `uart_2ndstage`.
=== `force_eeprom_read`
@@ -136,47 +96,251 @@ Set this option to `0` to prevent the firmware from trying to read an I2C HAT EE
[[os_prefix]]
=== `os_prefix`
-`os_prefix` is an optional setting that allows you to choose between multiple versions of the kernel and Device Tree files installed on the same card. Any value in `os_prefix` is prepended to (stuck in front of) the name of any operating system files loaded by the firmware, where "operating system files" is defined to mean kernels, initramfs, cmdline.txt, .dtbs and overlays. The prefix would commonly be a directory name, but it could also be part of the filename such as "test-". For this reason, directory prefixes must include the trailing `/` character.
+`os_prefix` is an optional setting that allows you to choose between multiple versions of the kernel and Device Tree files installed on the same card. Any value in `os_prefix` is prepended to the name of any operating system files loaded by the firmware, where "operating system files" is defined to mean kernels, `initramfs`, `cmdline.txt`, `.dtbs` and overlays. The prefix would commonly be a directory name, but it could also be part of the filename such as "test-". For this reason, directory prefixes must include the trailing `/` character.
In an attempt to reduce the chance of a non-bootable system, the firmware first tests the supplied prefix value for viability - unless the expected kernel and .dtb can be found at the new location/name, the prefix is ignored (set to ""). A special case of this viability test is applied to overlays, which will only be loaded from `+${os_prefix}${overlay_prefix}+` (where the default value of <> is "overlays/") if `+${os_prefix}${overlay_prefix}README+` exists, otherwise it ignores `os_prefix` and treats overlays as shared.
-(The reason the firmware checks for the existence of key files rather than directories when checking prefixes is twofold - the prefix may not be a directory, and not all boot methods support testing for the existence of a directory.)
+(The reason the firmware checks for the existence of key files rather than directories when checking prefixes is twofold: the prefix may not be a directory, and not all boot methods support testing for the existence of a directory.)
NOTE: Any user-specified OS file can bypass all prefixes by using an absolute path (with respect to the boot partition) - just start the file path with a `/`, e.g. `kernel=/my_common_kernel.img`.
-See also <> and <>.
+See also <> and xref:legacy_config_txt.adoc#upstream_kernel[`upstream_kernel`].
-=== `otg_mode` (Raspberry Pi 4 Only)
+=== `otg_mode` (Raspberry Pi 4 only)
USB On-The-Go (often abbreviated to OTG) is a feature that allows supporting USB devices with an appropriate OTG cable to configure themselves as USB hosts. On older Raspberry Pis, a single USB 2 controller was used in both USB host and device mode.
-Raspberry Pi 4B and Raspberry Pi 400 (not CM4 or CM4IO) add a high performance USB 3 controller, attached via PCIe, to drive the main USB ports. The legacy USB 2 controller is still available on the USB-C power connector for use as a device (`otg_mode=0`, the default).
+Flagship models since Raspberry Pi 4B and Keyboard models since Pi 400 add a high-performance USB 3 controller, attached via PCIe, to drive the main USB ports. The legacy USB 2 controller is still available on the USB-C power connector for use as a device (`otg_mode=0`, the default). Compute Modules before CM5 do not include this high-performance USB 3 controller.
-`otg_mode=1` requests that a more capable XHCI USB 2 controller is used as another host controller on that USB-C connector.
+`otg_mode=1` requests that a more capable XHCI USB 2 controller is used as an alternative host controller on that USB-C connector.
+
+NOTE: By default, Raspberry Pi OS includes a line in `/boot/firmware/config.txt` that enables this setting on Compute Module 4.
-NOTE: Because CM4 and CM4IO don't include the external USB 3 controller, Raspberry Pi OS images set `otg_mode=1` on CM4 for better performance.
[[overlay_prefix]]
=== `overlay_prefix`
-Specifies a subdirectory/prefix from which to load overlays - defaults to `overlays/` (note the trailing `/`). If used in conjunction with <>, the `os_prefix` comes before the `overlay_prefix`, i.e. `dtoverlay=disable-bt` will attempt to load `+${os_prefix}${overlay_prefix}disable-bt.dtbo+`.
+Specifies a subdirectory/prefix from which to load overlays, and defaults to `overlays/` (note the trailing `/`). If used in conjunction with <>, the `os_prefix` comes before the `overlay_prefix`, i.e. `dtoverlay=disable-bt` will attempt to load `+${os_prefix}${overlay_prefix}disable-bt.dtbo+`.
NOTE: Unless `+${os_prefix}${overlay_prefix}README+` exists, overlays are shared with the main OS (i.e. `os_prefix` is ignored).
-[[sha256]]
-=== `sha256`
+=== Configuration Properties
+
+Raspberry Pi 5 requires a `config.txt` file to be present to indicate that the partition is bootable.
+
+[[boot_ramdisk]]
+==== `boot_ramdisk`
+
+If this property is set to `1` then the bootloader will attempt to load a ramdisk file called `boot.img` containing the xref:configuration.adoc#boot-folder-contents[boot filesystem]. Subsequent files (e.g. `start4.elf`) are read from the ramdisk instead of the original boot file system.
+
+The primary purpose of `boot_ramdisk` is to support `secure-boot`; however, unsigned `boot.img` files can also be useful for network boot or `RPIBOOT` configurations.
+
+* The maximum size for a ramdisk file is 96MB.
+* `boot.img` files are raw disk `.img` files. The recommended format is a plain FAT32 partition with no MBR.
+* The memory for the ramdisk filesystem is released before the operating system is started.
+* If xref:raspberry-pi.adoc#fail-safe-os-updates-tryboot[TRYBOOT] is selected then the bootloader will search for `tryboot.img` instead of `boot.img`.
+* See also xref:config_txt.adoc#autoboot-txt[autoboot.txt].
+
+For more information about `secure-boot` and creating `boot.img` files please see https://github.com/raspberrypi/usbboot/blob/master/Readme.md[USBBOOT].
+
+Default: `0`
+
+[[boot_load_flags]]
+==== `boot_load_flags`
+
+Experimental property for custom firmware (bare metal).
+
+Bit 0 (0x1) indicates that the .elf file is custom firmware. This disables any compatibility checks (e.g. is USB MSD boot supported) and resets PCIe before starting the executable.
+
+Not relevant on Raspberry Pi 5 because there is no `start.elf` file.
+
+Default: `0x0`
+
+[[enable_rp1_uart]]
+==== `enable_rp1_uart`
+
+When set to `1`, firmware initialises RP1 UART0 to 115200bps and doesn't reset RP1 before starting the OS (separately configurable using `pciex4_reset=1`).
+This makes it easier to get UART output on the 40-pin header in early boot-code, for instance during bare-metal debug.
+
+Default: `0x0`
+
+[[pciex4_reset]]
+==== `pciex4_reset`
+
+Raspberry Pi 5 only.
+
+By default, the PCIe x4 controller used by `RP1` is reset before starting the operating system. If this parameter is set to `0` then the reset is disabled allowing operating system or bare metal code to inherit the PCIe configuration setup from the bootloader.
+
+Default: `1`
+
+[[uart_2ndstage]]
+==== `uart_2ndstage`
+
+If `uart_2ndstage` is `1` then enable debug logging to the UART. This option also automatically enables UART logging in `start.elf`. This is also described on the xref:config_txt.adoc#boot-options[Boot options] page.
+
+The `BOOT_UART` property also enables bootloader UART logging but does not enable UART logging in `start.elf` unless `uart_2ndstage=1` is also set.
+
+Default: `0`
+
+[[erase_eeprom]]
+==== `erase_eeprom`
+
+If `erase_eeprom` is set to `1` then `recovery.bin` will erase the entire SPI EEPROM instead of flashing the bootloader image. This property has no effect during a normal boot.
+
+Default: `0`
+
+[[set_reboot_arg1]]
+==== `set_reboot_arg1`
+Raspberry Pi 5 only.
+
+Sets the value of `boot_arg1` to be passed via a reset-safe register to the bootloader after a reboot.
+See xref:config_txt.adoc#boot_arg1[`boot_arg1`] for more details.
+Default: ``
+
+[[set_reboot_order]]
+==== `set_reboot_order`
+
+Raspberry Pi 5 only.
+
+Sets the value of xref:raspberry-pi.adoc#BOOT_ORDER[BOOT_ORDER] to be passed via a reset-safe register to the bootloader after a reboot. As with `tryboot`, this is a one-time setting and is automatically cleared after use.
+
+This property could be used to debug different xref:raspberry-pi.adoc#BOOT_ORDER[BOOT_ORDER] settings. Alternatively, it could be used in a provisioning system which has control over power and the `nRPIBOOT` GPIO to override the boot mode without specifying xref:config_txt.adoc#conditional-filters[conditional filter] statements in the EEPROM config.
+
+Default: ``
+
+[[kernel_watchdog_timeout]]
+==== `kernel_watchdog_timeout`
+
+If set to a non-zero value (in seconds), this property enables a hardware watchdog timer that is handed over to the operating system (OS) at boot. If the OS does not regularly "kick" or reset the watchdog, the system will be reset after the specified timeout.
+
+This property sets the `systemd` `watchdog.open_timeout` parameter, which controls how long the OS has to initialize and start servicing the watchdog. The value is passed to the OS via the kernel command line. For ongoing operation, the OS must also regularly reset the watchdog, typically controlled by the `RuntimeWatchdogSec` parameter in `systemd`. For more information, see https://www.freedesktop.org/software/systemd/man/systemd-system.conf.html#RuntimeWatchdogSec=[systemd watchdog documentation].
+
+[NOTE]
+====
+On Raspberry Pi OS Bookworm and earlier, the `RuntimeWatchdogSec` parameter is **not enabled by default** and this setting must be configured first in `/etc/systemd/system.conf` before the firmware kernel watchdog can be used.
+
+If both `BOOT_WATCHDOG_TIMEOUT` (EEPROM/bootloader setting, only supported on Raspberry Pi 4 and 5) and `kernel_watchdog_timeout` are set, the bootloader will seamlessly hand over from the bootloader watchdog to the kernel watchdog at the point the OS is started. This provides continuous watchdog coverage from power-on through to OS runtime.
+
+It is preferred to use `kernel_watchdog_timeout` rather than `dtparam=watchdog` because `kernel_watchdog_timeout` explicitly sets the `open_timeout` parameter, ensuring the watchdog is active until systemd takes over.
+====
+
+This is useful for ensuring that the system can recover from OS hangs or crashes after the boot process has completed.
+
+Default: `0` (disabled)
+
+[[kernel_watchdog_partition]]
+==== `kernel_watchdog_partition`
+
+If the kernel watchdog triggers (i.e. the OS fails to reset the watchdog within the timeout), this property specifies the partition number to boot from after the reset. This allows for automatic failover to a recovery or alternate partition.
+
+You can use this in conjunction with the xref:config_txt.adoc#the-expression-filter[expression filter] to apply different settings or select a different boot flow when the watchdog triggers a reboot to a specific partition.
+
+See also the xref:raspberry-pi.adoc#PARTITION[PARTITION] property for more information about how to use high partition numbers to detect a watchdog trigger.
+
+Default: `0` (default partition)
+
+
+[[eeprom_write_protect]]
+==== `eeprom_write_protect`
+
+Configures the EEPROM `write status register`. This can be set either to mark the entire EEPROM as write-protected, or to clear write-protection.
+
+This option must be used in conjunction with the EEPROM `/WP` pin which controls updates to the EEPROM `Write Status Register`. Pulling `/WP` low (CM4 `EEPROM_nWP` or on a Raspberry Pi 4 `TP5`) does NOT write-protect the EEPROM unless the `Write Status Register` has also been configured.
+
+See the https://www.winbond.com/resource-files/w25x40cl_f%2020140325.pdf[Winbond W25x40cl] or https://www.winbond.com/hq/product/code-storage-flash-memory/serial-nor-flash/?__locale=en&partNo=W25Q16JV[Winbond W25Q16JV] datasheets for further details.
+
+`eeprom_write_protect` settings in `config.txt` for `recovery.bin`.
+
+|===
+| Value | Description
+
+| 1
+| Configures the write protect regions to cover the entire EEPROM.
+
+| 0
+| Clears the write protect regions.
+
+| -1
+| Do nothing.
+|===
+
+NOTE: `flashrom` does not support clearing of the write-protect regions and will fail to update the EEPROM if write-protect regions are defined.
+
+On Raspberry Pi 5 `/WP` is pulled low by default and consequently write-protect is enabled as soon as the `Write Status Register` is configured. To clear write-protect pull `/WP` high by connecting `TP14` and `TP1`.
+
+Default: `-1`
+
+[[os_check]]
+==== `os_check`
+
+On Raspberry Pi 5 the firmware automatically checks for a compatible Device Tree file before attempting to boot from the current partition. Otherwise, older non-compatible kernels would be loaded and then hang.
+To disable this check (e.g. for bare-metal development), set `os_check=0` in `config.txt`.
+
+Default: `1`
+
+[[bootloader_update]]
+==== `bootloader_update`
+
+This option may be set to 0 to block self-update without requiring the EEPROM configuration to be updated. This is sometimes useful when updating multiple Raspberry Pis via network boot because this option can be controlled per Raspberry Pi (e.g. via a serial number filter in `config.txt`).
+
+Default: `1`
+
+=== Secure Boot configuration properties
+
+[.whitepaper, title="How to use Raspberry Pi Secure Boot", subtitle="", link=https://pip.raspberrypi.com/categories/685-whitepapers-app-notes/documents/RP-003466-WP/Boot-Security-Howto.pdf]
+****
+This whitepaper describes how to implement secure boot on devices based on Raspberry Pi 4. For an overview of our approach to implementing secure boot implementation, please see the https://pip.raspberrypi.com/categories/685-whitepapers-app-notes/documents/RP-004651-WP/Raspberry-Pi-4-Boot-Security.pdf[Raspberry Pi 4 Boot Security] whitepaper. The secure boot system is intended for use with `buildroot`-based OS images; using it with Raspberry Pi OS is not recommended or supported.
+****
+
+The following `config.txt` properties are used to program the `secure-boot` OTP settings. These changes are irreversible and can only be programmed via `RPIBOOT` when flashing the bootloader EEPROM image. This ensures that `secure-boot` cannot be set remotely or by accidentally inserting a stale SD card image.
+
+For more information about enabling `secure-boot` please see the https://github.com/raspberrypi/usbboot/blob/master/Readme.md#secure-boot[Secure Boot readme] and the https://github.com/raspberrypi/usbboot/blob/master/secure-boot-example/README.md[Secure Boot tutorial] in the https://github.com/raspberrypi/usbboot[USBBOOT] repo.
+
+[[program_pubkey]]
+==== `program_pubkey`
+
+If this property is set to `1` then `recovery.bin` will write the hash of the public key in the EEPROM image to OTP. Once set, the bootloader will reject EEPROM images signed with different RSA keys or unsigned images.
+
+Default: `0`
+
+[[revoke_devkey]]
+==== `revoke_devkey`
+
+Raspberry Pi 4 only.
+
+If this property is set to `1` then `recovery.bin` will write a value to OTP that prevents the ROM from loading old versions of the second stage bootloader which do not support `secure-boot`. This prevents `secure-boot` from being turned off by reverting to an older release of the bootloader. Therefore, this property must be set if `secure-boot` is enabled on production devices.
+
+This property is automatically set by `recovery.bin` `2025/05/16` and newer if `program_pubkey=1`.
+
+
+Default: `0`
+
+[[program_rpiboot_gpio]]
+==== `program_rpiboot_gpio`
+
+Raspberry Pi 4B and Raspberry Pi 400 only.
+
+Compute Module 4 and 4S have a dedicated `nRPIBOOT` jumper to select `RPIBOOT` mode. Raspberry Pi 4B and Raspberry Pi 400 lack a dedicated `nRPIBOOT` jumper so one of the following GPIOs must be selected for use as `nRPIBOOT`.
+
+* `2`
+* `4`
+* `5`
+* `6`
+* `7`
+* `8`
+
+The GPIO may be used as a general-purpose I/O pin after the OS has started. However, you should verify that this GPIO configuration does not conflict with any HATs which might pull the GPIO low during boot.
-If set to non-zero, enables the logging of SHA256 hashes for loaded files (the kernel, initramfs, Device Tree .dtb file and overlays), as generated by the `sha256sum` utility. The logging output goes to the UART if enabled, and is also accessible via `sudo vcdbg log msg`. This option may be useful when debugging booting problems, but at the cost of potentially adding _many_ seconds to the boot time. Defaults to 0 on all platforms.
+Although `secure-boot` requires this property to be set on Raspberry Pi 4B and Raspberry Pi 400, it does not depend on `secure-boot`. For example, `RPIBOOT` can be useful for automated testing.
-=== `uart_2ndstage`
+For safety, this OTP value can _only_ be programmed via `RPIBOOT`. As a result, you must first clear the bootloader EEPROM using `erase_eeprom`. The blank EEPROM causes the ROM to failover to `RPIBOOT` mode, which then allows this option to be set.
-Setting `uart_2ndstage=1` causes the second-stage loader (`bootcode.bin` on devices prior to the Raspberry Pi 4, or the boot code in the EEPROM for Raspberry Pi 4 devices) and the main firmware (`start*.elf`) to output diagnostic information to UART0.
+Default: `{nbsp}`
-Be aware that output is likely to interfere with Bluetooth operation unless it is disabled (`dtoverlay=disable-bt`) or switched to the other UART (`dtoverlay=miniuart-bt`), and if the UART is accessed simultaneously to output from Linux then data loss can occur leading to corrupted output. This feature should only be required when trying to diagnose an early boot loading problem.
+[[program_jtag_lock]]
+==== `program_jtag_lock`
-[[upstream_kernel]]
-=== `upstream_kernel`
+If this property is set to `1` then `recovery.bin` will program an OTP value that prevents VideoCore JTAG from being used. This option requires that `program_pubkey` and `revoke_devkey` are also set. This option can prevent failure analysis, and should only be set after the device has been fully tested.
-If `upstream_kernel=1` is used, the firmware sets <> to "upstream/", unless it has been explicitly set to something else, but like other `os_prefix` values it will be ignored if the required kernel and .dtb file can't be found when using the prefix.
+Default: `0`
-The firmware will also prefer upstream Linux names for DTBs (`bcm2837-rpi-3-b.dtb` instead of `bcm2710-rpi-3-b.dtb`, for example). If the upstream file isn't found the firmware will load the downstream variant instead and automatically apply the "upstream" overlay to make some adjustments. Note that this process happens _after_ the `os_prefix` has been finalised.
diff --git a/documentation/asciidoc/computers/config_txt/camera.adoc b/documentation/asciidoc/computers/config_txt/camera.adoc
index 6ccda69a5..a3caa0134 100644
--- a/documentation/asciidoc/computers/config_txt/camera.adoc
+++ b/documentation/asciidoc/computers/config_txt/camera.adoc
@@ -1,9 +1,9 @@
-== Camera Settings
+== Camera settings
=== `disable_camera_led`
-Setting `disable_camera_led` to `1` prevents the red camera LED from turning on when recording video or taking a still picture. This is useful for preventing reflections when the camera is facing a window, for example.
+Setting `disable_camera_led` to `1` prevents the red camera LED from turning on when recording video or taking a still picture. This is useful for preventing reflections, for example when the camera is facing a window.
=== `awb_auto_is_greyworld`
-Setting `awb_auto_is_greyworld` to `1` allows libraries or applications that do not support the greyworld option internally to capture valid images and videos with NoIR cameras. It switches "auto" awb mode to use the "greyworld" algorithm. This should only be needed for NoIR cameras, or when the High Quality camera has had its xref:../accessories/camera.adoc#hq-camera-filter-removal[IR filter removed].
+Setting `awb_auto_is_greyworld` to `1` allows libraries or applications that do not support the greyworld option internally to capture valid images and videos with NoIR cameras. It switches auto awb mode to use the greyworld algorithm. This should only be needed for NoIR cameras, or when the High Quality camera has had its xref:../accessories/camera.adoc#filter-removal[IR filter removed].
diff --git a/documentation/asciidoc/computers/config_txt/codeclicence.adoc b/documentation/asciidoc/computers/config_txt/codeclicence.adoc
index 3b5a28490..688591a12 100644
--- a/documentation/asciidoc/computers/config_txt/codeclicence.adoc
+++ b/documentation/asciidoc/computers/config_txt/codeclicence.adoc
@@ -1,8 +1,10 @@
-== Licence Key and Codec Options
+== Licence key and codec options
Hardware decoding of additional codecs on the Raspberry Pi 3 and earlier models can be enabled by https://codecs.raspberrypi.com/license-keys/[purchasing a licence] that is locked to the CPU serial number of your Raspberry Pi.
-On the Raspberry Pi 4, the hardware codecs for MPEG2 or VC1 are permanently disabled and cannot be enabled even with a licence key; on the Raspberry Pi 4, thanks to its increased processing power compared to earlier models, MPEG2 and VC1 can be decoded in software via applications such as VLC. Therefore, a hardware codec licence key is not needed if you're using a Raspberry Pi 4.
+The Raspberry Pi 4 has permanently disabled hardware decoders for MPEG2 and VC1. These codecs cannot be enabled, so a hardware codec licence key is not needed. Software decoding of MPEG2 and VC1 files performs well enough for typical use cases.
+
+The Raspberry Pi 5 has H.265 (HEVC) hardware decoding. This decoding is enabled by default, so a hardware codec licence key is not needed.
=== `decode_MPG2`
diff --git a/documentation/asciidoc/computers/config_txt/common.adoc b/documentation/asciidoc/computers/config_txt/common.adoc
index 8578a2be6..7f4f89708 100644
--- a/documentation/asciidoc/computers/config_txt/common.adoc
+++ b/documentation/asciidoc/computers/config_txt/common.adoc
@@ -1,51 +1,59 @@
-== Common Options
+== Common options
-=== Common Display Options
+=== Common display options
-==== `disable_overscan`
+==== `hdmi_enable_4kp60`
-The default value for `disable_overscan` is `0` which gives default values of overscan for the left, right, top, and bottom edges of `48` for HD CEA modes, `32` for SD CEA modes, and `0` for DMT modes.
+NOTE: This option applies only to Raspberry Pi 4, Compute Module 4, Compute Module 4S, and Pi 400.
-Set `disable_overscan` to `1` to disable the default values of xref:configuration.adoc#underscan[overscan] that are set by the firmware.
+By default, when connected to a 4K monitor, certain models select a 30Hz refresh rate. Use this option to allow selection of 60Hz refresh rates. Models impacted by this setting do _not_ support 4Kp60 output on both micro HDMI ports simultaneously. Enabling this setting increases power consumption and temperature.
-==== `hdmi_enable_4kp60` (Raspberry Pi 4 Only)
-
-By default, when connected to a 4K monitor, the Raspberry Pi 4B, 400 and CM4 will select a 30Hz refresh rate. Use this option to allow selection of 60Hz refresh rates.
-
-IMPORTANT: It is not possible to output 4Kp60 on both micro HDMI ports simultaneously.
-
-WARNING: Setting `hdmi_enable_4kp60` will increase power consumption and the temperature of your Raspberry Pi.
-
-=== Common Hardware Configuration Options
+=== Common hardware configuration options
==== `camera_auto_detect`
-With this setting enabled (set to `1`), the firmware will automatically load overlays for cameras that it recognises.
+By default, Raspberry Pi OS includes a line in `/boot/firmware/config.txt` that enables this setting.
+
+When enabled, the firmware will automatically load overlays for recognised CSI cameras.
-IMPORTANT: New Raspberry Pi OS images from Bullseye onwards come with this setting by default.
+To disable, set `camera_auto_detect=0` (or remove `camera_auto_detect=1`).
==== `display_auto_detect`
-With this setting enabled (set to `1`), the firmware will automatically load overlays for displays that it recognises.
+By default, Raspberry Pi OS includes a line in `/boot/firmware/config.txt` that enables this setting.
+
+When enabled, the firmware will automatically load overlays for recognised DSI displays.
-IMPORTANT: New Raspberry Pi OS images from Bullseye onwards come with this setting by default.
+To disable, set `display_auto_detect=0` (or remove `display_auto_detect=1`).
==== `dtoverlay`
The `dtoverlay` option requests the firmware to load a named Device Tree overlay - a configuration file that can enable kernel support for built-in and external hardware. For example, `dtoverlay=vc4-kms-v3d` loads an overlay that enables the kernel graphics driver.
-As a special case, if called with no value - `dtoverlay=` - it marks the end of a list of overlay parameters. If used before any other `dtoverlay` or `dtparam` setting it prevents the loading of any HAT overlay.
+As a special case, if called with no value - `dtoverlay=` - the option marks the end of a list of overlay parameters. If used before any other `dtoverlay` or `dtparam` setting, it prevents the loading of any HAT overlay.
For more details, see xref:configuration.adoc#part3.1[DTBs, overlays and config.txt].
==== `dtparam`
-Device Tree configuration files for Raspberry Pis support a number of parameters for such things as enabling I2C and SPI interfaces. Many DT overlays are configurable via the use of parameters. Both types of parameters can be supplied using the `dtparam` setting. In addition, overlay parameters can be appended to the `dtoverlay` option, separated by commas, but beware the line length limit - previously 78 characters, now 98 characters.
+Device Tree configuration files for Raspberry Pi devices support various parameters for such things as enabling I2C and SPI interfaces. Many DT overlays are configurable via the use of parameters. Both types of parameters can be supplied using the `dtparam` setting. In addition, overlay parameters can be appended to the `dtoverlay` option, separated by commas, but keep in mind the line length limit of 98 characters.
For more details, see xref:configuration.adoc#part3.1[DTBs, overlays and config.txt].
-==== `arm_boost` (Raspberry Pi 4 Only)
+==== `arm_boost`
+
+NOTE: This option applies only to later Raspberry Pi 4B revisions which include two-phase power delivery, and all revisions of Pi 400.
+
+By default, Raspberry Pi OS includes a line in `/boot/firmware/config.txt` that enables this setting on supported devices.
+
+Some Raspberry Pi devices have a second switch-mode power supply for the SoC voltage rail. When enabled, this setting increases the default turbo-mode clock from 1.5GHz to 1.8GHz.
+
+To disable, set `arm_boost=0`.
+
+==== `power_force_3v3_pwm`
+
+NOTE: This option applies only to Raspberry Pi 5, Compute Module 5, and Pi 500.
-All Raspberry Pi 400s and newer revisions of the Raspberry Pi 4B are equipped with a second switch-mode power supply for the SoC voltage rail, and this allows the default turbo-mode clock to be increased from 1.5GHz to 1.8GHz. This change should be safe for all such boards, but to avoid unrequested changes for existing installations this change must be accepted by setting `arm_boost=1`.
+Forces PWM on 3.3V output from the GPIO header or CSI connector.
-IMPORTANT: New Raspberry Pi OS images from Bullseye onwards come with this setting by default.
+To disable, set `power_force_3v3_pwm=0`.
diff --git a/documentation/asciidoc/computers/config_txt/conditional.adoc b/documentation/asciidoc/computers/config_txt/conditional.adoc
index a540fcb28..f905c870e 100644
--- a/documentation/asciidoc/computers/config_txt/conditional.adoc
+++ b/documentation/asciidoc/computers/config_txt/conditional.adoc
@@ -1,7 +1,7 @@
[[conditional-filters]]
-== Conditional Filters
+== Conditional filters
-When a single SD Card (or card image) is being used with one Raspberry Pi and one monitor, it is easy to set `config.txt` as required for that specific combination and keep it that way, amending it only when something changes.
+When a single SD card (or card image) is being used with one Raspberry Pi and one monitor, it is easy to set `config.txt` as required for that specific combination and keep it that way, amending it only when something changes.
However, if one Raspberry Pi is swapped between different monitors, or if the SD card (or card image) is being swapped between multiple boards, a single set of settings may no longer be sufficient. Conditional filters allow you to define certain sections of the config file to be used only in specific cases, allowing a single `config.txt` to create different configurations when read by different hardware.
@@ -9,47 +9,65 @@ However, if one Raspberry Pi is swapped between different monitors, or if the SD
The `[all]` filter is the most basic filter. It resets all previously set filters and allows any settings listed below it to be applied to all hardware. It is usually a good idea to add an `[all]` filter at the end of groups of filtered settings to avoid unintentionally combining filters (see below).
-=== Model Filters
+=== Model filters
-The conditional model filters are applied according to the following table.
+The conditional model filters apply according to the following table.
|===
| Filter | Applicable model(s)
-| [pi1]
+| `[pi1]`
| Model 1A, Model 1B, Model 1A+, Model 1B+, Compute Module 1
-| [pi2]
+| `[pi2]`
| Model 2B (BCM2836- or BCM2837-based)
-| [pi3]
+| `[pi3]`
| Model 3B, Model 3B+, Model 3A+, Compute Module 3, Compute Module 3+
-| [pi3+]
-| Model 3A+, Model 3B+
+| `[pi3+]`
+| Model 3A+, Model 3B+ (also sees `[pi3]` contents)
-| [pi4]
+| `[pi4]`
| Model 4B, Pi 400, Compute Module 4, Compute Module 4S
-| [pi400]
-| Pi 400
+| `[pi5]`
+| Raspberry Pi 5, Compute Module 5, Pi 500
-| [cm4]
-| Compute Module 4
+| `[pi400]`
+| Pi 400 (also sees `[pi4]` contents)
-| [cm4s]
-| Compute Module 4S
+| `[pi500]`
+| Pi 500 (also sees `[pi5]` contents)
-| [pi0]
+| `[cm1]`
+| Compute Module 1 (also sees `[pi1]` contents)
+
+| `[cm3]`
+| Compute Module 3 (also sees `[pi3]` contents)
+
+| `[cm3+]`
+| Compute Module 3+ (also sees `[pi3+]` contents)
+
+| `[cm4]`
+| Compute Module 4 (also sees `[pi4]` contents)
+
+| `[cm4s]`
+| Compute Module 4S (also sees `[pi4]` contents)
+
+| `[cm5]`
+| Compute Module 5 (also sees `[pi5]` contents)
+
+| `[pi0]`
| Zero, Zero W, Zero 2 W
-| [pi0w]
-| Zero W
+| `[pi0w]`
+| Zero W (also sees `[pi0]` contents)
-| [pi02]
-| Zero 2 W
+| `[pi02]`
+| Zero 2 W (also sees `[pi0w]` and `[pi0]` contents)
-| [board-type=Type]
+| `[board-type=Type]`
| Filter by `Type` number - see xref:raspberry-pi.adoc#raspberry-pi-revision-codes[Raspberry Pi Revision Codes] E.g `[board-type=0x14]` would match CM4.
|===
@@ -57,146 +75,275 @@ The conditional model filters are applied according to the following table.
These are particularly useful for defining different `kernel`, `initramfs`, and `cmdline` settings, as the Raspberry Pi 1 and Raspberry Pi 2 require different kernels. They can also be useful to define different overclocking settings, as the Raspberry Pi 1 and Raspberry Pi 2 have different default speeds. For example, to define separate `initramfs` images for each:
----
- [pi1]
- initramfs initrd.img-3.18.7+ followkernel
- [pi2]
- initramfs initrd.img-3.18.7-v7+ followkernel
- [all]
+[pi1]
+initramfs initrd.img-3.18.7+ followkernel
+[pi2]
+initramfs initrd.img-3.18.7-v7+ followkernel
+[all]
----
Remember to use the `[all]` filter at the end, so that any subsequent settings aren't limited to Raspberry Pi 2 hardware only.
-It is important to note that the Raspberry Pi Zero W will see the contents of `[pi0w]` AND `[pi0]`. Likewise, a Raspberry Pi 3B+ sees `[pi3+]` AND `[pi3]`, and a Raspberry Pi 400 sees `[pi400]` AND `[pi4]`. If you want a setting to apply only to Raspberry Pi Zero, Raspberry Pi 3B or Raspberry Pi 4B, you need to follow it (order is important) with a setting in the `[pi0w]`, `[pi3+]` or `[pi400]` section that reverts it.
+[NOTE]
+====
+Some models of Raspberry Pi, including Zero, Compute Module, and Keyboard models, read settings from multiple filters. To apply a setting to only one model:
+
+* apply the setting to the base model (e.g. `[pi4]`), then revert the setting for all models that read the base model's filters (e.g. `[pi400]`, `[cm4]`, `[cm4s]`)
+* use the `board-type` filter with a revision code to target a single model (e.g. `[board-type=0x11]`)
+====
=== The `[none]` filter
The `[none]` filter prevents any settings that follow from being applied to any hardware. Although there is nothing that you can't do without `[none]`, it can be a useful way to keep groups of unused settings in config.txt without having to comment out every line.
+[source,ini]
+----
+# Bootloader EEPROM config.
+# If PM_RSTS is partition 62 then set bootloader properties to disable
+# SD high speed and show HDMI diagnostics
+# Boot from partition 2 with debug option.
+[partition=62]
+# Only high (>31) partition can be remapped.
+PARTITION=2
+SD_QUIRKS=0x1
+HDMI_DELAY=0
+----
+
+Example `config.txt` - (Currently Raspberry Pi 5 onwards)
+[source,ini]
+----
+# config.txt - If the original requested partition number in PM_RSTS was a
+# special number then use an alternate cmdline.txt
+[partition=62]
+cmdline=cmdline-recovery.txt
+----
+
+The raw value of the `PM_RSTS` register at bootup is available via `/proc/device-tree/chosen/bootloader/rsts` and the final partition number used for booting is available via `/proc/device-tree/chosen/bootloader/partition`. These are big-endian binary values.
+
+=== The expression filter
+
+The expression filter provides support for comparing unsigned integer "boot variables" to constants using a simple set of operators. It is intended to support OTA update mechanisms, debug and test.
+
+* The "boot variables" are `boot_arg1`, `boot_count`, `boot_partition` and `partition`.
+* Boot variables are always lower case.
+* Integer constants may either be written as decimal or as hex.
+* Expression conditional filters have no side-effects e.g. no assignment operators.
+* As with other filter types the expression filter cannot be nested.
+* Use the `[all]` filter to reset expressions and all other conditional filter types.
+
+Syntax:
+[source,ini]
+----
+# ARG is a boot-variable
+# VALUE and MASK are unsigned integer constants
+[ARG=VALUE] # selected if (ARG == VALUE)
+[ARG&MASK] # selected if ((ARG & MASK) != 0)
+[ARG&MASK=VALUE] # selected if ((ARG & MASK) == VALUE)
+[ARG>VALUE] # selected if (ARG > VALUE)
+
+----
+
+==== `boot_arg1`
+Raspberry Pi 5 and newer devices only.
+
+The `boot_arg1` variable is a 32-bit user defined value which is stored in a reset-safe register allowing parameters to be passed across a reboot.
+
+Setting `boot_arg1` to 42 via `config.txt`:
+[source,ini]
+----
+set_reboot_arg1=42
+----
+The `set_reboot_arg1` property sets the value for the next boot. It does not change the current value as seen by the config parser.
+
+Setting `boot_arg1` to 42 via vcmailbox:
+[source,console]
+----
+sudo vcmailbox 0x0003808c 8 8 1 42
+----
+
+Reading `boot_arg1` via vcmailbox:
+[source,console]
+----
+sudo vcmailbox 0x0003008c 8 8 1 0
+# Example output - boot_arg1 is 42
+# 0x00000020 0x80000000 0x0003008c 0x00000008 0x80000008 0x00000001 0x0000002a 0x0000000
+----
+The value of the `boot_arg1` variable when the OS was started can be read via xref:configuration.adoc#part4[device-tree] at `/proc/device-tree/chosen/bootloader/arg1`
+
+==== `boot_count`
+Raspberry Pi 5 and newer devices only.
+
+The `boot_count` variable is an 8-bit value stored in a reset-safe register that is incremented at boot (wrapping back to zero at 256). It is cleared if power is disconnected.
+
+To read `boot_count` via vcmailbox:
+[source,console]
+----
+sudo vcmailbox 0x0003008d 4 4 0
+# Example - boot count is 3
+# 0x0000001c 0x80000000 0x0003008d 0x00000004 0x80000004 0x00000003 0x00000000
+----
+
+Setting/clearing `boot_count` via vcmailbox:
+[source,console]
+----
+# Clear boot_count by setting it to zero.
+sudo vcmailbox 0x0003808d 4 4 0
+----
+The value of `boot_count` when the OS was started can be read via xref:configuration.adoc#part4[device-tree] at `/proc/device-tree/chosen/bootloader/count`
+
+==== `boot_partition`
+The `boot_partition` variable can be used to select alternate OS files (e.g. `cmdline.txt`) to be loaded, depending on which partition `config.txt` was loaded from after processing xref:config_txt.adoc#autoboot-txt[autoboot.txt]. This is intended for use with an `A/B` boot-system with `autoboot.txt` where it is desirable to be able to have identical files installed to the boot partition for both the `A` and `B` images.
+
+The value of the `boot_partition` can be different to the requested `partition` variable if it was overridden by setting `boot_partition` in xref:config_txt.adoc#autoboot-txt[autoboot.txt] or if the specified partition was not bootable and xref:raspberry-pi.adoc#PARTITION_WALK[PARTITION_WALK] was enabled in the EEPROM config.
+
+Example `config.txt` - select the matching root filesystem for the `A/B` boot file-system:
+[source,ini]
+----
+# Use different cmdline files to point to different root filesystems based on which partition the system booted from.
+[boot_partition=1]
+cmdline=cmdline_rootfs_a.txt # Points to root filesystem A
+
+[boot_partition=2]
+cmdline=cmdline_rootfs_b.txt # Points to root filesystem B
+----
+
+The value of `boot_partition` i.e. the partition used to boot the OS can be read from xref:configuration.adoc#part4[device-tree] at `/proc/device-tree/chosen/bootloader/partition`
+
+==== `partition`
+The `partition` variable can be used to select alternate boot flows according to the requested partition number (`sudo reboot N`) or via direct usage of the `PM_RSTS` watchdog register.
+
+
=== The `[tryboot]` filter
This filter succeeds if the `tryboot` reboot flag was set.
-It is intented for use in xref:config_txt.adoc#autoboot-txt[autoboot.txt] to select a different `boot_partition` in `tryboot` mode for fail-safe OS updates.
+It is intended for use in xref:config_txt.adoc#autoboot-txt[autoboot.txt] to select a different `boot_partition` in `tryboot` mode for fail-safe OS updates.
+
+The value of `tryboot` at the start of boot can be read via xref:configuration.adoc#part4[device-tree] at `/proc/device-tree/chosen/bootloader/tryboot`
=== The `[EDID=*]` filter
When switching between multiple monitors while using a single SD card in your Raspberry Pi, and where a blank config isn't sufficient to automatically select the desired resolution for each one, this allows specific settings to be chosen based on the monitors' EDID names.
-To view the EDID name of an attached monitor, run the following command:
+To view the EDID name of an attached monitor, you need to follow a few steps. Run the following command to see which output devices you have on your Raspberry Pi:
-[source]
+[source,console]
----
-tvservice -n
+$ ls -1 /sys/class/drm/card?-HDMI-A-?/edid
----
-
-This will print something like this:
-[source]
+On a Raspberry Pi 4, this will print something like:
+
----
-device_name=VSC-TD2220
+/sys/class/drm/card1-HDMI-A-1/edid
+/sys/class/drm/card1-HDMI-A-2/edid
----
-
-You can then specify settings that apply only to this monitor:
-[source]
+You then need to run `edid-decode` against each of these filenames, for example:
+
+[source,console]
----
-[EDID=VSC-TD2220]
-hdmi_group=2
-hdmi_mode=82
+$ edid-decode /sys/class/drm/card1-HDMI-A-1/edid
+----
+
+If there's no monitor connected to that particular output device, it'll tell you the EDID was empty; otherwise it will serve you *lots* of information about your monitor's capabilities. You need to look for the lines specifying the `Manufacturer` and the `Display Product Name`. The "EDID name" is then `<Manufacturer>-<Display Product Name>`, with any spaces in either string replaced by underscores. For example, if your `edid-decode` output included:
+
+----
+....
+ Vendor & Product Identification:
+ Manufacturer: DEL
+....
+ Display Product Name: 'DELL U2422H'
+....
+----
+
+The EDID name for this monitor would be `DEL-DELL_U2422H`.
+
+You can then use this as a conditional-filter to specify settings that only apply when this particular monitor is connected:
+
+[source,ini]
+----
+[EDID=DEL-DELL_U2422H]
+cmdline=cmdline_U2422H.txt
[all]
----
-This forces 1920x1080 DVT mode for the specified monitor, without affecting any other monitors.
+These settings apply only at boot. The monitor must be connected at boot time, and the Raspberry Pi must be able to read its EDID information to find the correct name. Hotplugging a different monitor into the Raspberry Pi after boot will not select different settings.
-Note that these settings apply only at boot, so the monitor must be connected at boot time and the Raspberry Pi must be able to read its EDID information to find the correct name. Hotplugging a different monitor into the Raspberry Pi after boot will not select different settings.
+On the Raspberry Pi 4, if both HDMI ports are in use, then the EDID filter will be checked against both of them, and configuration from all matching conditional filters will be applied.
-On the Raspberry Pi 4, if both HDMI ports are in use, then the EDID will be checked against both of them, and subsequent configuration applied only to the first matching device. You can determine the EDID names for both ports by first running `tvservice -l` in a terminal window to list all attached devices and then using the returned numerical IDs in `tvservice -v -n` to find the EDID name for a specific display ID.
+NOTE: This setting is not available on Raspberry Pi 5.
-=== The Serial Number Filter
+=== The serial number filter
-Sometimes settings should only be applied to a single specific Raspberry Pi, even if you swap the SD card to a different one. Examples include licence keys and overclocking settings (although the licence keys already support SD card swapping in a different way). You can also use this to select different display settings, even if the EDID identification above is not possible, provided that you don't swap monitors between your Raspberry Pis. For example, if your monitor doesn't supply a usable EDID name, or if you are using composite output (for which EDID cannot be read).
+Sometimes settings should only be applied to a single specific Raspberry Pi, even if you swap the SD card to a different one. Examples include licence keys and overclocking settings (although the licence keys already support SD card swapping in a different way). You can also use this to select different display settings, even if the EDID identification above is not possible, provided that you don't swap monitors between your Raspberry Pis. For example, if your monitor doesn't supply a usable EDID name, or if you are using composite output (from which EDID cannot be read).
To view the serial number of your Raspberry Pi, run the following command:
-[source]
+[source,console]
----
-cat /proc/cpuinfo
+$ cat /proc/cpuinfo
----
-The serial will be shown as a 16-digit hex value at the bottom. For example, if you see:
+A 16-digit hex value will be displayed near the bottom of the output. Your Raspberry Pi's serial number is the last eight hex-digits. For example, if you see:
-[source]
----
Serial : 0000000012345678
----
-then you can define settings that will only be applied to this specific Raspberry Pi:
+The serial number is `12345678`.
-[source]
+NOTE: On some Raspberry Pi models, the first 8 hex-digits contain values other than `0`. Even in this case, only use the last eight hex-digits as the serial number.
+
+You can define settings that will only be applied to this specific Raspberry Pi:
+
+[source,ini]
----
[0x12345678]
-# settings here are applied only to the Raspberry Pi with this serial
+# settings here apply only to the Raspberry Pi with this serial
+
[all]
-# settings here are applied to all hardware
+# settings here apply to all hardware
+
----
-=== The GPIO Filter
+=== The GPIO filter
-You can also filter depending on the state of a GPIO. For example
+You can also filter depending on the state of a GPIO. For example:
-[source]
+[source,ini]
----
[gpio4=1]
-# Settings here are applied if GPIO 4 is high
+# Settings here apply if GPIO 4 is high
[gpio2=0]
-# Settings here are applied if GPIO 2 is low
+# Settings here apply if GPIO 2 is low
[all]
-# settings here are applied to all hardware
-----
+# settings here apply to all hardware
-=== The `[HDMI:*]` Filter
-
-NOTE: This filter is for the Raspberry Pi 4 only.
+----
-The Raspberry Pi 4 has two HDMI ports, and for many `config.txt` commands related to HDMI, it is necessary to specify which HDMI port is being referred to. The HDMI conditional filters subsequent HDMI configurations to the specific port.
+=== Combine conditional filters
-[source]
-----
- [HDMI:0]
- hdmi_group=2
- hdmi_mode=45
- [HDMI:1]
- hdmi_group=2
- hdmi_mode=67
-----
+Filters of the same type replace each other, so `[pi2]` overrides `[pi1]`, because it is not possible for both to be true at once.
-An alternative `variable:index` syntax is available on all port-specific HDMI commands. You could use the following, which is the same as the previous example:
+Filters of different types can be combined by listing them one after the other, for example:
-[source]
-----
- hdmi_group:0=2
- hdmi_mode:0=45
- hdmi_group:1=2
- hdmi_mode:1=67
+[source,ini]
----
+# settings here apply to all hardware
-=== Combining Conditional Filters
+[EDID=VSC-TD2220]
+# settings here apply only if monitor VSC-TD2220 is connected
-Filters of the same type replace each other, so `[pi2]` overrides `[pi1]`, because it is not possible for both to be true at once.
+[pi2]
+# settings here apply only if monitor VSC-TD2220 is connected *and* on a Raspberry Pi 2
-Filters of different types can be combined simply by listing them one after the other, for example:
+[all]
+# settings here apply to all hardware
-[source]
-----
- # settings here are applied to all hardware
- [EDID=VSC-TD2220]
- # settings here are applied only if monitor VSC-TD2220 is connected
- [pi2]
- # settings here are applied only if monitor VSC-TD2220 is connected *and* on a Raspberry Pi 2
- [all]
- # settings here are applied to all hardware
----
Use the `[all]` filter to reset all previous filters and avoid unintentionally combining different filter types.
diff --git a/documentation/asciidoc/computers/config_txt/gpio.adoc b/documentation/asciidoc/computers/config_txt/gpio.adoc
index afec7e221..2508cbd06 100644
--- a/documentation/asciidoc/computers/config_txt/gpio.adoc
+++ b/documentation/asciidoc/computers/config_txt/gpio.adoc
@@ -1,10 +1,9 @@
-== GPIO Control
+== GPIO control
=== `gpio`
-The `gpio` directive allows GPIO pins to be set to specific modes and values at boot time in a way that would
-previously have needed a custom `dt-blob.bin` file. Each line applies the same settings (or at least makes the same
-changes) to a set of pins, either a single pin (`3`), a range of pins (`3-4`), or a comma-separated list of either (`3-4,6,8`).
+The `gpio` directive allows GPIO pins to be set to specific modes and values at boot time in a way that would previously have needed a custom `dt-blob.bin` file. Each line applies the same settings (or at least makes the same changes) to a set of pins, addressing either a single pin (`3`), a range of pins (`3-4`), or a comma-separated list of either (`3-4,6,8`).
+
The pin set is followed by an `=` and one or more comma-separated attributes from this list:
* `ip` - Input
@@ -16,10 +15,11 @@ The pin set is followed by an `=` and one or more comma-separated attributes fro
* `pd` - Pull down
* `pn/np` - No pull
-`gpio` settings are applied in order, so those appearing later override those appearing earlier.
+`gpio` settings apply in order, so those appearing later override those appearing earlier.
Examples:
+[source,ini]
----
# Select Alt2 for GPIO pins 0 to 27 (for DPI24)
gpio=0-27=a2
@@ -34,38 +34,9 @@ gpio=18,20=pu
gpio=17-21=ip
----
-The `gpio` directive respects the "[...]" section headers in `config.txt`, so it is possible to use different settings
-based on the model, serial number, and EDID.
-
-GPIO changes made through this mechanism do not have any direct effect on the kernel -- they don't cause GPIO pins to
-be exported to the sysfs interface, and they can be overridden by pinctrl entries in the Device Tree as well as
-utilities like `raspi-gpio`.
-
-Note also that there is a delay of a few seconds between power being applied and the changes taking effect -- longer
-if booting over the network or from a USB mass storage device.
-
-=== `enable_jtag_gpio`
-
-Setting `enable_jtag_gpio=1` selects Alt4 mode for GPIO pins 22-27, and sets up some internal SoC connections, thus enabling the JTAG interface for the ARM CPU. It works on all models of Raspberry Pi.
-
-|===
-| Pin # | Function
-
-| GPIO22
-| ARM_TRST
-
-| GPIO23
-| ARM_RTCK
-
-| GPIO24
-| ARM_TDO
+The `gpio` directive respects the "[...]" conditional filters in `config.txt`, so it is possible to use different settings based on the model, serial number, and EDID.
-| GPIO25
-| ARM_TCK
+GPIO changes made through this mechanism do not have any direct effect on the kernel. They don't cause GPIO pins to be exported to the `sysfs` interface, and they can be overridden by `pinctrl` entries in the Device Tree, as well as by user-space tools such as the `pinctrl` utility.
-| GPIO26
-| ARM_TDI
+Note also that there is a delay of a few seconds between power being applied and the changes taking effect - longer if booting over the network or from a USB mass storage device.
-| GPIO27
-| ARM_TMS
-|===
diff --git a/documentation/asciidoc/computers/config_txt/legacy.adoc b/documentation/asciidoc/computers/config_txt/legacy.adoc
deleted file mode 100644
index fd66f3b0c..000000000
--- a/documentation/asciidoc/computers/config_txt/legacy.adoc
+++ /dev/null
@@ -1,3 +0,0 @@
-== Legacy Options
-
-The remaining groups of `config.txt` options are considered legacy settings, either because they relate to older software such as the firmware graphics driver, or because they have been deprecated or removed altogether.
diff --git a/documentation/asciidoc/computers/config_txt/memory.adoc b/documentation/asciidoc/computers/config_txt/memory.adoc
index aa9455004..8c6d90731 100644
--- a/documentation/asciidoc/computers/config_txt/memory.adoc
+++ b/documentation/asciidoc/computers/config_txt/memory.adoc
@@ -1,60 +1,13 @@
-== Memory Options
-
-=== `gpu_mem`
-
-Specifies how much memory, in megabytes, to reserve for the exclusive use of the GPU: the remaining memory is allocated to the ARM CPU for use by the OS. For Raspberry Pis with less than 1GB of memory, the default is `64`; for Raspberry Pis with 1GB or more of memory the default is `76`.
-
-IMPORTANT: Unlike GPUs found on x86 machines, where increasing memory can improve 3D performance, the architecture of the VideoCore means *there is no performance advantage from specifying values larger than is necessary*, and in fact it can harm performance.
-
-To ensure the best performance of Linux, you should set `gpu_mem` to the lowest possible value. If a particular graphics feature is not working correctly, try increasing the value of `gpu_mem`, being mindful of the recommended maximums shown below.
-
-On the Raspberry Pi 4 the 3D component of the GPU has its own memory management unit (MMU), and does not use memory from the `gpu_mem` allocation. Instead memory is allocated dynamically within Linux. This allows a smaller value to be specified for `gpu_mem` on the Raspberry Pi 4, compared to previous models.
-
-On legacy kernels, the memory allocated to the GPU is used for display, 3D, Codec and camera purposes as well as some basic firmware housekeeping. The maximums specified below assume you are using all these features. If you are not, then smaller values of gpu_mem should be used.
-
-The recommended maximum values are as follows:
-
-|===
-| total RAM | `gpu_mem` recommended maximum
-
-| 256MB
-| `128`
-
-| 512MB
-| `384`
-
-| 1GB or greater
-| `512`, `76` on the Raspberry Pi 4
-|===
-
-IMPORTANT: The default camera stack (libcamera2) on Raspberry Pi OS - Bullseye uses Linux CMA memory to allocate buffers instead of GPU memory so there is no benefit in increasing the GPU memory size.
-
-It is possible to set `gpu_mem` to larger values, however this should be avoided since it can cause problems, such as preventing Linux from booting. The minimum value is `16`, however this disables certain GPU features.
-
-You can also use `gpu_mem_256`, `gpu_mem_512`, and `gpu_mem_1024` to allow swapping the same SD card between Raspberry Pis with different amounts of RAM without having to edit `config.txt` each time:
-
-=== `gpu_mem_256`
-
-The `gpu_mem_256` command sets the GPU memory in megabytes for Raspberry Pis with 256MB of memory. (It is ignored if memory size is not 256MB). This overrides `gpu_mem`.
-
-=== `gpu_mem_512`
-
-The `gpu_mem_512` command sets the GPU memory in megabytes for Raspberry Pis with 512MB of memory. (It is ignored if memory size is not 512MB). This overrides `gpu_mem`.
-
-=== `gpu_mem_1024`
-
-The `gpu_mem_1024` command sets the GPU memory in megabytes for Raspberry Pis with 1GB or more of memory. (It is ignored if memory size is smaller than 1GB). This overrides `gpu_mem`.
+== Memory options
=== `total_mem`
This parameter can be used to force a Raspberry Pi to limit its memory capacity: specify the total amount of RAM, in megabytes, you wish the Raspberry Pi to use. For example, to make a 4GB Raspberry Pi 4B behave as though it were a 1GB model, use the following:
+[source,ini]
----
total_mem=1024
----
This value will be clamped between a minimum of 128MB, and a maximum of the total memory installed on the board.
-=== `disable_l2cache`
-
-Setting this to `1` disables the CPU's access to the GPU's L2 cache and requires a corresponding L2 disabled kernel. Default value on BCM2835 is `0`. On BCM2836, BCM2837, and BCM2711, the ARMs have their own L2 cache and therefore the default is `1`. The standard Raspberry Pi `kernel.img` and `kernel7.img` builds reflect this difference in cache setting.
diff --git a/documentation/asciidoc/computers/config_txt/misc.adoc b/documentation/asciidoc/computers/config_txt/misc.adoc
deleted file mode 100644
index 537c65802..000000000
--- a/documentation/asciidoc/computers/config_txt/misc.adoc
+++ /dev/null
@@ -1,18 +0,0 @@
-== Miscellaneous Options
-
-=== `avoid_warnings`
-
-The xref:configuration.adoc#firmware-warning-icons[warning symbols] can be disabled using this option, although this is not advised.
-
-`avoid_warnings=1` disables the warning overlays.
-`avoid_warnings=2` disables the warning overlays, but additionally allows turbo mode even when low-voltage is present.
-
-=== `logging_level`
-
-Sets the VideoCore logging level. The value is a VideoCore-specific bitmask.
-
-=== `max_usb_current`
-
-WARNING: This command is now deprecated and has no effect.
-
-Originally certain models of Raspberry Pi limited the USB ports to a maximum of 600mA. Setting `max_usb_current=1` changed this default to 1200mA. However, all firmware now has this flag set by default, so it is no longer necessary to use this option.
diff --git a/documentation/asciidoc/computers/config_txt/overclocking.adoc b/documentation/asciidoc/computers/config_txt/overclocking.adoc
index 4aa280824..b76a8ac8a 100644
--- a/documentation/asciidoc/computers/config_txt/overclocking.adoc
+++ b/documentation/asciidoc/computers/config_txt/overclocking.adoc
@@ -1,15 +1,16 @@
-== Overclocking Options
+== Overclocking options
-The kernel has a https://www.kernel.org/doc/html/latest/admin-guide/pm/cpufreq.html[CPUFreq] driver with the "powersave" governor enabled by default, switched to "ondemand" during boot, when xref:configuration.adoc#raspi-config[raspi-config] is installed. With "ondemand" governor, CPU frequency will vary with processor load. You can adjust the minimum values with the `*_min` config options or disable dynamic clocking by applying a static scaling governor ("powersave" or "performance") or with `force_turbo=1`.
+The kernel has a https://www.kernel.org/doc/html/latest/admin-guide/pm/cpufreq.html[CPUFreq] driver with the powersave governor enabled by default, switched to ondemand during boot, when xref:configuration.adoc#raspi-config[raspi-config] is installed. With the ondemand governor, CPU frequency will vary with processor load. You can adjust the minimum values with the `*_min` config options, or disable dynamic clocking by applying a static scaling governor (powersave or performance) or with `force_turbo=1`.
-Overclocking and overvoltage will be disabled at runtime when the SoC reaches `temp_limit` (see below), which defaults to 85°C, in order to cool down the SoC. You should not hit this limit with Raspberry Pi 1 and Raspberry Pi 2, but you are more likely to with Raspberry Pi 3 and Raspberry Pi 4. Overclocking and overvoltage are also disabled when an undervoltage situation is detected.
+Overclocking and overvoltage will be disabled at runtime when the SoC reaches `temp_limit` (see below), which defaults to 85°C, in order to cool down the SoC. You should not hit this limit with Raspberry Pi 1 and Raspberry Pi 2, but you are more likely to with Raspberry Pi 3 and newer. Overclocking and overvoltage are also disabled when an undervoltage situation is detected.
NOTE: For more information xref:raspberry-pi.adoc#frequency-management-and-thermal-control[see the section on frequency management and thermal control].
-WARNING: Setting any overclocking parameters to values other than those used by xref:configuration.adoc#overclock[raspi-config] may set a permanent bit within the SoC, making it possible to detect that your Raspberry Pi has been overclocked. The specific circumstances where the overclock bit is set are if `force_turbo` is set to `1` and any of the `over_voltage_*` options are set to a value > `0`. See the https://www.raspberrypi.com/news/introducing-turbo-mode-up-to-50-more-performance-for-free/[blog post on Turbo Mode] for more information.
+WARNING: Setting any overclocking parameters to values other than those used by xref:configuration.adoc#overclock[`raspi-config`] may set a permanent bit within the SoC. This makes it possible to detect that your Raspberry Pi was once overclocked. The overclock bit is set when `force_turbo` is set to `1` and any of the `over_voltage_*` options are set to a value of more than `0`. See the https://www.raspberrypi.com/news/introducing-turbo-mode-up-to-50-more-performance-for-free/[blog post on Turbo mode] for more information.
=== Overclocking
+[cols="1m,3"]
|===
| Option | Description
@@ -20,49 +21,53 @@ WARNING: Setting any overclocking parameters to values other than those used by
| Increases `arm_freq` to the highest supported frequency for the board-type and firmware. Set to `1` to enable.
| gpu_freq
-| Sets `core_freq`, `h264_freq`, `isp_freq`, `v3d_freq` and `hevc_freq` together
+| Sets `core_freq`, `h264_freq`, `isp_freq`, `v3d_freq` and `hevc_freq` together.
| core_freq
-| Frequency of the GPU processor core in MHz, influences CPU performance because it drives the L2 cache and memory bus; the L2 cache benefits only Raspberry Pi Zero / Raspberry Pi Zero W / Raspberry Pi 1, there is a small benefit for SDRAM on Raspberry Pi 2 / Raspberry Pi 3. See section below for use on the Raspberry Pi 4.
+| Frequency of the GPU processor core in MHz. Influences CPU performance because it drives the L2 cache and memory bus; the L2 cache benefits only Raspberry Pi Zero/Raspberry Pi Zero W/Raspberry Pi 1, and there is a small benefit for SDRAM on Raspberry Pi 2 and Raspberry Pi 3. See section below for use on Raspberry Pi 4.
| h264_freq
-| Frequency of the hardware video block in MHz; individual override of the `gpu_freq` setting
+| Frequency of the hardware video block in MHz; individual override of the `gpu_freq` setting.
| isp_freq
-| Frequency of the image sensor pipeline block in MHz; individual override of the `gpu_freq` setting
+| Frequency of the image sensor pipeline block in MHz; individual override of the `gpu_freq` setting.
| v3d_freq
-| Frequency of the 3D block in MHz; individual override of the `gpu_freq` setting
+| Frequency of the 3D block in MHz; individual override of the `gpu_freq` setting. On Raspberry Pi 5, V3D is independent of `core_freq`, `isp_freq` and `hevc_freq`.
| hevc_freq
| Frequency of the High Efficiency Video Codec block in MHz; individual override of the `gpu_freq` setting. Raspberry Pi 4 only.
| sdram_freq
-| Frequency of the SDRAM in MHz. SDRAM overclocking on Raspberry Pi 4B is not currently supported
+| Frequency of the SDRAM in MHz. SDRAM overclocking on Raspberry Pi 4 or newer is not supported.
| over_voltage
-| CPU/GPU core upper voltage limit. The value should be in the range [-16,8] which equates to the range [0.95V,1.55V] ([0.8,1.4V] on Raspberry Pi 1) with 0.025V steps. In other words, specifying -16 will give 0.95V (0.8V on Raspberry Pi 1) as the maximum CPU/GPU core voltage, and specifying 8 will allow up to 1.55V (1.4V on Raspberry Pi 1). For defaults see table below. Values above 6 are only allowed when `force_turbo=1` is specified: this sets the warranty bit if `over_voltage_*` > `0` is also set.
+| CPU/GPU core upper voltage limit. The value should be in the range [-16,8] which equates to the range [0.95V,1.55V] ([0.8,1.4V] on Raspberry Pi 1) with 0.025V steps. In other words, specifying -16 will give 0.95V (0.8V on Raspberry Pi 1) as the maximum CPU/GPU core voltage, and specifying 8 will allow up to 1.55V (1.4V on Raspberry Pi 1). For defaults, see the table below. Values above 6 are only allowed when `force_turbo=1` is specified: this sets the warranty bit if `over_voltage_*` > `0` is also set.
| over_voltage_sdram
| Sets `over_voltage_sdram_c`, `over_voltage_sdram_i`, and `over_voltage_sdram_p` together.
| over_voltage_sdram_c
-| SDRAM controller voltage adjustment. [-16,8] equates to [0.8V,1.4V] with 0.025V steps.
+| SDRAM controller voltage adjustment. [-16,8] equates to [0.8V,1.4V] with 0.025V steps. Not supported on Raspberry Pi 4 or later devices.
| over_voltage_sdram_i
-| SDRAM I/O voltage adjustment. [-16,8] equates to [0.8V,1.4V] with 0.025V steps.
+| SDRAM I/O voltage adjustment. [-16,8] equates to [0.8V,1.4V] with 0.025V steps. Not supported on Raspberry Pi 4 or later devices.
| over_voltage_sdram_p
-| SDRAM phy voltage adjustment. [-16,8] equates to [0.8V,1.4V] with 0.025V steps.
-
+| SDRAM phy voltage adjustment. [-16,8] equates to [0.8V,1.4V] with 0.025V steps. Not supported on Raspberry Pi 4 or later devices.
+
| force_turbo
| Forces turbo mode frequencies even when the ARM cores are not busy. Enabling this may set the warranty bit if `over_voltage_*` is also set.
| initial_turbo
-| Enables https://forums.raspberrypi.com/viewtopic.php?f=29&t=6201&start=425#p180099[turbo mode from boot] for the given value in seconds, or until cpufreq sets a frequency. The maximum value is `60`.
+| Enables https://forums.raspberrypi.com/viewtopic.php?f=29&t=6201&start=425#p180099[turbo mode from boot] for the given value in seconds, or until `cpufreq` sets a frequency. The maximum value is `60`. The November 2024 firmware update made the following changes:
+
+* changed the default from `0` to `60` to reduce boot time
+* switched the kernel CPU performance governor from `powersave` to `ondemand`
+
| arm_freq_min
-| Minimum value of `arm_freq` used for dynamic frequency clocking. Note that reducing this value below the default does not result in any significant power savings and is not currently supported.
+| Minimum value of `arm_freq` used for dynamic frequency clocking. Note that reducing this value below the default does not result in any significant power savings, and is not currently supported.
| core_freq_min
| Minimum value of `core_freq` used for dynamic frequency clocking.
@@ -86,20 +91,27 @@ WARNING: Setting any overclocking parameters to values other than those used by
| Minimum value of `sdram_freq` used for dynamic frequency clocking.
| over_voltage_min
-| Minimum value of `over_voltage` used for dynamic frequency clocking. The value should be in the range [-16,8] which equates to the range [0.8V,1.4V] with 0.025V steps. In other words, specifying -16 will give 0.8V as the CPU/GPU core idle voltage, and specifying 8 will give a minimum of 1.4V.
+| Minimum value of `over_voltage` used for dynamic frequency clocking. The value should be in the range [-16,8] which equates to the range [0.8V,1.4V] with 0.025V steps. In other words, specifying -16 will give 0.8V as the CPU/GPU core idle voltage, and specifying 8 will give a minimum of 1.4V. This setting is deprecated on Raspberry Pi 4 and Raspberry Pi 5.
+
+| over_voltage_delta
+| On Raspberry Pi 4 and Raspberry Pi 5, the `over_voltage_delta` parameter adds the given offset in microvolts to the number calculated by the DVFS algorithm.
| temp_limit
| Overheat protection. This sets the clocks and voltages to default when the SoC reaches this value in degree Celsius. Values over 85 are clamped to 85.
| temp_soft_limit
| *3A+/3B+ only*. CPU speed throttle control. This sets the temperature at which the CPU clock speed throttling system activates. At this temperature, the clock speed is reduced from 1400MHz to 1200MHz. Defaults to `60`, can be raised to a maximum of `70`, but this may cause instability.
+
+| core_freq_fixed
+| Setting to `1` disables active scaling of the core clock frequency and ensures that any peripherals that use the core clock will maintain a consistent speed. The fixed clock speed is the higher/turbo frequency for the platform in use. Use this in preference to setting specific `core_freq` frequencies, as it provides portability of config files between platforms.
+
|===
This table gives the default values for the options on various Raspberry Pi models, all frequencies are stated in MHz.
-[cols=",^,^,^,^,^,^,^,^,^"]
+[cols="m,^,^,^,^,^,^,^,^,^,^"]
|===
-| Option | Pi 0/W | Pi1 | Pi2 | Pi3 | Pi3A+/Pi3B+ | CM4 & Pi4B <= R1.3 | Pi4B R1.4 | Pi 400 | Pi Zero 2 W
+| Option | Pi 0/W | Pi1 | Pi2 | Pi3 | Pi3A+/Pi3B+ | CM4 & Pi4B <= R1.3 | Pi4B R1.4 | Pi 400 | Pi Zero 2 W | Pi 5
| arm_freq
| 1000
@@ -108,9 +120,10 @@ This table gives the default values for the options on various Raspberry Pi mode
| 1200
| 1400
| 1500
-| 1500 or 1800 if arm_boost=1
+| 1500 or 1800 if `arm_boost`=1
| 1800
| 1000
+| 2400
| core_freq
| 400
@@ -122,6 +135,7 @@ This table gives the default values for the options on various Raspberry Pi mode
| 500
| 500
| 400
+| 910
| h264_freq
| 300
@@ -133,6 +147,7 @@ This table gives the default values for the options on various Raspberry Pi mode
| 500
| 500
| 300
+| N/A
| isp_freq
| 300
@@ -144,6 +159,7 @@ This table gives the default values for the options on various Raspberry Pi mode
| 500
| 500
| 300
+| 910
| v3d_freq
| 300
@@ -155,6 +171,7 @@ This table gives the default values for the options on various Raspberry Pi mode
| 500
| 500
| 300
+| 910
| hevc_freq
| N/A
@@ -166,6 +183,7 @@ This table gives the default values for the options on various Raspberry Pi mode
| 500
| 500
| N/A
+| 910
| sdram_freq
| 450
@@ -177,6 +195,7 @@ This table gives the default values for the options on various Raspberry Pi mode
| 3200
| 3200
| 450
+| 4267
| arm_freq_min
| 700
@@ -188,6 +207,7 @@ This table gives the default values for the options on various Raspberry Pi mode
| 600
| 600
| 600
+| 1500
| core_freq_min
| 250
@@ -199,6 +219,7 @@ This table gives the default values for the options on various Raspberry Pi mode
| 200
| 200
| 250
+| 500
| gpu_freq_min
| 250
@@ -210,6 +231,7 @@ This table gives the default values for the options on various Raspberry Pi mode
| 250
| 250
| 250
+| 500
| h264_freq_min
| 250
@@ -221,6 +243,7 @@ This table gives the default values for the options on various Raspberry Pi mode
| 250
| 250
| 250
+| N/A
| isp_freq_min
| 250
@@ -232,6 +255,7 @@ This table gives the default values for the options on various Raspberry Pi mode
| 250
| 250
| 250
+| 500
| v3d_freq_min
| 250
@@ -243,6 +267,7 @@ This table gives the default values for the options on various Raspberry Pi mode
| 250
| 250
| 250
+| 500
| sdram_freq_min
| 400
@@ -254,11 +279,12 @@ This table gives the default values for the options on various Raspberry Pi mode
| 3200
| 3200
| 400
+| 4267
|===
-This table gives defaults for options that are the same across all models.
+This table gives defaults for options which are the same across all models.
-[cols=",^"]
+[cols="m,^"]
|===
| Option | Default
@@ -290,7 +316,7 @@ This table gives defaults for options that are the same across all models.
The firmware uses Adaptive Voltage Scaling (AVS) to determine the optimum CPU/GPU core voltage in the range defined by `over_voltage` and `over_voltage_min`.
[discrete]
-===== Specific to Raspberry Pi 4, Raspberry Pi 400 and CM4
+==== Specific to Raspberry Pi 4, Raspberry Pi 400 and CM4
The minimum core frequency when the system is idle must be fast enough to support the highest pixel clock (ignoring blanking) of the display(s). Consequently, `core_freq` will be boosted above 500 MHz if the display mode is 4Kp60.
@@ -300,57 +326,72 @@ The minimum core frequency when the system is idle must be fast enough to suppor
| Default
| 500
-| hdmi_enable_4kp60
+| `hdmi_enable_4kp60`
| 550
|===
+NOTE: There is no need to use `hdmi_enable_4kp60` on Flagship models since Raspberry Pi 5, Compute Modules since CM5, and Keyboard models since Pi 500; they support dual-4Kp60 displays by default.
+
* Overclocking requires the latest firmware release.
* The latest firmware automatically scales up the voltage if the system is overclocked. Manually setting `over_voltage` disables automatic voltage scaling for overclocking.
-* It is recommended when overclocking to use the individual frequency settings (`isp_freq`, `v3d_freq` etc) rather than `gpu_freq` because the maximum stable frequency will be different for ISP, V3D, HEVC etc.
-* The SDRAM frequency is not configurable on Raspberry Pi 4.
+* It is recommended when overclocking to use the individual frequency settings (`isp_freq`, `v3d_freq` etc) rather than `gpu_freq`, because the maximum stable frequency will be different for ISP, V3D, HEVC etc.
+* The SDRAM frequency is not configurable on Raspberry Pi 4 or later devices.
==== `force_turbo`
-By default (`force_turbo=0`) the "On Demand" CPU frequency driver will raise clocks to their maximum frequencies when the ARM cores are busy and will lower them to the minimum frequencies when the ARM cores are idle.
+By default (`force_turbo=0`) the on-demand CPU frequency driver will raise clocks to their maximum frequencies when the ARM cores are busy, and will lower them to the minimum frequencies when the ARM cores are idle.
`force_turbo=1` overrides this behaviour and forces maximum frequencies even when the ARM cores are not busy.
-==== `never_over_voltage`
+=== Clocks relationship
-Sets a bit in the OTP memory (one time programmable) that prevents the device from being overvoltaged. This is intended to lock the device down so the warranty bit cannot be set either inadvertently or maliciously by using an invalid overvoltage.
+==== Raspberry Pi 4
-==== `disable_auto_turbo`
+The GPU core, CPU, SDRAM and GPU each have their own PLLs and can have unrelated frequencies. The h264, v3d and ISP blocks share a PLL.
-On Raspberry Pi 2 / Raspberry Pi 3, setting this flag will disable the GPU from moving into turbo mode, which it can do in particular load cases.
+To view the Raspberry Pi's current frequency in kHz, type: `cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq`. Divide the result by 1000 to find the value in MHz. Note that this frequency is the kernel _requested_ frequency, and it is possible that any throttling (for example at high temperatures) may mean the CPU is actually running more slowly than reported. An instantaneous measurement of the actual ARM CPU frequency can be retrieved using the command `vcgencmd measure_clock arm`. This is displayed in Hertz.
-=== Clocks Relationship
+=== Monitoring core temperature
+[.whitepaper, title="Cooling a Raspberry Pi device", subtitle="", link=https://pip.raspberrypi.com/categories/685-whitepapers-app-notes/documents/RP-003608-WP/Cooling-a-Raspberry-Pi-device.pdf]
+****
+This white paper goes through the reasons why your Raspberry Pi may get hot and why you might want to cool it back down, offering options on the cooling process.
+****
-The GPU core, CPU, SDRAM and GPU each have their own PLLs and https://forums.raspberrypi.com/viewtopic.php?f=29&t=6201&start=275#p168042[can have unrelated frequencies]. The h264, v3d and ISP blocks share a PLL.
+To view the temperature of a Raspberry Pi, run the following command:
-To view the Raspberry Pi's current frequency in KHz, type: `cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq`. Divide the result by 1000 to find the value in MHz. Note that this frequency is the kernel _requested_ frequency, and it is possible that any throttling (for example at high temperatures) may mean the CPU is actually running more slowly than reported. An instantaneous measurement of the actual ARM CPU frequency can be retrieved using the vcgencmd `vcgencmd measure_clock arm`. This is displayed in Hertz.
+[source,console]
+----
+$ cat /sys/class/thermal/thermal_zone0/temp
+----
+
+Divide the result by 1000 to find the value in degrees Celsius. Alternatively, you can use `vcgencmd measure_temp` to report the GPU temperature.
-=== Monitoring Core Temperature
+Hitting the temperature limit is not harmful to the SoC, but it will cause the CPU to throttle. A heat sink can help to control the core temperature, and therefore performance. This is especially useful if the Raspberry Pi is running inside a case. Airflow over the heat sink will make cooling more efficient.
-To view the Raspberry Pi's temperature, type `cat /sys/class/thermal/thermal_zone0/temp`. Divide the result by 1000 to find the value in degrees Celsius. Alternatively, there is a vcgencmd, `vcgencmd measure_temp` that interrogates the GPU directly for its temperature.
+When the core temperature is between 80°C and 85°C, the ARM cores will be throttled back. If the temperature exceeds 85°C, the ARM cores and the GPU will be throttled back.
-Whilst hitting the temperature limit is not harmful to the SoC, it will cause CPU throttling. A heatsink can help to control the core temperature and therefore performance. This is especially useful if the Raspberry Pi is running inside a case. Airflow over the heatsink will make cooling more efficient.
+For the Raspberry Pi 3 Model B+, the PCB technology has been changed to provide better heat dissipation and increased thermal mass. In addition, a soft temperature limit has been introduced, with the goal of maximising the time for which a device can "sprint" before reaching the hard limit at 85°C. When the soft limit is reached, the clock speed is reduced from 1.4GHz to 1.2GHz, and the operating voltage is reduced slightly. This reduces the rate of temperature increase: we trade a short period at 1.4GHz for a longer period at 1.2GHz. By default, the soft limit is 60°C. This can be changed via the `temp_soft_limit` setting in `config.txt`.
-With firmware from 12th September 2016 or later, when the core temperature is between 80'C and 85'C, a warning icon showing a red half-filled thermometer will be displayed, and the ARM cores will be throttled back. If the temperature exceeds 85'C, an icon showing a fully-filled thermometer will be displayed, and both the ARM cores and the GPU will be throttled back.
+=== Monitoring voltage
-For the Raspberry Pi 3 Model B+, the PCB technology has been changed to provide better heat dissipation and increased thermal mass. In addition, a soft temperature limit has been introduced, with the goal of maximising the time for which a device can "sprint" before reaching the hard limit at 85°C. When the soft limit is reached, the clock speed is reduced from 1.4GHz to 1.2GHz, and the operating voltage is reduced slightly. This reduces the rate of temperature increase: we trade a short period at 1.4GHz for a longer period at 1.2GHz. By default, the soft limit is 60°C, and this can be changed via the `temp_soft_limit` setting in config.txt.
+It is essential to keep the supply voltage above 4.8V for reliable performance. Note that the voltage from some USB chargers/power supplies can fall as low as 4.2V. This is because they are usually designed to charge a 3.7V LiPo battery, not to supply 5V to a computer.
-See the page on xref:configuration.adoc#firmware-warning-icons[warning icons] for more details.
+To monitor the Raspberry Pi's PSU voltage, you will need to use a multimeter to measure between the VCC and GND pins on the GPIO. More information is available in the xref:raspberry-pi.adoc#power-supply[power] section of the documentation.
-=== Monitoring Voltage
+If the voltage drops below 4.63V (±5%), the ARM cores and the GPU will be throttled back, and a message indicating the low voltage state will be added to the kernel log.
-It is essential to keep the supply voltage above 4.8V for reliable performance. Note that the voltage from some USB chargers/power supplies can fall as low as 4.2V. This is because they are usually designed to charge a 3.7V LiPo battery, not to supply 5V to a computer.
+The Raspberry Pi 5 PMIC has built-in ADCs that allow the supply voltage to be measured. To view the current supply voltage, run the following command:
-To monitor the Raspberry Pi's PSU voltage, you will need to use a multimeter to measure between the VCC and GND pins on the GPIO. More information is available in xref:raspberry-pi.adoc#power-supply[power].
+[source,console]
+----
+$ vcgencmd pmic_read_adc EXT5V_V
+----
-If the voltage drops below 4.63V (+-5%), recent versions of the firmware will show a yellow lightning bolt symbol on the display to indicate a lack of power, and a message indicating the low voltage state will be added to the kernel log.
+=== Overclocking problems
-See the page on xref:configuration.adoc#firmware-warning-icons[warning icons] for more details.
+Most overclocking issues show up immediately as a failure to boot. If your device fails to boot due to an overclocking configuration change, use the following steps to return your device to a bootable state:
-=== Overclocking Problems
+. Remove any clock frequency overrides from `config.txt`.
+. Increase the core voltage using `over_voltage_delta`.
+. Re-apply overclocking parameters, taking care to avoid the previous known-bad overclocking parameters.
-Most overclocking issues show up immediately with a failure to boot. If this occurs, hold down the `shift` key during the next boot. This will temporarily disable all overclocking, allowing you to boot successfully and then edit your settings.
diff --git a/documentation/asciidoc/computers/config_txt/pi4-hdmi.adoc b/documentation/asciidoc/computers/config_txt/pi4-hdmi.adoc
deleted file mode 100644
index d3e217883..000000000
--- a/documentation/asciidoc/computers/config_txt/pi4-hdmi.adoc
+++ /dev/null
@@ -1,28 +0,0 @@
-== Raspberry Pi 4 HDMI Pipeline
-
-IMPORTANT: When using the VC4 KMS graphics driver, the complete display pipeline is managed by Linux - this includes the HDMI outputs. These settings only apply to the legacy FKMS and firmware-based graphics driver.
-
-In order to support dual displays, and modes up to 4k60, the Raspberry Pi 4 has updated the HDMI composition pipeline hardware in a number of ways. One of the major changes is that it generates 2 output pixels for every clock cycle.
-
-Every HDMI mode has a list of timings that control all the parameters around sync pulse durations. These are typically defined via a pixel clock, and then a number of active pixels, a front porch, sync pulse, and back porch for each of the horizontal and vertical directions.
-
-Running everything at 2 pixels per clock means that the Raspberry Pi 4 can not support a timing where _any_ of the horizontal timings are not divisible by 2. The firmware and Linux kernel will filter out any mode that does not fulfill this criteria.
-
-There is only one mode in the CEA and DMT standards that falls into this category - DMT mode 81, which is 1366x768 @ 60Hz. This mode has odd values for the horizontal sync and back porch timings. It's also an unusual mode for having a width that isn't divisible by 8.
-
-If your monitor is of this resolution, then the Raspberry Pi 4 will automatically drop down to the next mode that is advertised by the monitor; this is typically 1280x720.
-
-On some monitors it is possible to configure them to use 1360x768 @ 60Hz. They typically do not advertise this mode via their EDID so the selection can't be made automatically, but it can be manually chosen by adding
-
-[source]
-----
-hdmi_group=2
-hdmi_mode=87
-hdmi_cvt=1360 768 60
-----
-
-to xref:config_txt.adoc#video-options[config.txt].
-
-Timings specified manually via a `hdmi_timings=` line in `config.txt` will also need to comply with the restriction of all horizontal timing parameters being divisible by 2.
-
-`dpi_timings=` are not restricted in the same way as that pipeline still only runs at a single pixel per clock cycle.
diff --git a/documentation/asciidoc/computers/config_txt/video.adoc b/documentation/asciidoc/computers/config_txt/video.adoc
index c78ec8d2e..eac9fba9f 100644
--- a/documentation/asciidoc/computers/config_txt/video.adoc
+++ b/documentation/asciidoc/computers/config_txt/video.adoc
@@ -1,1472 +1,28 @@
-== Video Options
+== Video options
-=== HDMI Mode
+=== HDMI mode
-NOTE: Because the Raspberry Pi 4 and Raspberry Pi 400 have two HDMI ports, some HDMI commands can be applied to either port. You can use the syntax `:`, where port is 0 or 1, to specify which port the setting should apply to. If no port is specified, the default is 0. If you specify a port number on a command that does not require a port number, the port is ignored. Further details on the syntax and alternatives mechanisms can be found in the HDMI sub-section of the xref:config_txt.adoc#conditional-filters[conditionals section] of the documentation.
+To control HDMI settings, use the xref:configuration.adoc#set-resolution-and-rotation[Screen Configuration utility] or xref:configuration.adoc#set-the-kms-display-mode[KMS video settings] in `cmdline.txt`.
-In order to support dual 4k displays, the Raspberry Pi 4 has xref:config_txt.adoc#raspberry-pi-4-hdmi-pipeline[updated video hardware], which imposes minor restrictions on the modes supported.
+==== HDMI Pipeline for 4-series devices
-==== `hdmi_safe`
+In order to support dual displays and modes up to 4Kp60, Raspberry Pi 4, Compute Module 4, and Pi 400 generate 2 output pixels for every clock cycle.
-Setting `hdmi_safe` to `1` will lead to "safe mode" settings being used to try to boot with maximum HDMI compatibility. This is the same as setting the following parameters:
+Every HDMI mode has a list of timings that control all the parameters around sync pulse durations. These are typically defined via a pixel clock, and then a number of active pixels, a front porch, sync pulse, and back porch for each of the horizontal and vertical directions.
-----
-hdmi_force_hotplug=1
-hdmi_ignore_edid=0xa5000080
-config_hdmi_boost=4
-hdmi_group=2
-hdmi_mode=4
-disable_overscan=0
-overscan_left=24
-overscan_right=24
-overscan_top=24
-overscan_bottom=24
-----
-
-==== `hdmi_ignore_edid`
-
-Setting `hdmi_ignore_edid` to `0xa5000080` enables the ignoring of EDID/display data if your display does not have an accurate https://en.wikipedia.org/wiki/Extended_display_identification_data[EDID]. It requires this unusual value to ensure that it is not triggered accidentally.
-
-==== `hdmi_edid_file`
-
-Setting `hdmi_edid_file` to `1` will cause the GPU to read EDID data from the `edid.dat` file, located in the boot partition, instead of reading it from the monitor. More information is available https://forums.raspberrypi.com/viewtopic.php?p=173430#p173430[on the forums].
-
-==== `hdmi_edid_filename`
-
-On the Raspberry Pi 4B, you can use the `hdmi_edid_filename` command to specify the filename of the EDID file to use, and also to specify which port the file is to be applied to. This also requires `hdmi_edid_file=1` to enable EDID files.
-
-For example:
-
-----
-hdmi_edid_file=1
-hdmi_edid_filename:0=FileForPortZero.edid
-hdmi_edid_filename:1=FileForPortOne.edid
-----
-
-==== `hdmi_force_edid_audio`
-
-Setting `hdmi_force_edid_audio` to `1` pretends that all audio formats are supported by the display, allowing passthrough of DTS/AC3 even when this is not reported as supported.
-
-==== `hdmi_ignore_edid_audio`
-
-Setting `hdmi_ignore_edid_audio` to `1` pretends that all audio formats are unsupported by the display. This means ALSA will default to the analogue audio (headphone) jack.
-
-==== `hdmi_force_edid_3d`
-
-Setting `hdmi_force_edid_3d` to `1` pretends that all CEA modes support 3D, even when the EDID does not indicate support for this.
-
-==== `hdmi_ignore_cec_init`
-
-Setting `hdmi_ignore_cec_init` to `1` will stop the initial active source message being sent during bootup. This prevents a CEC-enabled TV from coming out of standby and channel-switching when you are rebooting your Raspberry Pi.
-
-==== `hdmi_ignore_cec`
-
-Setting `hdmi_ignore_cec` to `1` pretends that https://en.wikipedia.org/wiki/Consumer_Electronics_Control#CEC[CEC] is not supported at all by the TV. No CEC functions will be supported.
-
-==== `cec_osd_name`
-
-The `cec_osd_name` command sets the initial CEC name of the device. The default is Raspberry Pi.
-
-==== `hdmi_pixel_encoding`
-
-The `hdmi_pixel_encoding` command forces the pixel encoding mode. By default, it will use the mode requested from the EDID, so you shouldn't need to change it.
-
-|===
-| hdmi_pixel_encoding | result
-
-| 0
-| default (RGB limited for CEA, RGB full for DMT)
-
-| 1
-| RGB limited (16-235)
-
-| 2
-| RGB full (0-255)
-
-| 3
-| YCbCr limited (16-235)
-
-| 4
-| YCbCr full (0-255)
-|===
-
-==== `hdmi_max_pixel_freq`
-
-The pixel frequency is used by the firmware and KMS to filter HDMI modes. Note, this is not the same as the frame rate. It specifies the maximum frequency that a valid mode can have, thereby culling out higher frequency modes. So for example, if you wish to disable all 4K modes, you could specify a maximum frequency of 200000000, since all 4K modes have frequencies greater than this.
-
-==== `hdmi_blanking`
-
-The `hdmi_blanking` command controls what happens when the operating system asks for the display to be put into standby mode, using DPMS, to save power. If this option is not set or set to 0, the HDMI output is blanked but not switched off. In order to mimic the behaviour of other computers, you can set the HDMI output to switch off as well by setting this option to 1: the attached display will go into a low power standby mode.
-
-NOTE: On the Raspberry Pi 4, setting `hdmi_blanking=1` will not cause the HDMI output to be switched off, since this feature has not yet been implemented. This feature may cause issues when using applications which don't use the framebuffer, such as `omxplayer`.
-
-|===
-| hdmi_blanking | result
-
-| 0
-| HDMI output will be blanked
-
-| 1
-| HDMI output will be switched off and blanked
-|===
-
-==== `hdmi_drive`
-
-The `hdmi_drive` command allows you to choose between HDMI and DVI output modes.
-
-|===
-| hdmi_drive | result
-
-| 1
-| Normal DVI mode (no sound)
-
-| 2
-| Normal HDMI mode (sound will be sent if supported and enabled)
-|===
-
-==== `config_hdmi_boost`
-
-Configures the signal strength of the HDMI interface. The minimum value is `0` and the maximum is `11`.
-
-The default value for the original Model B and A is `2`. The default value for the Model B+ and all later models is `5`.
-
-If you are seeing HDMI issues (speckling, interference) then try `7`. Very long HDMI cables may need up to `11`, but values this high should not be used unless absolutely necessary.
-
-This option is ignored on the Raspberry Pi 4.
-
-==== `hdmi_group`
-
-The `hdmi_group` command defines the HDMI output group to be either CEA (Consumer Electronics Association, the standard typically used by TVs) or DMT (Display Monitor Timings, the standard typically used by monitors). This setting should be used in conjunction with `hdmi_mode`.
-
-|===
-| hdmi_group | result
-
-| 0
-| Auto-detect from EDID
-
-| 1
-| CEA
-
-| 2
-| DMT
-|===
-
-==== `hdmi_mode`
-
-Together with `hdmi_group`, `hdmi_mode` defines the HDMI output format. Format mode numbers are derived from the https://web.archive.org/web/20171201033424/https://standards.cta.tech/kwspub/published_docs/CTA-861-G_FINAL_revised_2017.pdf[CTA specification].
-
-To set a custom display mode not listed here, see more information on https://forums.raspberrypi.com/viewtopic.php?f=29&t=24679[the forums].
-
-NOTE: Not all modes are available on all models.
-
-These values are valid if `hdmi_group=1` (CEA):
-
-[cols=",,,^,"]
-|===
-| hdmi_mode | Resolution | Frequency | Screen Aspect | Notes
-
-| 1
-| VGA (640x480)
-| 60Hz
-| 4:3
-|
-
-| 2
-| 480p
-| 60Hz
-| 4:3
-|
-
-| 3
-| 480p
-| 60Hz
-| 16:9
-|
-
-| 4
-| 720p
-| 60Hz
-| 16:9
-|
-
-| 5
-| 1080i
-| 60Hz
-| 16:9
-|
-
-| 6
-| 480i
-| 60Hz
-| 4:3
-|
-
-| 7
-| 480i
-| 60Hz
-| 16:9
-|
-
-| 8
-| 240p
-| 60Hz
-| 4:3
-|
-
-| 9
-| 240p
-| 60Hz
-| 16:9
-|
-
-| 10
-| 480i
-| 60Hz
-| 4:3
-| pixel quadrupling
-
-| 11
-| 480i
-| 60Hz
-| 16:9
-| pixel quadrupling
-
-| 12
-| 240p
-| 60Hz
-| 4:3
-| pixel quadrupling
-
-| 13
-| 240p
-| 60Hz
-| 16:9
-| pixel quadrupling
-
-| 14
-| 480p
-| 60Hz
-| 4:3
-| pixel doubling
-
-| 15
-| 480p
-| 60Hz
-| 16:9
-| pixel doubling
-
-| 16
-| 1080p
-| 60Hz
-| 16:9
-|
-
-| 17
-| 576p
-| 50Hz
-| 4:3
-|
-
-| 18
-| 576p
-| 50Hz
-| 16:9
-|
-
-| 19
-| 720p
-| 50Hz
-| 16:9
-|
-
-| 20
-| 1080i
-| 50Hz
-| 16:9
-|
-
-| 21
-| 576i
-| 50Hz
-| 4:3
-|
-
-| 22
-| 576i
-| 50Hz
-| 16:9
-|
-
-| 23
-| 288p
-| 50Hz
-| 4:3
-|
-
-| 24
-| 288p
-| 50Hz
-| 16:9
-|
-
-| 25
-| 576i
-| 50Hz
-| 4:3
-| pixel quadrupling
-
-| 26
-| 576i
-| 50Hz
-| 16:9
-| pixel quadrupling
-
-| 27
-| 288p
-| 50Hz
-| 4:3
-| pixel quadrupling
-
-| 28
-| 288p
-| 50Hz
-| 16:9
-| pixel quadrupling
-
-| 29
-| 576p
-| 50Hz
-| 4:3
-| pixel doubling
-
-| 30
-| 576p
-| 50Hz
-| 16:9
-| pixel doubling
-
-| 31
-| 1080p
-| 50Hz
-| 16:9
-|
-
-| 32
-| 1080p
-| 24Hz
-| 16:9
-|
-
-| 33
-| 1080p
-| 25Hz
-| 16:9
-|
-
-| 34
-| 1080p
-| 30Hz
-| 16:9
-|
-
-| 35
-| 480p
-| 60Hz
-| 4:3
-| pixel quadrupling
+Running everything at 2 pixels per clock means that the 4-series devices cannot support a timing where _any_ of the horizontal timings are not divisible by 2. The firmware and Linux kernel filter out any mode that does not fulfil this criterion.
-| 36
-| 480p
-| 60Hz
-| 16:9
-| pixel quadrupling
+There is only one incompatible mode in the CEA and DMT standards: DMT mode 81, 1366x768 @ 60Hz. This mode has odd-numbered values for the horizontal sync and back porch timings and a width that is indivisible by 8.
-| 37
-| 576p
-| 50Hz
-| 4:3
-| pixel quadrupling
+If your monitor has this resolution, 4-series devices automatically drop down to the next mode advertised by the monitor; typically 1280x720.
-| 38
-| 576p
-| 50Hz
-| 16:9
-| pixel quadrupling
+==== HDMI Pipeline for 5-series devices
-| 39
-| 1080i
-| 50Hz
-| 16:9
-| reduced blanking
+Flagship models since Raspberry Pi 5, Compute Module models since CM5, and Keyboard models since Pi 500 also work at 2 output pixels per clock cycle. These models have special handling for odd timings and can handle these modes directly.
-| 40
-| 1080i
-| 100Hz
-| 16:9
-|
+=== Composite video mode
-| 41
-| 720p
-| 100Hz
-| 16:9
-|
-
-| 42
-| 576p
-| 100Hz
-| 4:3
-|
-
-| 43
-| 576p
-| 100Hz
-| 16:9
-|
-
-| 44
-| 576i
-| 100Hz
-| 4:3
-|
-
-| 45
-| 576i
-| 100Hz
-| 16:9
-|
-
-| 46
-| 1080i
-| 120Hz
-| 16:9
-|
-
-| 47
-| 720p
-| 120Hz
-| 16:9
-|
-
-| 48
-| 480p
-| 120Hz
-| 4:3
-|
-
-| 49
-| 480p
-| 120Hz
-| 16:9
-|
-
-| 50
-| 480i
-| 120Hz
-| 4:3
-|
-
-| 51
-| 480i
-| 120Hz
-| 16:9
-|
-
-| 52
-| 576p
-| 200Hz
-| 4:3
-|
-
-| 53
-| 576p
-| 200Hz
-| 16:9
-|
-
-| 54
-| 576i
-| 200Hz
-| 4:3
-|
-
-| 55
-| 576i
-| 200Hz
-| 16:9
-|
-
-| 56
-| 480p
-| 240Hz
-| 4:3
-|
-
-| 57
-| 480p
-| 240Hz
-| 16:9
-|
-
-| 58
-| 480i
-| 240Hz
-| 4:3
-|
-
-| 59
-| 480i
-| 240Hz
-| 16:9
-|
-
-| 60
-| 720p
-| 24Hz
-| 16:9
-|
-
-| 61
-| 720p
-| 25Hz
-| 16:9
-|
-
-| 62
-| 720p
-| 30Hz
-| 16:9
-|
-
-| 63
-| 1080p
-| 120Hz
-| 16:9
-|
-
-| 64
-| 1080p
-| 100Hz
-| 16:9
-|
-
-| 65
-| Custom
-|
-|
-|
-
-| 66
-| 720p
-| 25Hz
-| 64:27
-| Pi 4
-
-| 67
-| 720p
-| 30Hz
-| 64:27
-| Pi 4
-
-| 68
-| 720p
-| 50Hz
-| 64:27
-| Pi 4
-
-| 69
-| 720p
-| 60Hz
-| 64:27
-| Pi 4
-
-| 70
-| 720p
-| 100Hz
-| 64:27
-| Pi 4
-
-| 71
-| 720p
-| 120Hz
-| 64:27
-| Pi 4
-
-| 72
-| 1080p
-| 24Hz
-| 64:27
-| Pi 4
-
-| 73
-| 1080p
-| 25Hz
-| 64:27
-| Pi 4
-
-| 74
-| 1080p
-| 30Hz
-| 64:27
-| Pi 4
-
-| 75
-| 1080p
-| 50Hz
-| 64:27
-| Pi 4
-
-| 76
-| 1080p
-| 60Hz
-| 64:27
-| Pi 4
-
-| 77
-| 1080p
-| 100Hz
-| 64:27
-| Pi 4
-
-| 78
-| 1080p
-| 120Hz
-| 64:27
-| Pi 4
-
-| 79
-| 1680x720
-| 24Hz
-| 64:27
-| Pi 4
-
-| 80
-| 1680x720
-| 25z
-| 64:27
-| Pi 4
-
-| 81
-| 1680x720
-| 30Hz
-| 64:27
-| Pi 4
-
-| 82
-| 1680x720
-| 50Hz
-| 64:27
-| Pi 4
-
-| 83
-| 1680x720
-| 60Hz
-| 64:27
-| Pi 4
-
-| 84
-| 1680x720
-| 100Hz
-| 64:27
-| Pi 4
-
-| 85
-| 1680x720
-| 120Hz
-| 64:27
-| Pi 4
-
-| 86
-| 2560x720
-| 24Hz
-| 64:27
-| Pi 4
-
-| 87
-| 2560x720
-| 25Hz
-| 64:27
-| Pi 4
-
-| 88
-| 2560x720
-| 30Hz
-| 64:27
-| Pi 4
-
-| 89
-| 2560x720
-| 50Hz
-| 64:27
-| Pi 4
-
-| 90
-| 2560x720
-| 60Hz
-| 64:27
-| Pi 4
-
-| 91
-| 2560x720
-| 100Hz
-| 64:27
-| Pi 4
-
-| 92
-| 2560x720
-| 120Hz
-| 64:27
-| Pi 4
-
-| 93
-| 2160p
-| 24Hz
-| 16:9
-| Pi 4
-
-| 94
-| 2160p
-| 25Hz
-| 16:9
-| Pi 4
-
-| 95
-| 2160p
-| 30Hz
-| 16:9
-| Pi 4
-
-| 96
-| 2160p
-| 50Hz
-| 16:9
-| Pi 4
-
-| 97
-| 2160p
-| 60Hz
-| 16:9
-| Pi 4
-
-| 98
-| 4096x2160
-| 24Hz
-| 256:135
-| Pi 4
-
-| 99
-| 4096x2160
-| 25Hz
-| 256:135
-| Pi 4
-
-| 100
-| 4096x2160
-| 30Hz
-| 256:135
-| Pi 4
-
-| 101
-| 4096x2160
-| 50Hz
-| 256:135
-| Pi 4<>
-
-| 102
-| 4096x2160
-| 60Hz
-| 256:135
-| Pi 4<