diff --git a/.circleci/config.yml b/.circleci/config.yml index 47e6dff76c..3d69fa3e96 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -67,18 +67,11 @@ jobs: name: Ensure that every template builds for a variety of options. command: | go build . - goruncmd="./u-root -stats-output-path=/tmp/stats.json" + goruncmd="./u-root" $goruncmd minimal $goruncmd core $goruncmd coreboot-app $goruncmd all $goruncmd world - # Fails because of dups. Is this supposed to work? - #$goruncmd all core - #$goruncmd all core + $goruncmd all core GOOS=plan9 $goruncmd -defaultsh=/bbin/rush plan9 - cat /tmp/stats.json - - store_artifacts: - name: Store build stats - path: /tmp/stats.json - destination: stats.json diff --git a/.mkuimage.yaml b/.mkuimage.yaml new file mode 100644 index 0000000000..8e5e6ae35c --- /dev/null +++ b/.mkuimage.yaml @@ -0,0 +1,188 @@ +commands: + all: + - github.com/u-root/u-root/cmds/core/* + - github.com/u-root/u-root/cmds/boot/*boot* + + boot: + - github.com/u-root/u-root/cmds/boot/*boot* + + world: + - github.com/u-root/u-root/cmds/*/* + + core: + - github.com/u-root/u-root/cmds/core/* + + minimal: + - github.com/u-root/u-root/cmds/core/blkid + - github.com/u-root/u-root/cmds/core/cat + - github.com/u-root/u-root/cmds/core/chmod + - github.com/u-root/u-root/cmds/core/cmp + - github.com/u-root/u-root/cmds/core/cp + - github.com/u-root/u-root/cmds/core/date + - github.com/u-root/u-root/cmds/core/dd + - github.com/u-root/u-root/cmds/core/df + - github.com/u-root/u-root/cmds/core/dhclient + - github.com/u-root/u-root/cmds/core/dmesg + - github.com/u-root/u-root/cmds/core/echo + - github.com/u-root/u-root/cmds/core/find + - github.com/u-root/u-root/cmds/core/free + - github.com/u-root/u-root/cmds/core/gosh + - github.com/u-root/u-root/cmds/core/gpgv + - github.com/u-root/u-root/cmds/core/grep + - github.com/u-root/u-root/cmds/core/gzip + - github.com/u-root/u-root/cmds/core/hostname + - github.com/u-root/u-root/cmds/core/id + - github.com/u-root/u-root/cmds/core/init + - github.com/u-root/u-root/cmds/core/insmod + - github.com/u-root/u-root/cmds/core/io + - github.com/u-root/u-root/cmds/core/ip + - github.com/u-root/u-root/cmds/core/kexec + - github.com/u-root/u-root/cmds/core/kill + - github.com/u-root/u-root/cmds/core/ln + - github.com/u-root/u-root/cmds/core/losetup + - github.com/u-root/u-root/cmds/core/ls + - github.com/u-root/u-root/cmds/core/lsmod + - github.com/u-root/u-root/cmds/core/mkdir + - github.com/u-root/u-root/cmds/core/mknod + - github.com/u-root/u-root/cmds/core/mount + - github.com/u-root/u-root/cmds/core/msr + - github.com/u-root/u-root/cmds/core/mv + - github.com/u-root/u-root/cmds/core/pci + - github.com/u-root/u-root/cmds/core/ping + - github.com/u-root/u-root/cmds/core/printenv + - github.com/u-root/u-root/cmds/core/ps + - github.com/u-root/u-root/cmds/core/pwd + - github.com/u-root/u-root/cmds/core/readlink + - github.com/u-root/u-root/cmds/core/rm + - github.com/u-root/u-root/cmds/core/rmmod + - github.com/u-root/u-root/cmds/core/seq + - github.com/u-root/u-root/cmds/core/shutdown + - github.com/u-root/u-root/cmds/core/sleep + - github.com/u-root/u-root/cmds/core/sync + - github.com/u-root/u-root/cmds/core/tail + - github.com/u-root/u-root/cmds/core/tee + - github.com/u-root/u-root/cmds/core/truncate + - github.com/u-root/u-root/cmds/core/umount + - github.com/u-root/u-root/cmds/core/uname + - github.com/u-root/u-root/cmds/core/unshare + - github.com/u-root/u-root/cmds/core/wc + - github.com/u-root/u-root/cmds/core/wget + - 
github.com/u-root/u-root/cmds/core/which + + embedded: + - github.com/u-root/u-root/cmds/core/cat + - github.com/u-root/u-root/cmds/core/cp + - github.com/u-root/u-root/cmds/core/dd + - github.com/u-root/u-root/cmds/core/dhclient + - github.com/u-root/u-root/cmds/core/dmesg + - github.com/u-root/u-root/cmds/core/echo + - github.com/u-root/u-root/cmds/core/free + - github.com/u-root/u-root/cmds/core/gosh + - github.com/u-root/u-root/cmds/core/grep + - github.com/u-root/u-root/cmds/core/init + - github.com/u-root/u-root/cmds/core/insmod + - github.com/u-root/u-root/cmds/core/ip + - github.com/u-root/u-root/cmds/core/kexec + - github.com/u-root/u-root/cmds/core/ln + - github.com/u-root/u-root/cmds/core/ls + - github.com/u-root/u-root/cmds/core/mkdir + - github.com/u-root/u-root/cmds/core/mount + - github.com/u-root/u-root/cmds/core/netcat + - github.com/u-root/u-root/cmds/core/ping + - github.com/u-root/u-root/cmds/core/rm + - github.com/u-root/u-root/cmds/core/rmmod + - github.com/u-root/u-root/cmds/core/shutdown + - github.com/u-root/u-root/cmds/core/tail + - github.com/u-root/u-root/cmds/core/tee + - github.com/u-root/u-root/cmds/core/uname + - github.com/u-root/u-root/cmds/core/wget + + coreboot-app: + - github.com/u-root/u-root/cmds/core/cat + - github.com/u-root/u-root/cmds/exp/cbmem + - github.com/u-root/u-root/cmds/core/chroot + - github.com/u-root/u-root/cmds/core/cp + - github.com/u-root/u-root/cmds/core/dd + - github.com/u-root/u-root/cmds/core/dhclient + - github.com/u-root/u-root/cmds/core/dmesg + - github.com/u-root/u-root/cmds/core/find + - github.com/u-root/u-root/cmds/core/gosh + - github.com/u-root/u-root/cmds/core/grep + - github.com/u-root/u-root/cmds/core/id + - github.com/u-root/u-root/cmds/core/init + - github.com/u-root/u-root/cmds/core/insmod + - github.com/u-root/u-root/cmds/core/ip + - github.com/u-root/u-root/cmds/core/kill + - github.com/u-root/u-root/cmds/core/ls + - github.com/u-root/u-root/cmds/core/mount + - github.com/u-root/u-root/cmds/core/pci + - github.com/u-root/u-root/cmds/core/ping + - github.com/u-root/u-root/cmds/core/ps + - github.com/u-root/u-root/cmds/core/pwd + - github.com/u-root/u-root/cmds/core/rm + - github.com/u-root/u-root/cmds/core/rmmod + - github.com/u-root/u-root/cmds/core/shutdown + - github.com/u-root/u-root/cmds/core/sshd + - github.com/u-root/u-root/cmds/core/switch_root + - github.com/u-root/u-root/cmds/core/tail + - github.com/u-root/u-root/cmds/core/tee + - github.com/u-root/u-root/cmds/core/uname + - github.com/u-root/u-root/cmds/core/wget + + plan9: + - github.com/u-root/u-root/cmds/core/* + + tinygo: + - github.com/u-root/u-root/cmds/core/backoff + - github.com/u-root/u-root/cmds/core/base64 + - github.com/u-root/u-root/cmds/core/basename + - github.com/u-root/u-root/cmds/core/cat + - github.com/u-root/u-root/cmds/core/chmod + - github.com/u-root/u-root/cmds/core/cmp + - github.com/u-root/u-root/cmds/core/comm + - github.com/u-root/u-root/cmds/core/cp + - github.com/u-root/u-root/cmds/core/date + - github.com/u-root/u-root/cmds/core/dd + - github.com/u-root/u-root/cmds/core/dirname + - github.com/u-root/u-root/cmds/core/echo + - github.com/u-root/u-root/cmds/core/false + - github.com/u-root/u-root/cmds/core/free + - github.com/u-root/u-root/cmds/core/gpt + - github.com/u-root/u-root/cmds/core/grep + - github.com/u-root/u-root/cmds/core/hexdump + - github.com/u-root/u-root/cmds/core/kill + - github.com/u-root/u-root/cmds/core/ls + - github.com/u-root/u-root/cmds/core/mkdir + - github.com/u-root/u-root/cmds/core/more + - 
github.com/u-root/u-root/cmds/core/mv + - github.com/u-root/u-root/cmds/core/ps + - github.com/u-root/u-root/cmds/core/pwd + - github.com/u-root/u-root/cmds/core/rm + - github.com/u-root/u-root/cmds/core/sort + - github.com/u-root/u-root/cmds/core/tr + - github.com/u-root/u-root/cmds/core/true + - github.com/u-root/u-root/cmds/core/uniq + - github.com/u-root/u-root/cmds/core/wc + - github.com/u-root/u-root/cmds/core/yes + +configs: + default: + init: init + shell: gosh + commands: + - builder: bb + commands: [core] + + plan9: + goos: plan9 + init: init + shell: "" + commands: + - builder: bb + commands: [plan9] + + # Enables build tags that allow Dead Code Elimination. + small: + init: init + shell: gosh + build_tags: [goshsmall, grpcnotrace] diff --git a/README.md b/README.md index 23269218d1..bc1bf71ef4 100644 --- a/README.md +++ b/README.md @@ -30,47 +30,37 @@ u-root embodies four different projects. # Usage -Make sure your Go version is >=1.21. +Make sure your Go version is >= 1.21. Download and install u-root either via git: ```shell git clone https://github.com/u-root/u-root cd u-root -go build +go install ``` -The resulting binary will the be placed where `go build` was invoked - Or install directly with go: ```shell -go install github.com/u-root/u-root +go install github.com/u-root/u-root@latest ``` -**Note: The `u-root` command will end up in `$GOPATH/bin/u-root`, so you may -need to add `$GOPATH/bin` to your `$PATH`.** - -## Templates - -To quickly specify a set of commands from u-root, you can use any of the -templates as defined in [templates.go](templates.go). +> [!NOTE] +> The `u-root` command will end up in `$GOPATH/bin/u-root`, so you may +> need to add `$GOPATH/bin` to your `$PATH`. -## Examples +# Examples -Here are some examples of using the `u-root` command to build an initramfs, -with `$UROOT_PATH` being the path to where the u-root sources are on the disk -(explicitly specifiying this is only necessary if not running u-root inside the -root of the repository): +Here are some examples of using the `u-root` command to build an initramfs. ```shell +git clone https://github.com/u-root/u-root +cd u-root + # Build an initramfs of all the Go cmds in ./cmds/core/... (default) u-root -# Build an initramfs of all the Go cmds in ./cmds/core/... -# But running the command outside of the repository root -(cd /tmp && GBB_PATH=$UROOT_PATH u-root) - # Generate an archive with bootloaders # # core and boot are templates that expand to sets of commands @@ -81,53 +71,118 @@ u-root ./cmds/core/{init,ls,ip,dhclient,wget,cat,gosh} # Generate an archive with all of the core tools with some exceptions u-root core -cmds/core/{ls,losetup} +``` -# Generate an archive with a tool outside of u-root -git clone https://github.com/u-root/cpu -u-root ./cmds/core/{init,ls,gosh} ./cpu/cmds/cpud +> [!IMPORTANT] +> +> `u-root` works exactly when `go build` and `go list` work as well. -# Generate an archive with a tool outside of u-root, in any PWD -(cd /tmp && GBB_PATH=$UROOT_PATH:$CPU_PATH u-root ./cmds/core/{init,ls,gosh} ./cmds/cpud) -``` +> [!NOTE] +> +> The `u-root` tool is the same as the +> [mkuimage](https://github.com/u-root/mkuimage) tool with some defaults +> applied. +> +> In the near future, `uimage` will replace `u-root`. + +> [!TIP] +> +> To just build Go busybox binaries, try out +> [gobusybox](https://github.com/u-root/gobusybox)'s `makebb` tool. + +## Multi-module workspace builds + +There are several ways to build multi-module command images using standard Go +tooling. 
+ +```shell +$ mkdir workspace +$ cd workspace +$ git clone https://github.com/u-root/u-root +$ git clone https://github.com/u-root/cpu + +$ go work init ./u-root +$ go work use ./cpu + +$ u-root ./u-root/cmds/core/{init,gosh} ./cpu/cmds/cpud + +$ cpio -ivt < /tmp/initramfs.linux_amd64.cpio +... +-rwxr-x--- 0 root root 6365184 Jan 1 1970 bbin/bb +lrwxrwxrwx 0 root root 2 Jan 1 1970 bbin/cpud -> bb +lrwxrwxrwx 0 root root 2 Jan 1 1970 bbin/gosh -> bb +lrwxrwxrwx 0 root root 2 Jan 1 1970 bbin/init -> bb +... + +# Works for offline vendored builds as well. +$ go work vendor # Go 1.22 feature. -The default set of packages included is all packages in -`github.com/u-root/u-root/cmds/core/...`. +$ u-root ./u-root/cmds/core/{init,gosh} ./cpu/cmds/cpud +``` -`GBB_PATH` is a place that u-root will look for commands. Each colon-separated -`GBB_PATH` element is concatenated with patterns from the command-line and -checked for existence. For example: +When creating a new Go workspace is too much work, the `goanywhere` tool can +create one on the fly. This works **only with local file system paths**: ```shell -GBB_PATH=$HOME/u-root:$HOME/u-bmc u-root \ - cmds/core/init \ - cmds/core/gosh \ - cmd/socreset - -# matches: -# $HOME/u-root/cmds/core/init -# $HOME/u-root/cmds/core/gosh -# $HOME/u-bmc/cmd/socreset +$ go install github.com/u-root/gobusybox/src/cmd/goanywhere@latest + +$ goanywhere ./u-root/cmds/core/{init,gosh} ./cpu/cmds/cpud -- u-root ``` +`goanywhere` creates a workspace in a temporary directory with the given +modules, and then execs `u-root` in the workspace passing along the command +names. + +> [!TIP] +> +> While workspaces are good for local compilation, they are not meant to be +> checked in to version control systems. +> +> For a non-workspace way of building multi-module initramfs images, read more +> in the [mkuimage README](https://github.com/u-root/mkuimage). (The `u-root` +> tool is `mkuimage` with more defaults applied.) + ## Extra Files You may also include additional files in the initramfs using the `-files` flag. + If you add binaries with `-files` are listed, their ldd dependencies will be -included as well. As example for Debian, you want to add two kernel modules for -testing, executing your currently booted kernel: +included as well. + +```shell +$ u-root -files /bin/bash + +$ cpio -ivt < /tmp/initramfs.linux_amd64.cpio +... +-rwxr-xr-x 0 root root 1277936 Jan 1 1970 bin/bash +... +drwxr-xr-x 0 root root 0 Jan 1 1970 lib/x86_64-linux-gnu +-rwxr-xr-x 0 root root 210792 Jan 1 1970 lib/x86_64-linux-gnu/ld-linux-x86-64.so.2 +-rwxr-xr-x 0 root root 1926256 Jan 1 1970 lib/x86_64-linux-gnu/libc.so.6 +lrwxrwxrwx 0 root root 15 Jan 1 1970 lib/x86_64-linux-gnu/libtinfo.so.6 -> libtinfo.so.6.4 +-rw-r--r-- 0 root root 216368 Jan 1 1970 lib/x86_64-linux-gnu/libtinfo.so.6.4 +drwxr-xr-x 0 root root 0 Jan 1 1970 lib64 +lrwxrwxrwx 0 root root 42 Jan 1 1970 lib64/ld-linux-x86-64.so.2 -> /lib/x86_64-linux-gnu/ld-linux-x86-64.so.2 +... +``` -> NOTE: these files will be placed in the `$HOME` dir in the initramfs. +You can determine placement with colons: ```shell -u-root -files $HOME/hello.ko -files $HOME/hello2.ko -qemu-system-x86_64 -kernel /boot/vmlinuz-$(uname -r) -initrd /tmp/initramfs.linux_amd64.cpio +$ u-root -files "/bin/bash:sbin/sh" + +$ cpio -ivt < /tmp/initramfs.linux_amd64.cpio +... +-rwxr-xr-x 0 root root 1277936 Jan 1 1970 sbin/sh +... ``` -To specify the location in the initramfs, use `:`. 
-For example:
+For example, on Debian, you may want to add two kernel modules for testing and
+then boot the result with your currently booted kernel:
 
 ```shell
-u-root -files "root-fs/usr/bin/runc:usr/bin/run"
+$ u-root -files "$HOME/hello.ko:etc/hello.ko" -files "$HOME/hello2.ko:etc/hello2.ko"
+$ qemu-system-x86_64 -kernel /boot/vmlinuz-$(uname -r) -initrd /tmp/initramfs.linux_amd64.cpio
 ```
 
 ## Init and Uinit
 
@@ -137,10 +192,12 @@ and `-uinitcmd` command-line flags.
 
 * `-initcmd` determines what `/init` is symlinked to. `-initcmd` may be a
   u-root command name or a symlink target.
+
 * `-uinitcmd` is run by the default u-root [init](cmds/core/init) after some
   basic file system setup. There is no default, users should optionally supply
   their own. `-uinitcmd` may be a u-root command name with arguments or a
   symlink target with arguments.
+
 * After running a uinit (if there is one), [init](cmds/core/init) will start a
   shell determined by the `-defaultsh` argument.
 
@@ -173,9 +230,12 @@ qemu-system-x86_64 -kernel $KERNEL -initrd /tmp/initramfs.linux_amd64.cpio -nogr
 # Go Gopher
 # ~/>
 ```
-Passing command line arguments like above is equivalent to passing the arguments to uinit via a flags file in `/etc/uinit.flags`, see [Extra Files](#extra-files).
-Additionally, you can pass arguments to uinit via the `uroot.uinitargs` kernel parameters, for example:
+Passing command line arguments like above is equivalent to passing the arguments
+to uinit via a flags file in `/etc/uinit.flags`; see [Extra Files](#extra-files).
+
+Additionally, you can pass arguments to uinit via the `uroot.uinitargs` kernel
+parameter, for example:
 
 ```bash
 u-root -uinitcmd="echo Gopher" ./cmds/core/{init,echo,gosh}
@@ -198,15 +258,15 @@ qemu-system-x86_64 -kernel $KERNEL -initrd /tmp/initramfs.linux_amd64.cpio -nogr
 # Go Gopher
 # ~/>
 ```
-Note the order of the passed arguments in the above example. 
+Note the order of the passed arguments in the above example.
 
 The command you name must be present in the command set. The following will
 *not work*:
 
 ```bash
 u-root -uinitcmd="echo Go Gopher" ./cmds/core/{init,gosh}
-# 2020/04/30 21:05:57 could not create symlink from "bin/uinit" to "echo": command or path "echo" not included in u-root build: specify -uinitcmd="" to ignore this error and build without a uinit
+# 21:05:57 could not create symlink from "bin/uinit" to "echo": command or path "echo" not included in u-root build: specify -uinitcmd="" to ignore this error and build without a uinit
 ```
 
 You can also refer to non-u-root-commands; they will be added as symlinks. We
@@ -229,7 +289,6 @@ The effect of the above command:
 * Adds /bin/echo as bin/foobar
 * Adds your-hosts-file as etc/hosts
 * builds in the cmds/core/init, and cmds/core/gosh commands.
-  The {} are expanded by the shell
 
 This will bypass the regular u-root init and just launch a shell:
 
@@ -256,9 +315,9 @@ Cross-OS and -architecture compilation comes for free with Go. In fact, every
 PR to the u-root repo is built against the following architectures: amd64, x86
 (i.e. 32bit), mipsle, armv7, arm64, and ppc64le.
 
-Further, we run integration tests on linux/amd64, freebsd/amd64 and linux/arm64,
-using several CI systems. If you need to add another CI system, processor or OS,
-please let us know.
+Further, we run integration tests on linux/amd64 and linux/arm64, using several
+CI systems. If you need to add another CI system, processor or OS, please let us
+know.
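+
+For example, a cross-OS build for Plan 9, as exercised by this repository's CI
+(see .circleci/config.yml); this sketch assumes `u-root` is on your PATH:
+
+```shell
+GOOS=plan9 u-root -defaultsh=/bbin/rush plan9
+```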
To cross compile for an ARM, on Linux: @@ -283,12 +342,15 @@ qemu-system-x86_64 -nographic -kernel path/to/kernel -initrd /tmp/initramfs.linu Note that you do not have to build a special kernel on your own, it is sufficient to use an existing one. Usually you can find one in `/boot`. -If you quickly need to obtain a kernel, for example, when you are on a non-Linux -system, you can assemble a URL to download one through Arch Linux's -[iPXE menu file](https://www.archlinux.org/releng/netboot/archlinux.ipxe). It -would download from `${mirrorurl}iso/${release}/arch/boot/x86_64/vmlinuz-linux`, so -just search for a mirror URL you prefer and a release version, for example, -`http://mirror.rackspace.com/archlinux/iso/2024.02.01/arch/boot/x86_64/vmlinuz-linux`. +If you don't have a kernel handy, you can also get the one we use for VM testing: + +```shell +go install github.com/hugelgupf/vmtest/tools/runvmtest@latest + +runvmtest -- bash -c "cp \$VMTEST_KERNEL ./kernel" +``` + +It may not have all features you require, however. ### Framebuffer @@ -334,58 +396,6 @@ qemu-system-x86_64 \ -initrd /tmp/initramfs.linux_amd64.cpio ``` -## u-root with Go package paths - -For Go package paths to be usable, the path passed to `u-root` must be in the -go.mod of the working directory or one of its parents. This is mostly useful for -repositories making programmatic use of u-root's APIs. - -```sh -cd ./u-root - -# In u-root's directory itself, github.com/u-root/u-root is resolvable. There is -# a go.mod here that can refer to u-root. -u-root github.com/u-root/u-root/cmds/core/... -u-root github.com/u-root/u-root/cmds/core/* -u-root github.com/u-root/u-root/cmds/core/i* -``` - -To depend on commands outside of ones own repository, the easiest way to depend -on Go commands is the following: - -```sh -TMPDIR=$(mktemp -d) -cd $TMPDIR -go mod init foobar -``` - -Create a file with some unused build tag like this to create dependencies on -commands: - -```go -//go:build tools - -package something - -import ( - "github.com/u-root/u-root/cmds/core/ip" - "github.com/u-root/u-root/cmds/core/init" - "github.com/hugelgupf/p9/cmd/p9ufs" -) -``` - -The unused build tag keeps it from being compiled, but its existence forces `go -mod tidy` to add these dependencies to `go.mod`: - -```sh -go mod tidy - -u-root \ - github.com/u-root/u-root/cmds/core/ip \ - github.com/u-root/u-root/cmds/core/init \ - github.com/hugelgupf/p9/cmd/p9ufs -``` - ## SystemBoot SystemBoot is a set of bootloaders written in Go. It is meant to be a @@ -490,41 +500,13 @@ go mod vendor ## Building without network access -Go modules require network access. If you need to make a repeatable build with -no network access, make sure that your code is under `$GOPATH` and the -environment variable `GO111MODULE` is set to `off`. This is: - -1. Pick a location for your off-network build, it can be anywhere and -the directory does not need to exist ahead of time: - -```shell -export GOPATH=$(mktemp -d) - -``` - -2. Fetch the code, you can use `git`, `go get` or even a release file, just -make sure that the code ends in: `${GOPATH}/src/github.com/u-root/u-root` E.g: - -```shell -mkdir -p ${GOPATH}/src/github.com/u-root/ -cd ${GOPATH}/src/github.com/u-root/ -git clone https://github.com/u-root/u-root.git -cd u-root -``` +The u-root command supports building with workspace vendoring and module +vendoring. In both of those cases, if all dependencies are found in the vendored +directories, the build happens completely offline. 
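+
+For instance, a module-vendored offline build might look like the following
+sketch; it relies on the Go toolchain (which u-root invokes) automatically
+using ./vendor when that directory exists:
+
+```shell
+cd u-root
+go mod vendor    # populate ./vendor while still online
+GOPROXY=off u-root ./cmds/core/{init,gosh}
+```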
-Or simply: +Read more in the [mkuimage README](https://github.com/u-root/mkuimage). -```shell -GO111MODULE=off go get github.com/u-root/u-root -cd $GOPATH/src/github.com/u-root/u-root -``` - -3. Build u-root and use it normally: - -```shell -GO111MODULE=off GOPROXY=off go build -GO111MODULE=off GOPROXY=off ./u-root -``` +u-root also still supports `GO111MODULE=off` builds. # Hardware diff --git a/cmds/core/df/df.go b/cmds/core/df/df.go index 1ea0d26691..e18be9dba4 100644 --- a/cmds/core/df/df.go +++ b/cmds/core/df/df.go @@ -112,9 +112,8 @@ func mountinfoFromBytes(buf []byte) (mountinfomap, error) { } if mnt.Blocks == 0 { continue - } else { - ret[key] = mnt } + ret[key] = mnt } return ret, nil } diff --git a/dependencies.go b/dependencies.go index 51173ce962..3b5b1a2e1b 100644 --- a/dependencies.go +++ b/dependencies.go @@ -13,4 +13,6 @@ package main import ( _ "github.com/hugelgupf/vmtest/vminit/gouinit" _ "github.com/hugelgupf/vmtest/vminit/shelluinit" + _ "github.com/hugelgupf/vmtest/vminit/shutdownafter" + _ "github.com/hugelgupf/vmtest/vminit/vmmount" ) diff --git a/docs/playbook.md b/docs/playbook.md index 1e0f18fc8d..0382c7ea75 100644 --- a/docs/playbook.md +++ b/docs/playbook.md @@ -50,3 +50,13 @@ In this case, it looks like this: +ENV TAMAGO_CHECKSUM="6319b1778e93695b62bb63946c5dd28c4d8f3c1ac3c4bf28e49cb967d570dfd5" Then follow the instructions, above, for updating the test images. + +## Make u-root init verbose +The u-root init command has several options, one being -v. +Setting -v will enable verbose logging. +How do you set it? +This can be done with the Linux kernel command line. The flags to init +are set via uroot.initflags, a string. So, to set verbose: +``` +uroot.initflags="-v" +``` diff --git a/go.mod b/go.mod index b652546efd..5c589894d8 100644 --- a/go.mod +++ b/go.mod @@ -31,20 +31,20 @@ require ( github.com/rekby/gpt v0.0.0-20200219180433-a930afbc6edc github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 github.com/spf13/pflag v1.0.5 - github.com/u-root/gobusybox/src v0.0.0-20240212035024-44ff0bf359ad + github.com/u-root/gobusybox/src v0.0.0-20240225013946-a274a8d5d83a github.com/u-root/iscsinl v0.1.1-0.20210528121423-84c32645822a - github.com/u-root/mkuimage v0.0.0-20240216050315-5f527d1fae2e + github.com/u-root/mkuimage v0.0.0-20240225063926-11a3bcc79c2a github.com/u-root/uio v0.0.0-20240209044354-b3d14b93376a github.com/ulikunitz/xz v0.5.11 github.com/vishvananda/netlink v1.2.1-beta.2 github.com/vtolstov/go-ioctl v0.0.0-20151206205506-6be9cced4810 - golang.org/x/crypto v0.18.0 - golang.org/x/exp v0.0.0-20231219180239-dc181d75b848 - golang.org/x/net v0.20.0 - golang.org/x/sys v0.16.0 - golang.org/x/term v0.16.0 + golang.org/x/crypto v0.19.0 + golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 + golang.org/x/net v0.21.0 + golang.org/x/sys v0.17.0 + golang.org/x/term v0.17.0 golang.org/x/text v0.14.0 - golang.org/x/tools v0.17.0 + golang.org/x/tools v0.18.0 gopkg.in/yaml.v2 v2.2.8 mvdan.cc/sh/v3 v3.7.0 pack.ag/tftp v1.0.1-0.20181129014014-07909dfbde3c @@ -60,7 +60,6 @@ require ( github.com/charmbracelet/lipgloss v0.7.1 // indirect github.com/cloudflare/circl v1.3.7 // indirect github.com/containerd/console v1.0.4-0.20230706203907-8f6c4e4faef5 // indirect - github.com/google/goterm v0.0.0-20200907032337-555d40f16ae2 // indirect github.com/hugelgupf/go-shlex v0.0.0-20200702092117-c80c9d0918fa // indirect github.com/josharian/native v1.1.0 // indirect github.com/jsimonetti/rtnetlink v1.3.5 // indirect @@ -81,8 +80,9 @@ require ( github.com/sahilm/fuzzy v0.1.0 
// indirect github.com/vishvananda/netns v0.0.4 // indirect golang.org/x/arch v0.2.0 // indirect - golang.org/x/mod v0.14.0 // indirect + golang.org/x/mod v0.15.0 // indirect golang.org/x/sync v0.6.0 // indirect + gopkg.in/yaml.v3 v3.0.0 // indirect ) retract ( diff --git a/go.sum b/go.sum index 3345c3ed55..d003b526c0 100644 --- a/go.sum +++ b/go.sum @@ -80,8 +80,6 @@ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-tpm v0.9.1-0.20230914180155-ee6cbcd136f8 h1:g9RVRZdQrNEK2E94RcFescvXFC9afWsFar4IIdejP34= github.com/google/go-tpm v0.9.1-0.20230914180155-ee6cbcd136f8/go.mod h1:FkNVkc6C+IsvDI9Jw1OveJmxGZUUaKxtrpOS47QWKfU= -github.com/google/goterm v0.0.0-20200907032337-555d40f16ae2 h1:CVuJwN34x4xM2aT4sIKhmeib40NeBPhRihNjQmpJsA4= -github.com/google/goterm v0.0.0-20200907032337-555d40f16ae2/go.mod h1:nOFQdrUlIlx6M6ODdSpBj1NVA+VgLC6kmw60mkw34H4= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/hexdigest/gowrap v1.1.7/go.mod h1:Z+nBFUDLa01iaNM+/jzoOA1JJ7sm51rnYFauKFUB5fs= @@ -212,12 +210,12 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/twitchtv/twirp v5.8.0+incompatible/go.mod h1:RRJoFSAmTEh2weEqWtpPE3vFK5YBhA6bqp2l1kfCC5A= -github.com/u-root/gobusybox/src v0.0.0-20240212035024-44ff0bf359ad h1:lUSEFqsEuc+c+sTI5jVEC0wWw0FOuXZbrYGZbxQL19E= -github.com/u-root/gobusybox/src v0.0.0-20240212035024-44ff0bf359ad/go.mod h1:vN1IwhlCo7gTDTJDUs6WCKM4/C2uiq5w0XvZCqLtb5s= +github.com/u-root/gobusybox/src v0.0.0-20240225013946-a274a8d5d83a h1:eg5FkNoQp76ZsswyGZ+TjYqA/rhKefxK8BW7XOlQsxo= +github.com/u-root/gobusybox/src v0.0.0-20240225013946-a274a8d5d83a/go.mod h1:e/8TmrdreH0sZOw2DFKBaUV7bvDWRq6SeM9PzkuVM68= github.com/u-root/iscsinl v0.1.1-0.20210528121423-84c32645822a h1:A0sK7WEodak7eVd21MOEatnh2pfAAwZaEPSIEEsjctQ= github.com/u-root/iscsinl v0.1.1-0.20210528121423-84c32645822a/go.mod h1:RWIgJWqm9/0gjBZ0Hl8iR6MVGzZ+yAda2uqqLmetE2I= -github.com/u-root/mkuimage v0.0.0-20240216050315-5f527d1fae2e h1:KRdkURI+8KMNx+3XxTIFFpgTchYUaptp40Vq0wBaQE8= -github.com/u-root/mkuimage v0.0.0-20240216050315-5f527d1fae2e/go.mod h1:04Id/CtMh7fm3TB2crUokz9qNhn7q8Qturr4k0teSDI= +github.com/u-root/mkuimage v0.0.0-20240225063926-11a3bcc79c2a h1:PsI71z55OoumrXP01sYr+cV3Ab4pL7Y2QW/ftMKG7CY= +github.com/u-root/mkuimage v0.0.0-20240225063926-11a3bcc79c2a/go.mod h1:Yqr8aXRStz71Z1JVz2bUut8Xt9wmBijQIVOSn3eYEIw= github.com/u-root/uio v0.0.0-20240209044354-b3d14b93376a h1:BH1SOPEvehD2kVrndDnGJiUF0TrBpNs+iyYocu6h0og= github.com/u-root/uio v0.0.0-20240209044354-b3d14b93376a/go.mod h1:P3a5rG4X7tI17Nn3aOIAYr5HbIMukwXG0urG0WuL8OA= github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= @@ -238,24 +236,24 @@ golang.org/x/arch v0.2.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.18.0 
h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20231219180239-dc181d75b848 h1:+iq7lrkxmFNBM7xx+Rae2W6uyPfhPeDWD+n+JgppptE= -golang.org/x/exp v0.0.0-20231219180239-dc181d75b848/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= +golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= +golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -283,12 +281,12 @@ golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term 
v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= -golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= @@ -300,8 +298,8 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= diff --git a/integration/gotests/gotest_test.go b/integration/gotests/gotest_test.go index e09fc53e0f..3c20a7db5c 100644 --- a/integration/gotests/gotest_test.go +++ b/integration/gotests/gotest_test.go @@ -68,10 +68,6 @@ func testPkgs(t *testing.T) []string { "github.com/u-root/u-root/cmds/exp/bzimage", "github.com/u-root/u-root/pkg/boot/bzimage", - // No Go compiler in VM. - "github.com/u-root/u-root/pkg/uroot", - "github.com/u-root/u-root/pkg/uroot/builder", - // ?? "github.com/u-root/u-root/pkg/tss", "github.com/u-root/u-root/pkg/syscallfilter", diff --git a/pkg/dt/fdt.go b/pkg/dt/fdt.go index 09fcb382b5..a5070a22f0 100644 --- a/pkg/dt/fdt.go +++ b/pkg/dt/fdt.go @@ -220,13 +220,14 @@ func (fdt *FDT) readStructBlock(f io.ReadSeeker, strs []byte) error { // The name is a null-terminating string. for { - if b, err := r.ReadByte(); err != nil { + b, err := r.ReadByte() + if err != nil { return err - } else if b == 0 { + } + if b == 0 { break - } else { - child.Name += string(b) } + child.Name += string(b) } case tokenEndNode: diff --git a/pkg/uroot/builder/binary.go b/pkg/uroot/builder/binary.go deleted file mode 100644 index c470063f0f..0000000000 --- a/pkg/uroot/builder/binary.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2015-2017 the u-root Authors. All rights reserved -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package builder - -import ( - "fmt" - "path/filepath" - "sync" - - gbbgolang "github.com/u-root/gobusybox/src/pkg/golang" - "github.com/u-root/u-root/pkg/ulog" - "github.com/u-root/u-root/pkg/uroot/initramfs" - "golang.org/x/tools/go/packages" -) - -func dirFor(env *gbbgolang.Environ, pkg string) (string, error) { - pkgs, err := env.Lookup(packages.NeedName|packages.NeedFiles, "", pkg) - if err != nil { - return "", fmt.Errorf("failed to look up package %q: %v", pkg, err) - } - - // One directory = one package in standard Go, so - // finding the first file's parent directory should - // find us the package directory. - var dir string - for _, p := range pkgs { - if len(p.GoFiles) > 0 { - dir = filepath.Dir(p.GoFiles[0]) - } - } - if dir == "" { - return "", fmt.Errorf("could not find package directory for %q", pkg) - } - return dir, nil -} - -// BinaryBuilder builds each Go command as a separate binary. -// -// BinaryBuilder is an implementation of Builder. -type BinaryBuilder struct{} - -// DefaultBinaryDir implements Builder.DefaultBinaryDir. -// -// "bin" is the default initramfs binary directory for these binaries. -func (BinaryBuilder) DefaultBinaryDir() string { - return "bin" -} - -// Build implements Builder.Build. -func (BinaryBuilder) Build(l ulog.Logger, af *initramfs.Files, opts Opts) error { - if opts.Env == nil { - return fmt.Errorf("must specify Go build environment") - } - result := make(chan error, len(opts.Packages)) - - var wg sync.WaitGroup - for _, pkg := range opts.Packages { - wg.Add(1) - go func(p string) { - defer wg.Done() - dir, err := dirFor(opts.Env, p) - if err != nil { - result <- err - return - } - result <- opts.Env.BuildDir( - dir, - filepath.Join(opts.TempDir, opts.BinaryDir, filepath.Base(p)), - opts.BuildOpts) - }(pkg) - } - - wg.Wait() - close(result) - - for err := range result { - if err != nil { - return err - } - } - - // Add bin directory to archive. - return af.AddFile(opts.TempDir, "") -} diff --git a/pkg/uroot/builder/binary_test.go b/pkg/uroot/builder/binary_test.go deleted file mode 100644 index e8e151e405..0000000000 --- a/pkg/uroot/builder/binary_test.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2022 the u-root Authors. All rights reserved -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package builder - -import ( - "testing" - - "github.com/u-root/gobusybox/src/pkg/golang" - "github.com/u-root/u-root/pkg/ulog/ulogtest" - "github.com/u-root/u-root/pkg/uroot/initramfs" -) - -func TestBinaryBuild(t *testing.T) { - dir := t.TempDir() - - opts := Opts{ - Env: golang.Default(golang.DisableCGO()), - Packages: []string{ - "../test/foo", - "../../../cmds/core/elvish", - "github.com/u-root/u-root/cmds/core/init", - "cmd/test2json", - }, - TempDir: dir, - BinaryDir: "bbin", - BuildOpts: &golang.BuildOpts{}, - } - af := initramfs.NewFiles() - var b BinaryBuilder - if err := b.Build(ulogtest.Logger{TB: t}, af, opts); err != nil { - t.Fatalf("Build(%v, %v); %v != nil", af, opts, err) - } - - mustContain := []string{ - "bbin/elvish", - "bbin/foo", - "bbin/init", - } - for _, name := range mustContain { - if !af.Contains(name) { - t.Errorf("expected files to include %q; archive: %v", name, af) - } - } -} diff --git a/pkg/uroot/builder/builder.go b/pkg/uroot/builder/builder.go deleted file mode 100644 index e4f5ea1388..0000000000 --- a/pkg/uroot/builder/builder.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2015-2017 the u-root Authors. 
All rights reserved -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package builder - -import ( - gbbgolang "github.com/u-root/gobusybox/src/pkg/golang" - "github.com/u-root/u-root/pkg/ulog" - "github.com/u-root/u-root/pkg/uroot/initramfs" -) - -var ( - // BusyBox is a shared GBBBuilder instance. - BusyBox = GBBBuilder{} - // Binary is a shared BinaryBuilder instance. - Binary = BinaryBuilder{} -) - -// Opts are options passed to the Builder.Build function. -type Opts struct { - // Env is the Go compiler environment. - Env *gbbgolang.Environ - - // Build options for building go binaries. Ultimate this holds all the - // args that end up being passed to `go build`. - BuildOpts *gbbgolang.BuildOpts - - // Packages are the Go packages to compile. - // - // Only an explicit list of absolute directory paths is accepted. - Packages []string - - // TempDir is a temporary directory where the compilation mode compiled - // binaries can be placed. - // - // TempDir should contain no files. - TempDir string - - // BinaryDir is the initramfs directory for built binaries. - // - // BinaryDir must be specified. - BinaryDir string -} - -// Builder builds Go packages and adds the binaries to an initramfs. -// -// The resulting files need not be binaries per se, but exec'ing the resulting -// file should result in the Go program being executed. -type Builder interface { - // Build uses the given options to build Go packages and adds its files - // to be included in the initramfs to the given ArchiveFiles. - Build(ulog.Logger, *initramfs.Files, Opts) error - - // DefaultBinaryDir is the initramfs' default directory for binaries - // built using this builder. - DefaultBinaryDir() string -} diff --git a/pkg/uroot/builder/gbb.go b/pkg/uroot/builder/gbb.go deleted file mode 100644 index f769f36bab..0000000000 --- a/pkg/uroot/builder/gbb.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2015-2021 the u-root Authors. All rights reserved -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package builder - -import ( - "errors" - "fmt" - "path" - "path/filepath" - - "github.com/u-root/gobusybox/src/pkg/bb" - "github.com/u-root/u-root/pkg/cpio" - "github.com/u-root/u-root/pkg/ulog" - "github.com/u-root/u-root/pkg/uroot/initramfs" -) - -// Commands to skip building in bb mode. -var skip = map[string]struct{}{ - "bb": {}, -} - -// GBBBuilder is an implementation of Builder that compiles many Go commands -// into one busybox-style binary. -// -// GBBBuilder will also include symlinks for each command to the busybox binary. -// -// GBBBuilder does all this by rewriting the source files of the packages given -// to create one busybox-like binary containing all commands. -// -// The compiled binary uses argv[0] to decide which Go command to run. -// -// See bb/README.md for a detailed explanation of the implementation of busybox -// mode. -type GBBBuilder struct { - // ShellBang means generate #! files instead of symlinks. - // ShellBang are more portable and just as efficient. - ShellBang bool -} - -// DefaultBinaryDir implements Builder.DefaultBinaryDir. -// -// The default initramfs binary dir is bbin for busybox binaries. -func (GBBBuilder) DefaultBinaryDir() string { - return "bbin" -} - -// Build is an implementation of Builder.Build for a busybox-like initramfs. -func (b GBBBuilder) Build(l ulog.Logger, af *initramfs.Files, opts Opts) error { - // Build the busybox binary. 
- if len(opts.TempDir) == 0 { - return fmt.Errorf("opts.TempDir is empty") - } - bbPath := filepath.Join(opts.TempDir, "bb") - - if len(opts.BinaryDir) == 0 { - return fmt.Errorf("must specify binary directory") - } - if opts.Env == nil { - return fmt.Errorf("must specify Go build environment") - } - - bopts := &bb.Opts{ - Env: opts.Env, - GenSrcDir: opts.TempDir, - CommandPaths: opts.Packages, - BinaryPath: bbPath, - GoBuildOpts: opts.BuildOpts, - } - - if err := bb.BuildBusybox(l, bopts); err != nil { - // Print the actual error. This may contain a suggestion for - // what to do, actually. - l.Printf("Gobusybox error: %v", err) - - // Return some instructions for the user; this is printed last in the u-root tool. - // - // TODO: yeah, this isn't a good way to do error handling. The - // error should be the thing that's returned, I just wanted - // that to be printed first, and the instructions for what to - // do about it to be last. - var errGopath *bb.ErrGopathBuild - var errGomod *bb.ErrModuleBuild - if errors.As(err, &errGopath) { - return fmt.Errorf("preserving bb generated source directory at %s due to error. To reproduce build, `cd %s` and `GO111MODULE=off GOPATH=%s go build`: %v", opts.TempDir, errGopath.CmdDir, errGopath.GOPATH, err) - } else if errors.As(err, &errGomod) { - return fmt.Errorf("preserving bb generated source directory at %s due to error. To debug build, `cd %s` and use `go build` to build, or `go mod [why|tidy|graph]` to debug dependencies, or `go list -m all` to list all dependency versions:\n%v", opts.TempDir, errGomod.CmdDir, err) - } - return fmt.Errorf("preserving bb generated source directory at %s due to error:\n%v", opts.TempDir, err) - } - - if err := af.AddFile(bbPath, "bbin/bb"); err != nil { - return err - } - - // Add symlinks for included commands to initramfs. - for _, pkg := range opts.Packages { - if _, ok := skip[path.Base(pkg)]; ok { - continue - } - - // Add a symlink /bbin/{cmd} -> /bbin/bb to our initramfs. - // Or add a #! file if b.ShellBang is set ... - if b.ShellBang { - b := path.Base(pkg) - if err := af.AddRecord(cpio.StaticFile(filepath.Join(opts.BinaryDir, b), "#!/bbin/bb #!"+b+"\n", 0o755)); err != nil { - return err - } - } else if err := af.AddRecord(cpio.Symlink(filepath.Join(opts.BinaryDir, path.Base(pkg)), "bb")); err != nil { - return err - } - } - return nil -} diff --git a/pkg/uroot/builder/gbb_test.go b/pkg/uroot/builder/gbb_test.go deleted file mode 100644 index faae5a2238..0000000000 --- a/pkg/uroot/builder/gbb_test.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2021 the u-root Authors. All rights reserved -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package builder - -import ( - "testing" - - "github.com/u-root/gobusybox/src/pkg/golang" - "github.com/u-root/u-root/pkg/ulog/ulogtest" - "github.com/u-root/u-root/pkg/uroot/initramfs" -) - -func TestGBBBuild(t *testing.T) { - dir := t.TempDir() - - opts := Opts{ - Env: golang.Default(golang.DisableCGO()), - Packages: []string{ - "../test/foo", - "../../../cmds/core/elvish", - }, - TempDir: dir, - BinaryDir: "bbin", - BuildOpts: &golang.BuildOpts{}, - } - af := initramfs.NewFiles() - var gbb GBBBuilder - if err := gbb.Build(ulogtest.Logger{TB: t}, af, opts); err != nil { - t.Fatalf("Build(%v, %v); %v != nil", af, opts, err) - } - - mustContain := []string{ - "bbin/elvish", - "bbin/foo", - "bbin/bb", - } - for _, name := range mustContain { - if !af.Contains(name) { - t.Errorf("expected files to include %q; archive: %v", name, af) - } - } -} diff --git a/pkg/uroot/initramfs/archive.go b/pkg/uroot/initramfs/archive.go deleted file mode 100644 index a38a7ea096..0000000000 --- a/pkg/uroot/initramfs/archive.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2015-2017 the u-root Authors. All rights reserved -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package initramfs - -import ( - "fmt" - "io" - - "github.com/u-root/u-root/pkg/cpio" - "github.com/u-root/u-root/pkg/ulog" -) - -var ( - CPIO = CPIOArchiver{ - RecordFormat: cpio.Newc, - } - - Dir = DirArchiver{} - - // Archivers are the supported initramfs archivers at the moment. - // - // - cpio: writes the initramfs to a cpio. - // - dir: writes the initramfs relative to a specified directory. - Archivers = map[string]Archiver{ - "cpio": CPIO, - "dir": Dir, - } -) - -// Archiver is an archive format that builds an archive using a given set of -// files. -type Archiver interface { - // OpenWriter opens an archive writer at `path`. - OpenWriter(l ulog.Logger, path string) (Writer, error) - - // Reader returns a Reader that allows reading files from a file. - Reader(file io.ReaderAt) Reader -} - -// GetArchiver finds a registered initramfs archiver by name. -// -// Good to use with command-line arguments. -func GetArchiver(name string) (Archiver, error) { - archiver, ok := Archivers[name] - if !ok { - return nil, fmt.Errorf("couldn't find archival format %q", name) - } - return archiver, nil -} - -// Writer is an initramfs archive that files can be written to. -type Writer interface { - cpio.RecordWriter - - // Finish finishes the archive. - Finish() error -} - -// Reader is an object that files can be read from. -type Reader cpio.RecordReader - -// Opts are options for building an initramfs archive. -type Opts struct { - // Files are the files to be included. - // - // Files here generally have priority over files in DefaultRecords or - // BaseArchive. - *Files - - // OutputFile is the file to write to. - OutputFile Writer - - // BaseArchive is an existing archive to add files to. - // - // BaseArchive may be nil. - BaseArchive Reader - - // UseExistingInit determines whether the init from BaseArchive is used - // or not, if BaseArchive is specified. - // - // If this is false, the "init" file in BaseArchive will be renamed - // "inito" (for init-original) in the output archive. - UseExistingInit bool -} - -// Write uses the given options to determine which files to write to the output -// initramfs. -func Write(opts *Opts) error { - // Write base archive. 
- if opts.BaseArchive != nil { - transform := cpio.MakeReproducible - - // Rename init to inito if user doesn't want the existing init. - if !opts.UseExistingInit && opts.Contains("init") { - transform = func(r cpio.Record) cpio.Record { - if r.Name == "init" { - r.Name = "inito" - } - return cpio.MakeReproducible(r) - } - } - // If user wants the base archive init, but specified another - // init, make the other one inito. - if opts.UseExistingInit && opts.Contains("init") { - opts.Rename("init", "inito") - } - - for { - f, err := opts.BaseArchive.ReadRecord() - if err == io.EOF { - break - } - if err != nil { - return err - } - // TODO: ignore only the error where it already exists - // in archive. - opts.Files.AddRecord(transform(f)) - } - } - - if err := opts.Files.WriteTo(opts.OutputFile); err != nil { - return err - } - return opts.OutputFile.Finish() -} diff --git a/pkg/uroot/initramfs/cpio.go b/pkg/uroot/initramfs/cpio.go deleted file mode 100644 index 5559051f1d..0000000000 --- a/pkg/uroot/initramfs/cpio.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2015-2017 the u-root Authors. All rights reserved -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package initramfs - -import ( - "fmt" - "io" - "os" - - "github.com/u-root/u-root/pkg/cpio" - "github.com/u-root/u-root/pkg/ulog" -) - -// CPIOArchiver is an implementation of Archiver for the cpio format. -type CPIOArchiver struct { - cpio.RecordFormat -} - -// OpenWriter opens `path` as the correct file type and returns an -// Writer pointing to `path`. -func (ca CPIOArchiver) OpenWriter(l ulog.Logger, path string) (Writer, error) { - if len(path) == 0 { - return nil, fmt.Errorf("path is required") - } - f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644) - if err != nil { - return nil, err - } - return osWriter{ca.RecordFormat.Writer(f), f}, nil -} - -// osWriter implements Writer. -type osWriter struct { - cpio.RecordWriter - - f *os.File -} - -// Finish implements Writer.Finish. -func (o osWriter) Finish() error { - err := cpio.WriteTrailer(o) - o.f.Close() - return err -} - -// Reader implements Archiver.Reader. -func (ca CPIOArchiver) Reader(r io.ReaderAt) Reader { - return ca.RecordFormat.Reader(r) -} diff --git a/pkg/uroot/initramfs/dir.go b/pkg/uroot/initramfs/dir.go deleted file mode 100644 index 55b21bffab..0000000000 --- a/pkg/uroot/initramfs/dir.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2018 the u-root Authors. All rights reserved -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package initramfs - -import ( - "fmt" - "io" - "os" - - "github.com/u-root/u-root/pkg/cpio" - "github.com/u-root/u-root/pkg/ulog" -) - -// DirArchiver implements Archiver for a directory. -type DirArchiver struct{} - -// Reader implements Archiver.Reader. -// -// Currently unsupported for directories. -func (da DirArchiver) Reader(io.ReaderAt) Reader { - return nil -} - -// OpenWriter implements Archiver.OpenWriter. -func (da DirArchiver) OpenWriter(l ulog.Logger, path string) (Writer, error) { - if len(path) == 0 { - var err error - path, err = os.MkdirTemp("", "u-root") - if err != nil { - return nil, err - } - } else { - if _, err := os.Stat(path); os.IsExist(err) { - return nil, fmt.Errorf("path %q already exists", path) - } - if err := os.MkdirAll(path, 0o755); err != nil { - return nil, err - } - } - l.Printf("Path is %s", path) - return dirWriter{path}, nil -} - -// dirWriter implements Writer. 
-type dirWriter struct { - dir string -} - -// WriteRecord implements Writer.WriteRecord. -func (dw dirWriter) WriteRecord(r cpio.Record) error { - return cpio.CreateFileInRoot(r, dw.dir, false) -} - -// Finish implements Writer.Finish. -func (dw dirWriter) Finish() error { - return nil -} diff --git a/pkg/uroot/initramfs/files.go b/pkg/uroot/initramfs/files.go deleted file mode 100644 index 906b893105..0000000000 --- a/pkg/uroot/initramfs/files.go +++ /dev/null @@ -1,254 +0,0 @@ -// Copyright 2018 the u-root Authors. All rights reserved -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package initramfs - -import ( - "fmt" - "log" - "os" - "path" - "path/filepath" - "sort" - - "github.com/u-root/u-root/pkg/cpio" -) - -// Files are host files and records to add to the resulting initramfs. -type Files struct { - // Files is a map of relative archive path -> absolute host file path. - Files map[string]string - - // Records is a map of relative archive path -> Record to use. - // - // TODO: While the only archive mode is cpio, this will be a - // cpio.Record. If or when there is another archival mode, we can add a - // similar uroot.Record type. - Records map[string]cpio.Record -} - -// NewFiles returns a new archive files map. -func NewFiles() *Files { - return &Files{ - Files: make(map[string]string), - Records: make(map[string]cpio.Record), - } -} - -// sortedKeys returns a list of sorted paths in the archive. -func (af *Files) sortedKeys() []string { - keys := make([]string, 0, len(af.Files)+len(af.Records)) - for dest := range af.Files { - keys = append(keys, dest) - } - for dest := range af.Records { - keys = append(keys, dest) - } - sort.Strings(keys) - return keys -} - -func (af *Files) addFile(src string, dest string, follow bool) error { - src = filepath.Clean(src) - dest = path.Clean(dest) - if path.IsAbs(dest) { - r, err := filepath.Rel("/", dest) - if err != nil { - return fmt.Errorf("%q is an absolute path and can't make it relative to /: %v", dest, err) - } - log.Printf("Warning: You used an absolute path %q and it was adjusted to %q", dest, r) - dest = r - } - - if follow { - s, err := filepath.EvalSymlinks(src) - if err != nil { - return err - } - src = s - } - - // We check if it's a directory first. If a directory already exists as - // a record or file, we want to include its children anyway. - sInfo, err := os.Lstat(src) - if err != nil { - return fmt.Errorf("adding %q to archive failed because Lstat failed: %v", src, err) - } - - // Recursively add children. - if sInfo.Mode().IsDir() { - err := children(src, func(name string) error { - return af.addFile(filepath.Join(src, name), filepath.Join(dest, name), follow) - }) - if err != nil { - return err - } - - // Only override an existing directory if all children were - // added successfully. - af.Files[dest] = src - return nil - } - - if record, ok := af.Records[dest]; ok { - return fmt.Errorf("record for %q already exists in archive: %v", dest, record) - } - - if srcFile, ok := af.Files[dest]; ok { - // Just a duplicate. - if src == srcFile { - return nil - } - return fmt.Errorf("record for %q already exists in archive (is %q)", dest, src) - } - - af.Files[dest] = src - return nil -} - -// AddFile adds a host file at src into the archive at dest. -// It follows symlinks. -// -// If src is a directory, it and its children will be added to the archive -// relative to dest. -// -// Duplicate files with identical content will be silently ignored. 
-func (af *Files) AddFile(src string, dest string) error { - return af.addFile(src, dest, true) -} - -// AddFileNoFollow adds a host file at src into the archive at dest. -// It does not follow symlinks. -// -// If src is a directory, it and its children will be added to the archive -// relative to dest. -// -// Duplicate files with identical content will be silently ignored. -func (af *Files) AddFileNoFollow(src string, dest string) error { - return af.addFile(src, dest, false) -} - -// AddRecord adds a cpio.Record into the archive at `r.Name`. -func (af *Files) AddRecord(r cpio.Record) error { - r.Name = path.Clean(r.Name) - if filepath.IsAbs(r.Name) { - return fmt.Errorf("record name %q must not be absolute", r.Name) - } - - if src, ok := af.Files[r.Name]; ok { - return fmt.Errorf("record for %q already exists in archive: file %q", r.Name, src) - } - if rr, ok := af.Records[r.Name]; ok { - if rr.Info == r.Info { - return nil - } - return fmt.Errorf("record for %q already exists in archive: %v", r.Name, rr) - } - - af.Records[r.Name] = r - return nil -} - -// Contains returns whether path `dest` is already contained in the archive. -func (af *Files) Contains(dest string) bool { - _, fok := af.Files[dest] - _, rok := af.Records[dest] - return fok || rok -} - -// Rename renames a file in the archive. -func (af *Files) Rename(name string, newname string) { - if src, ok := af.Files[name]; ok { - delete(af.Files, name) - af.Files[newname] = src - } - if record, ok := af.Records[name]; ok { - delete(af.Records, name) - record.Name = newname - af.Records[newname] = record - } -} - -// addParent recursively adds parent directory records for `name`. -func (af *Files) addParent(name string) { - parent := path.Dir(name) - if parent == "." { - return - } - if !af.Contains(parent) { - af.AddRecord(cpio.Directory(parent, 0o755)) - } - af.addParent(parent) -} - -// fillInParents adds parent directory records for unparented files in `af`. -func (af *Files) fillInParents() { - for name := range af.Files { - af.addParent(name) - } - for name := range af.Records { - af.addParent(name) - } -} - -// WriteTo writes all records and files in `af` to `w`. -func (af *Files) WriteTo(w Writer) error { - // Add parent directories when not added specifically. - af.fillInParents() - cr := cpio.NewRecorder() - - // Reproducible builds: Files should be added to the archive in the - // same order. - for _, path := range af.sortedKeys() { - if record, ok := af.Records[path]; ok { - if err := w.WriteRecord(record); err != nil { - return err - } - } - if src, ok := af.Files[path]; ok { - if err := writeFile(w, cr, src, path); err != nil { - return err - } - } - } - return nil -} - -// writeFile takes the file at `src` on the host system and adds it to the -// archive `w` at path `dest`. -// -// If `src` is a directory, its children will be added to the archive as well. -func writeFile(w Writer, r *cpio.Recorder, src, dest string) error { - record, err := r.GetRecord(src) - if err != nil { - return err - } - - // Fix the name. - record.Name = dest - return w.WriteRecord(cpio.MakeReproducible(record)) -} - -// children calls `fn` on all direct children of directory `dir`. -func children(dir string, fn func(name string) error) error { - f, err := os.Open(dir) - if err != nil { - return err - } - names, err := f.Readdirnames(-1) - f.Close() - if err != nil { - return err - } - - for _, name := range names { - if err := fn(name); os.IsNotExist(err) { - // File was deleted in the meantime. 
- continue - } else if err != nil { - return err - } - } - return nil -} diff --git a/pkg/uroot/initramfs/files_test.go b/pkg/uroot/initramfs/files_test.go deleted file mode 100644 index 66746f6b1e..0000000000 --- a/pkg/uroot/initramfs/files_test.go +++ /dev/null @@ -1,670 +0,0 @@ -// Copyright 2018 the u-root Authors. All rights reserved -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package initramfs - -import ( - "fmt" - "io" - "os" - "path/filepath" - "reflect" - "strings" - "testing" - - "github.com/u-root/u-root/pkg/cpio" - "github.com/u-root/uio/uio" -) - -func TestFilesAddFileNoFollow(t *testing.T) { - regularFile, err := os.CreateTemp("", "archive-files-add-file") - if err != nil { - t.Error(err) - } - defer os.RemoveAll(regularFile.Name()) - - dir := t.TempDir() - dir2 := t.TempDir() - - os.Create(filepath.Join(dir, "foo2")) - os.Symlink(filepath.Join(dir, "foo2"), filepath.Join(dir2, "foo3")) - - for i, tt := range []struct { - name string - af *Files - src string - dest string - result *Files - errContains string - }{ - { - name: "just add a file", - af: NewFiles(), - - src: regularFile.Name(), - dest: "bar/foo", - - result: &Files{ - Files: map[string]string{ - "bar/foo": regularFile.Name(), - }, - Records: map[string]cpio.Record{}, - }, - }, - { - name: "add symlinked file, NOT following", - af: NewFiles(), - src: filepath.Join(dir2, "foo3"), - dest: "bar/foo", - result: &Files{ - Files: map[string]string{ - "bar/foo": filepath.Join(dir2, "foo3"), - }, - Records: map[string]cpio.Record{}, - }, - }, - } { - t.Run(fmt.Sprintf("Test %02d: %s", i, tt.name), func(t *testing.T) { - err := tt.af.AddFileNoFollow(tt.src, tt.dest) - if err != nil && !strings.Contains(err.Error(), tt.errContains) { - t.Errorf("Error is %v, does not contain %v", err, tt.errContains) - } - if err == nil && len(tt.errContains) > 0 { - t.Errorf("Got no error, want %v", tt.errContains) - } - - if tt.result != nil && !reflect.DeepEqual(tt.af, tt.result) { - t.Errorf("got %v, want %v", tt.af, tt.result) - } - }) - } -} - -func TestFilesAddFile(t *testing.T) { - regularFile, err := os.CreateTemp("", "archive-files-add-file") - if err != nil { - t.Error(err) - } - defer os.RemoveAll(regularFile.Name()) - - dir := t.TempDir() - dir2 := t.TempDir() - dir3 := t.TempDir() - - os.Create(filepath.Join(dir, "foo")) - os.Create(filepath.Join(dir, "foo2")) - os.Symlink(filepath.Join(dir, "foo2"), filepath.Join(dir2, "foo3")) - - fooDir := filepath.Join(dir3, "fooDir") - os.Mkdir(fooDir, os.ModePerm) - symlinkToDir3 := filepath.Join(dir3, "fooSymDir/") - os.Symlink(fooDir, symlinkToDir3) - os.Create(filepath.Join(fooDir, "foo")) - os.Create(filepath.Join(fooDir, "bar")) - - for i, tt := range []struct { - name string - af *Files - src string - dest string - result *Files - errContains string - }{ - { - name: "just add a file", - af: NewFiles(), - - src: regularFile.Name(), - dest: "bar/foo", - - result: &Files{ - Files: map[string]string{ - "bar/foo": regularFile.Name(), - }, - Records: map[string]cpio.Record{}, - }, - }, - { - name: "add symlinked file, following", - af: NewFiles(), - src: filepath.Join(dir2, "foo3"), - dest: "bar/foo", - result: &Files{ - Files: map[string]string{ - "bar/foo": filepath.Join(dir, "foo2"), - }, - Records: map[string]cpio.Record{}, - }, - }, - { - name: "add symlinked directory, following", - af: NewFiles(), - src: symlinkToDir3, - dest: "foo/", - result: &Files{ - Files: map[string]string{ - "foo": fooDir, - "foo/foo": 
filepath.Join(fooDir, "foo"), - "foo/bar": filepath.Join(fooDir, "bar"), - }, - Records: map[string]cpio.Record{}, - }, - }, - { - name: "add file that exists in Files", - af: &Files{ - Files: map[string]string{ - "bar/foo": "/some/other/place", - }, - }, - src: regularFile.Name(), - dest: "bar/foo", - result: &Files{ - Files: map[string]string{ - "bar/foo": "/some/other/place", - }, - }, - errContains: "already exists in archive", - }, - { - name: "add a file that exists in Records", - af: &Files{ - Records: map[string]cpio.Record{ - "bar/foo": cpio.Symlink("bar/foo", "/some/other/place"), - }, - }, - src: regularFile.Name(), - dest: "bar/foo", - result: &Files{ - Records: map[string]cpio.Record{ - "bar/foo": cpio.Symlink("bar/foo", "/some/other/place"), - }, - }, - errContains: "already exists in archive", - }, - { - name: "add a file that already exists in Files, but is the same one", - af: &Files{ - Files: map[string]string{ - "bar/foo": regularFile.Name(), - }, - }, - src: regularFile.Name(), - dest: "bar/foo", - result: &Files{ - Files: map[string]string{ - "bar/foo": regularFile.Name(), - }, - }, - }, - { - name: "absolute destination paths are made relative", - af: &Files{ - Files: map[string]string{}, - }, - src: dir, - dest: "/bar/foo", - result: &Files{ - Files: map[string]string{ - "bar/foo": dir, - "bar/foo/foo": filepath.Join(dir, "foo"), - "bar/foo/foo2": filepath.Join(dir, "foo2"), - }, - }, - }, - { - name: "add a directory", - af: &Files{ - Files: map[string]string{}, - }, - src: dir, - dest: "bar/foo", - result: &Files{ - Files: map[string]string{ - "bar/foo": dir, - "bar/foo/foo": filepath.Join(dir, "foo"), - "bar/foo/foo2": filepath.Join(dir, "foo2"), - }, - }, - }, - { - name: "add a different directory to the same destination, no overlapping children", - af: &Files{ - Files: map[string]string{ - "bar/foo": "/some/place/real", - "bar/foo/zed": "/some/place/real/zed", - }, - }, - src: dir, - dest: "bar/foo", - result: &Files{ - Files: map[string]string{ - "bar/foo": dir, - "bar/foo/foo": filepath.Join(dir, "foo"), - "bar/foo/foo2": filepath.Join(dir, "foo2"), - "bar/foo/zed": "/some/place/real/zed", - }, - }, - }, - { - name: "add a different directory to the same destination, overlapping children", - af: &Files{ - Files: map[string]string{ - "bar/foo": "/some/place/real", - "bar/foo/foo2": "/some/place/real/zed", - }, - }, - src: dir, - dest: "bar/foo", - errContains: "already exists in archive", - }, - } { - t.Run(fmt.Sprintf("Test %02d: %s", i, tt.name), func(t *testing.T) { - err := tt.af.AddFile(tt.src, tt.dest) - if err != nil && !strings.Contains(err.Error(), tt.errContains) { - t.Errorf("Error is %v, does not contain %v", err, tt.errContains) - } - if err == nil && len(tt.errContains) > 0 { - t.Errorf("Got no error, want %v", tt.errContains) - } - - if tt.result != nil && !reflect.DeepEqual(tt.af, tt.result) { - t.Errorf("got %v, want %v", tt.af, tt.result) - } - }) - } -} - -func TestFilesAddRecord(t *testing.T) { - for i, tt := range []struct { - af *Files - record cpio.Record - - result *Files - errContains string - }{ - { - af: NewFiles(), - record: cpio.Symlink("bar/foo", ""), - result: &Files{ - Files: map[string]string{}, - Records: map[string]cpio.Record{ - "bar/foo": cpio.Symlink("bar/foo", ""), - }, - }, - }, - { - af: &Files{ - Files: map[string]string{ - "bar/foo": "/some/other/place", - }, - }, - record: cpio.Symlink("bar/foo", ""), - result: &Files{ - Files: map[string]string{ - "bar/foo": "/some/other/place", - }, - }, - errContains: "already 
exists in archive", - }, - { - af: &Files{ - Records: map[string]cpio.Record{ - "bar/foo": cpio.Symlink("bar/foo", "/some/other/place"), - }, - }, - record: cpio.Symlink("bar/foo", ""), - result: &Files{ - Records: map[string]cpio.Record{ - "bar/foo": cpio.Symlink("bar/foo", "/some/other/place"), - }, - }, - errContains: "already exists in archive", - }, - { - af: &Files{ - Records: map[string]cpio.Record{ - "bar/foo": cpio.Symlink("bar/foo", "/some/other/place"), - }, - }, - record: cpio.Symlink("bar/foo", "/some/other/place"), - result: &Files{ - Records: map[string]cpio.Record{ - "bar/foo": cpio.Symlink("bar/foo", "/some/other/place"), - }, - }, - }, - { - record: cpio.Symlink("/bar/foo", ""), - errContains: "must not be absolute", - }, - } { - t.Run(fmt.Sprintf("Test %02d", i), func(t *testing.T) { - err := tt.af.AddRecord(tt.record) - if err != nil && !strings.Contains(err.Error(), tt.errContains) { - t.Errorf("Error is %v, does not contain %v", err, tt.errContains) - } - if err == nil && len(tt.errContains) > 0 { - t.Errorf("Got no error, want %v", tt.errContains) - } - - if !reflect.DeepEqual(tt.af, tt.result) { - t.Errorf("got %v, want %v", tt.af, tt.result) - } - }) - } -} - -func TestFilesfillInParent(t *testing.T) { - for i, tt := range []struct { - af *Files - result *Files - }{ - { - af: &Files{ - Records: map[string]cpio.Record{ - "foo/bar": cpio.Directory("foo/bar", 0o777), - }, - }, - result: &Files{ - Records: map[string]cpio.Record{ - "foo/bar": cpio.Directory("foo/bar", 0o777), - "foo": cpio.Directory("foo", 0o755), - }, - }, - }, - { - af: &Files{ - Files: map[string]string{ - "baz/baz/baz": "/somewhere", - }, - Records: map[string]cpio.Record{ - "foo/bar": cpio.Directory("foo/bar", 0o777), - }, - }, - result: &Files{ - Files: map[string]string{ - "baz/baz/baz": "/somewhere", - }, - Records: map[string]cpio.Record{ - "foo/bar": cpio.Directory("foo/bar", 0o777), - "foo": cpio.Directory("foo", 0o755), - "baz": cpio.Directory("baz", 0o755), - "baz/baz": cpio.Directory("baz/baz", 0o755), - }, - }, - }, - { - af: &Files{}, - result: &Files{}, - }, - } { - t.Run(fmt.Sprintf("Test %02d", i), func(t *testing.T) { - tt.af.fillInParents() - if !reflect.DeepEqual(tt.af, tt.result) { - t.Errorf("got %v, want %v", tt.af, tt.result) - } - }) - } -} - -type MockArchiver struct { - Records Records - FinishCalled bool - BaseArchive []cpio.Record -} - -func (ma *MockArchiver) WriteRecord(r cpio.Record) error { - if _, ok := ma.Records[r.Name]; ok { - return fmt.Errorf("file exists") - } - ma.Records[r.Name] = r - return nil -} - -func (ma *MockArchiver) Finish() error { - ma.FinishCalled = true - return nil -} - -func (ma *MockArchiver) ReadRecord() (cpio.Record, error) { - if len(ma.BaseArchive) > 0 { - next := ma.BaseArchive[0] - ma.BaseArchive = ma.BaseArchive[1:] - return next, nil - } - return cpio.Record{}, io.EOF -} - -type Records map[string]cpio.Record - -func RecordsEqual(r1, r2 Records, recordEqual func(cpio.Record, cpio.Record) bool) bool { - for name, s1 := range r1 { - s2, ok := r2[name] - if !ok { - return false - } - if !recordEqual(s1, s2) { - return false - } - } - for name := range r2 { - if _, ok := r1[name]; !ok { - return false - } - } - return true -} - -func sameNameModeContent(r1 cpio.Record, r2 cpio.Record) bool { - if r1.Name != r2.Name || r1.Mode != r2.Mode { - return false - } - return uio.ReaderAtEqual(r1.ReaderAt, r2.ReaderAt) -} - -func TestOptsWrite(t *testing.T) { - for i, tt := range []struct { - desc string - opts *Opts - ma *MockArchiver - want 
Records - err error - }{ - { - desc: "no conflicts, just records", - opts: &Opts{ - Files: &Files{ - Records: map[string]cpio.Record{ - "foo": cpio.Symlink("foo", "elsewhere"), - }, - }, - }, - ma: &MockArchiver{ - Records: make(Records), - BaseArchive: []cpio.Record{ - cpio.Directory("etc", 0o777), - cpio.Directory("etc/nginx", 0o777), - }, - }, - want: Records{ - "foo": cpio.Symlink("foo", "elsewhere"), - "etc": cpio.Directory("etc", 0o777), - "etc/nginx": cpio.Directory("etc/nginx", 0o777), - }, - }, - { - desc: "default already exists", - opts: &Opts{ - Files: &Files{ - Records: map[string]cpio.Record{ - "etc": cpio.Symlink("etc", "whatever"), - }, - }, - }, - ma: &MockArchiver{ - Records: make(Records), - BaseArchive: []cpio.Record{ - cpio.Directory("etc", 0o777), - }, - }, - want: Records{ - "etc": cpio.Symlink("etc", "whatever"), - }, - }, - { - desc: "no conflicts, missing parent automatically created", - opts: &Opts{ - Files: &Files{ - Records: map[string]cpio.Record{ - "foo/bar/baz": cpio.Symlink("foo/bar/baz", "elsewhere"), - }, - }, - }, - ma: &MockArchiver{ - Records: make(Records), - }, - want: Records{ - "foo": cpio.Directory("foo", 0o755), - "foo/bar": cpio.Directory("foo/bar", 0o755), - "foo/bar/baz": cpio.Symlink("foo/bar/baz", "elsewhere"), - }, - }, - { - desc: "parent only automatically created if not already exists", - opts: &Opts{ - Files: &Files{ - Records: map[string]cpio.Record{ - "foo/bar": cpio.Directory("foo/bar", 0o444), - "foo/bar/baz": cpio.Symlink("foo/bar/baz", "elsewhere"), - }, - }, - }, - ma: &MockArchiver{ - Records: make(Records), - }, - want: Records{ - "foo": cpio.Directory("foo", 0o755), - "foo/bar": cpio.Directory("foo/bar", 0o444), - "foo/bar/baz": cpio.Symlink("foo/bar/baz", "elsewhere"), - }, - }, - { - desc: "base archive", - opts: &Opts{ - Files: &Files{ - Records: map[string]cpio.Record{ - "foo/bar": cpio.Symlink("foo/bar", "elsewhere"), - "exists": cpio.Directory("exists", 0o777), - }, - }, - }, - ma: &MockArchiver{ - Records: make(Records), - BaseArchive: []cpio.Record{ - cpio.Directory("etc", 0o755), - cpio.Directory("foo", 0o444), - cpio.Directory("exists", 0), - }, - }, - want: Records{ - "etc": cpio.Directory("etc", 0o755), - "exists": cpio.Directory("exists", 0o777), - "foo": cpio.Directory("foo", 0o444), - "foo/bar": cpio.Symlink("foo/bar", "elsewhere"), - }, - }, - { - desc: "base archive with init, no user init", - opts: &Opts{ - Files: &Files{ - Records: map[string]cpio.Record{}, - }, - }, - ma: &MockArchiver{ - Records: make(Records), - BaseArchive: []cpio.Record{ - cpio.StaticFile("init", "boo", 0o555), - }, - }, - want: Records{ - "init": cpio.StaticFile("init", "boo", 0o555), - }, - }, - { - desc: "base archive with init and user init", - opts: &Opts{ - Files: &Files{ - Records: map[string]cpio.Record{ - "init": cpio.StaticFile("init", "bar", 0o444), - }, - }, - }, - ma: &MockArchiver{ - Records: make(Records), - BaseArchive: []cpio.Record{ - cpio.StaticFile("init", "boo", 0o555), - }, - }, - want: Records{ - "init": cpio.StaticFile("init", "bar", 0o444), - "inito": cpio.StaticFile("inito", "boo", 0o555), - }, - }, - { - desc: "base archive with init, use existing init", - opts: &Opts{ - Files: &Files{ - Records: map[string]cpio.Record{}, - }, - UseExistingInit: true, - }, - ma: &MockArchiver{ - Records: make(Records), - BaseArchive: []cpio.Record{ - cpio.StaticFile("init", "boo", 0o555), - }, - }, - want: Records{ - "init": cpio.StaticFile("init", "boo", 0o555), - }, - }, - { - desc: "base archive with init and user init, use 
existing init", - opts: &Opts{ - Files: &Files{ - Records: map[string]cpio.Record{ - "init": cpio.StaticFile("init", "huh", 0o111), - }, - }, - UseExistingInit: true, - }, - ma: &MockArchiver{ - Records: make(Records), - BaseArchive: []cpio.Record{ - cpio.StaticFile("init", "boo", 0o555), - }, - }, - want: Records{ - "init": cpio.StaticFile("init", "boo", 0o555), - "inito": cpio.StaticFile("inito", "huh", 0o111), - }, - }, - } { - t.Run(fmt.Sprintf("Test %02d (%s)", i, tt.desc), func(t *testing.T) { - tt.opts.BaseArchive = tt.ma - tt.opts.OutputFile = tt.ma - - if err := Write(tt.opts); err != tt.err { - t.Errorf("Write() = %v, want %v", err, tt.err) - } else if err == nil && !tt.ma.FinishCalled { - t.Errorf("Finish wasn't called on archive") - } - - if !RecordsEqual(tt.ma.Records, tt.want, sameNameModeContent) { - t.Errorf("Write() = %v, want %v", tt.ma.Records, tt.want) - } - }) - } -} diff --git a/pkg/uroot/test/gopath1/src/foo/foo.go b/pkg/uroot/test/gopath1/src/foo/foo.go deleted file mode 100644 index 5e16ecd571..0000000000 --- a/pkg/uroot/test/gopath1/src/foo/foo.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2018 the u-root Authors. All rights reserved -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -func main() { - -} diff --git a/pkg/uroot/test/gopath1/src/os/os.go b/pkg/uroot/test/gopath1/src/os/os.go deleted file mode 100644 index 5e16ecd571..0000000000 --- a/pkg/uroot/test/gopath1/src/os/os.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2018 the u-root Authors. All rights reserved -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -func main() { - -} diff --git a/pkg/uroot/test/gopath2/src/mypkga/mypkga.go b/pkg/uroot/test/gopath2/src/mypkga/mypkga.go deleted file mode 100644 index 5e16ecd571..0000000000 --- a/pkg/uroot/test/gopath2/src/mypkga/mypkga.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2018 the u-root Authors. All rights reserved -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -func main() { - -} diff --git a/pkg/uroot/test/gopath2/src/mypkgb/mypkgb.go b/pkg/uroot/test/gopath2/src/mypkgb/mypkgb.go deleted file mode 100644 index 5e16ecd571..0000000000 --- a/pkg/uroot/test/gopath2/src/mypkgb/mypkgb.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2018 the u-root Authors. All rights reserved -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -func main() { - -} diff --git a/pkg/uroot/uroot.go b/pkg/uroot/uroot.go deleted file mode 100644 index 1994352559..0000000000 --- a/pkg/uroot/uroot.go +++ /dev/null @@ -1,492 +0,0 @@ -// Copyright 2015-2017 the u-root Authors. All rights reserved -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package uroot creates root file systems from Go programs. -// -// uroot will appropriately compile the Go programs, create symlinks for their -// names, and assemble an initramfs with additional files as specified. 
-package uroot - -import ( - "debug/elf" - "fmt" - "os" - "path" - "path/filepath" - "strings" - - "github.com/u-root/gobusybox/src/pkg/bb/findpkg" - gbbgolang "github.com/u-root/gobusybox/src/pkg/golang" - "github.com/u-root/u-root/pkg/cpio" - "github.com/u-root/u-root/pkg/ldd" - "github.com/u-root/u-root/pkg/uflag" - "github.com/u-root/u-root/pkg/ulog" - "github.com/u-root/u-root/pkg/uroot/builder" - "github.com/u-root/u-root/pkg/uroot/initramfs" -) - -// These constants are used in DefaultRamfs. -const ( - // This is the literal timezone file for GMT-0. Given that we have no - // idea where we will be running, GMT seems a reasonable guess. If it - // matters, setup code should download and change this to something - // else. - gmt0 = "TZif2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00GMT\x00\x00\x00TZif2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x04\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00GMT\x00\x00\x00\nGMT0\n" - - nameserver = "nameserver 8.8.8.8\n" -) - -// DefaultRamfs returns a cpio.Archive for the target OS. -// If an OS is not known, it will return a reasonable u-root specific -// default. -func DefaultRamfs() *cpio.Archive { - switch gbbgolang.Default().GOOS { - case "linux": - return cpio.ArchiveFromRecords([]cpio.Record{ - cpio.Directory("bin", 0o755), - cpio.Directory("dev", 0o755), - cpio.Directory("env", 0o755), - cpio.Directory("etc", 0o755), - cpio.Directory("lib64", 0o755), - cpio.Directory("proc", 0o755), - cpio.Directory("sys", 0o755), - cpio.Directory("tcz", 0o755), - cpio.Directory("tmp", 0o777), - cpio.Directory("ubin", 0o755), - cpio.Directory("usr", 0o755), - cpio.Directory("usr/lib", 0o755), - cpio.Directory("var/log", 0o777), - cpio.CharDev("dev/console", 0o600, 5, 1), - cpio.CharDev("dev/tty", 0o666, 5, 0), - cpio.CharDev("dev/null", 0o666, 1, 3), - cpio.CharDev("dev/port", 0o640, 1, 4), - cpio.CharDev("dev/urandom", 0o666, 1, 9), - cpio.StaticFile("etc/resolv.conf", nameserver, 0o644), - cpio.StaticFile("etc/localtime", gmt0, 0o644), - }) - default: - return cpio.ArchiveFromRecords([]cpio.Record{ - cpio.Directory("ubin", 0o755), - cpio.Directory("bbin", 0o755), - }) - } -} - -// Commands specifies a list of Golang packages to build with a builder, e.g. -// in busybox mode, source mode, or binary mode. -// -// See Builder for an explanation of build modes. -type Commands struct { - // Builder is the Go compiler mode. - Builder builder.Builder - - // Packages are the Go commands to include (compiled or otherwise) and - // add to the archive. - // - // Currently allowed formats: - // - // - package imports; e.g. github.com/u-root/u-root/cmds/ls - // - globs of package imports; e.g. github.com/u-root/u-root/cmds/* - // - paths to package directories; e.g. $GOPATH/src/github.com/u-root/u-root/cmds/ls - // - globs of paths to package directories; e.g. ./cmds/* - // - // Directories may be relative or absolute, with or without globs. - // Globs are resolved using filepath.Glob. - Packages []string - - // BinaryDir is the directory in which the resulting binaries are - // placed inside the initramfs. - // - // BinaryDir may be empty, in which case Builder.DefaultBinaryDir() - // will be used. - BinaryDir string -} - -// TargetDir returns the initramfs binary directory for these Commands.
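A hedged fragment showing the fallback that TargetDir implements below; it assumes it runs inside package uroot with fmt and the builder package imported, and "bbin" reflects the busybox builder's default directory as exercised elsewhere in this change:

cmds := Commands{
	Builder:  builder.BusyBox,
	Packages: []string{"github.com/u-root/u-root/cmds/core/ls"},
}
// BinaryDir is unset, so the builder's default directory is used.
fmt.Println(cmds.TargetDir()) // e.g. "bbin" for the busybox builder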
-func (c Commands) TargetDir() string { - if len(c.BinaryDir) != 0 { - return c.BinaryDir - } - return c.Builder.DefaultBinaryDir() -} - -// Opts are the arguments to CreateInitramfs. -// -// Opts contains everything that influences initramfs creation such as the Go -// build environment. -type Opts struct { - // Env is the Golang build environment (GOOS, GOARCH, etc). - // - // If nil, gbbgolang.Default is used. - Env *gbbgolang.Environ - - // Commands specify packages to build using a specific builder. - // - // E.g. the following will build 'ls' and 'ip' in busybox mode, but - // 'cd' and 'cat' as separate binaries. 'cd', 'cat', 'bb', and symlinks - // from 'ls' and 'ip' will be added to the final initramfs. - // - // []Commands{ - // Commands{ - // Builder: builder.BusyBox, - // Packages: []string{ - // "github.com/u-root/u-root/cmds/ls", - // "github.com/u-root/u-root/cmds/ip", - // }, - // }, - // Commands{ - // Builder: builder.Binary, - // Packages: []string{ - // "github.com/u-root/u-root/cmds/cd", - // "github.com/u-root/u-root/cmds/cat", - // }, - // }, - // } - Commands []Commands - - // UrootSource is the filesystem path to the locally checked out - // u-root source tree. This is needed to resolve templates or - // import paths of u-root commands. - UrootSource string - - // TempDir is a temporary directory for builders to store files in. - TempDir string - - // ExtraFiles are files to add to the archive in addition to the Go - // packages. - // - // Shared library dependencies will automatically also be added to the - // archive using ldd, unless SkipLDD (below) is true. - // - // The following formats are allowed in the list: - // - // - "/home/chrisko/foo:root/bar" adds the file from absolute path - // /home/chrisko/foo on the host at the relative root/bar in the - // archive. - // - "/home/foo" is equivalent to "/home/foo:home/foo". - ExtraFiles []string - - // If true, do not use ldd to pick up dependencies from local machine for - // ExtraFiles. Useful if you have all deps revision controlled and wish to - // ensure builds are repeatable, and/or if the local machine's binaries use - // instructions unavailable on the emulated cpu. - // - // If you turn this on but do not manually list all deps, affected binaries - // will misbehave. - SkipLDD bool - - // OutputFile is the archive output file. - OutputFile initramfs.Writer - - // BaseArchive is an existing initramfs to include in the resulting - // initramfs. - BaseArchive initramfs.Reader - - // UseExistingInit determines whether the existing init from - // BaseArchive should be used. - // - // If this is false, the "init" from BaseArchive will be renamed to - // "inito" (init-original). - UseExistingInit bool - - // InitCmd is the name of a command to link /init to. - // - // This can be an absolute path or the name of a command included in - // Commands. - // - // If this is empty, no init symlink will be created, but a user may - // still specify a command called init or include an /init file. - InitCmd string - - // UinitCmd is the name of a command to link /bin/uinit to. - // - // This can be an absolute path or the name of a command included in - // Commands. - // - // The u-root init will always attempt to fork/exec a uinit program, - // and append arguments from both the kernel command-line - // (uroot.uinitargs) as well as specified in UinitArgs. - // - // If this is empty, no uinit symlink will be created, but a user may - // still specify a command called uinit or include a /bin/uinit file. 
- UinitCmd string - - // UinitArgs are the arguments passed to /bin/uinit. - UinitArgs []string - - // DefaultShell is the default shell to start after init. - // - // This can be an absolute path or the name of a command included in - // Commands. - // - // This must be specified to have a default shell. - DefaultShell string - - // Build options for building Go binaries. Ultimately, this holds all the - // args that end up being passed to `go build`. - BuildOpts *gbbgolang.BuildOpts -} - -// CreateInitramfs creates an initramfs built to opts' specifications. -func CreateInitramfs(logger ulog.Logger, opts Opts) error { - if _, err := os.Stat(opts.TempDir); os.IsNotExist(err) { - return fmt.Errorf("temp dir %q must exist: %v", opts.TempDir, err) - } - if opts.OutputFile == nil { - return fmt.Errorf("must give output file") - } - - env := gbbgolang.Default() - if opts.Env != nil { - env = opts.Env - } - if opts.BuildOpts == nil { - opts.BuildOpts = &gbbgolang.BuildOpts{} - } - - files := initramfs.NewFiles() - - lookupEnv := findpkg.DefaultEnv() - if opts.UrootSource != "" { - lookupEnv.URootSource = opts.UrootSource - } - - // Expand commands. - for index, cmds := range opts.Commands { - paths, err := findpkg.ResolveGlobs(logger, env, lookupEnv, cmds.Packages) - if err != nil { - return err - } - opts.Commands[index].Packages = paths - } - - // Add each build mode's commands to the archive. - for _, cmds := range opts.Commands { - builderTmpDir, err := os.MkdirTemp(opts.TempDir, "builder") - if err != nil { - return err - } - - // Build packages. - bOpts := builder.Opts{ - Env: env, - BuildOpts: opts.BuildOpts, - Packages: cmds.Packages, - TempDir: builderTmpDir, - BinaryDir: cmds.TargetDir(), - } - if err := cmds.Builder.Build(logger, files, bOpts); err != nil { - return fmt.Errorf("error building: %v", err) - } - } - - // Open the target initramfs file. - archive := &initramfs.Opts{ - Files: files, - OutputFile: opts.OutputFile, - BaseArchive: opts.BaseArchive, - UseExistingInit: opts.UseExistingInit, - } - if err := ParseExtraFiles(logger, archive.Files, opts.ExtraFiles, !opts.SkipLDD); err != nil { - return err - } - if err := opts.addSymlinkTo(logger, archive, opts.UinitCmd, "bin/uinit"); err != nil { - return fmt.Errorf("%v: specify -uinitcmd=\"\" to ignore this error and build without a uinit", err) - } - if len(opts.UinitArgs) > 0 { - if err := archive.AddRecord(cpio.StaticFile("etc/uinit.flags", uflag.ArgvToFile(opts.UinitArgs), 0o444)); err != nil { - return fmt.Errorf("%v: could not add uinit arguments from UinitArgs (-uinitcmd) to initramfs", err) - } - } - if err := opts.addSymlinkTo(logger, archive, opts.InitCmd, "init"); err != nil { - return fmt.Errorf("%v: specify -initcmd=\"\" to ignore this error and build without an init (or, did you specify a list, and are you missing github.com/u-root/u-root/cmds/core/init?)", err) - } - if err := opts.addSymlinkTo(logger, archive, opts.DefaultShell, "bin/sh"); err != nil { - return fmt.Errorf("%v: specify -defaultsh=\"\" to ignore this error and build without a shell", err) - } - if err := opts.addSymlinkTo(logger, archive, opts.DefaultShell, "bin/defaultsh"); err != nil { - return fmt.Errorf("%v: specify -defaultsh=\"\" to ignore this error and build without a shell", err) - } - - // Finally, write the archive.
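The write step below merges these Files over BaseArchive; unless UseExistingInit is set, an init present in the base archive is preserved as inito so the newly built init wins (the behavior exercised by TestOptsWrite earlier in this change). A hedged fragment of the same merge driven directly, where w and base are placeholders for an initramfs.Writer and initramfs.Reader:

archive := &initramfs.Opts{
	Files:       initramfs.NewFiles(),
	OutputFile:  w,    // initramfs.Writer (placeholder)
	BaseArchive: base, // initramfs.Reader (placeholder)
}
if err := initramfs.Write(archive); err != nil {
	log.Fatal(err)
}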
- if err := initramfs.Write(archive); err != nil { - return fmt.Errorf("error archiving: %v", err) - } - return nil -} - -func (o *Opts) addSymlinkTo(logger ulog.Logger, archive *initramfs.Opts, command string, source string) error { - if len(command) == 0 { - return nil - } - - target, err := resolveCommandOrPath(command, o.Commands) - if err != nil { - if o.Commands != nil { - return fmt.Errorf("could not create symlink from %q to %q: %v", source, command, err) - } - logger.Printf("Could not create symlink from %q to %q: %v", source, command, err) - return nil - } - - // Make a relative symlink from /source -> target - // - // E.g. bin/defaultsh -> target, so you need to - // filepath.Rel(/bin, target) since relative symlinks are - // evaluated from their PARENT directory. - relTarget, err := filepath.Rel(filepath.Join("/", filepath.Dir(source)), target) - if err != nil { - return err - } - - if err := archive.AddRecord(cpio.Symlink(source, relTarget)); err != nil { - return fmt.Errorf("failed to add symlink %s -> %s to initramfs: %v", source, relTarget, err) - } - return nil -} - -func resolveCommandOrPath(cmd string, cmds []Commands) (string, error) { - if strings.ContainsRune(cmd, filepath.Separator) { - return cmd, nil - } - - // Each build mode has its own binary dir (/bbin or /bin or /ubin). - // - // Figure out which build mode the shell is in, and symlink to that - // build mode. - for _, c := range cmds { - for _, p := range c.Packages { - if name := path.Base(p); name == cmd { - return path.Join("/", c.TargetDir(), cmd), nil - } - } - } - - return "", fmt.Errorf("command or path %q not included in u-root build", cmd) -} - -// ParseExtraFiles adds files from the extraFiles list to the archive. -// -// The following formats are allowed in the extraFiles list: -// -// - "/home/chrisko/foo:root/bar" adds the file from absolute path -// /home/chrisko/foo on the host at the relative root/bar in the -// archive. -// - "/home/foo" is equivalent to "/home/foo:home/foo". -// -// ParseExtraFiles will also add ldd-listed dependencies if lddDeps is true. -func ParseExtraFiles(logger ulog.Logger, archive *initramfs.Files, extraFiles []string, lddDeps bool) error { - var err error - // Add files from command line. - for _, file := range extraFiles { - var src, dst string - parts := strings.SplitN(file, ":", 2) - if len(parts) == 2 { - // treat the entry with the new src:dst syntax - src = filepath.Clean(parts[0]) - dst = filepath.Clean(parts[1]) - } else { - // plain old syntax - // filepath.Clean interprets an empty string as CWD for no good reason. - if len(file) == 0 { - continue - } - src = filepath.Clean(file) - dst = src - if filepath.IsAbs(dst) { - dst, err = filepath.Rel("/", dst) - if err != nil { - return fmt.Errorf("cannot make path relative to /: %v: %v", dst, err) - } - } - } - src, err := filepath.Abs(src) - if err != nil { - return fmt.Errorf("couldn't find absolute path for %q: %v", src, err) - } - if err := archive.AddFile(src, dst); err != nil { - return fmt.Errorf("couldn't add %q to archive: %v", file, err) - } - - if lddDeps { - // Users are frequently naming directories now, not just files. - // Hence we must use walk here, not just check the one file. - if err := filepath.Walk(src, func(name string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if info.IsDir() { - return nil - } - // Try to open it as an ELF. If that fails, we can skip the ldd - // step. The file will still be included from above. 
- f, err := elf.Open(name) - if err != nil { - return nil - } - if err = f.Close(); err != nil { - logger.Printf("WARNING: Closing ELF file %q: %v", name, err) - } - // Pull dependencies in the case of binaries. If `path` is not - // a binary, `libs` will just be empty. - libs, err := ldd.FList(name) - if err != nil { - return fmt.Errorf("WARNING: couldn't add ldd dependencies for %q: %v", name, err) - } - for _, lib := range libs { - if err := archive.AddFileNoFollow(lib, lib[1:]); err != nil { - logger.Printf("WARNING: couldn't add ldd dependencies for %q: %v", lib, err) - } - } - return nil - }); err != nil { - logger.Printf("Getting dependencies for %q: %v", src, err) - } - } - } - return nil -} - -// AddCommands adds commands to the build. -func (o *Opts) AddCommands(c ...Commands) { - o.Commands = append(o.Commands, c...) -} - -// AddBusyBoxCommands adds Go commands to the existing busybox build, -// creating one if none exists yet. -func (o *Opts) AddBusyBoxCommands(pkgs ...string) { - for i, cmds := range o.Commands { - if cmds.Builder == builder.BusyBox { - o.Commands[i].Packages = append(cmds.Packages, pkgs...) - return - } - } - - // Not found? Add first busybox. - o.AddCommands(BusyBoxCmds(pkgs...)...) -} - -// BinaryCmds returns a list of Commands with cmds built as separate binaries. -func BinaryCmds(cmds ...string) []Commands { - if len(cmds) == 0 { - return nil - } - return []Commands{ - { - Builder: builder.Binary, - Packages: cmds, - }, - } -} - -// BusyBoxCmds returns a list of Commands with cmds built as a busybox. -func BusyBoxCmds(cmds ...string) []Commands { - if len(cmds) == 0 { - return nil - } - return []Commands{ - { - Builder: builder.BusyBox, - Packages: cmds, - }, - } -} diff --git a/pkg/uroot/uroot_test.go b/pkg/uroot/uroot_test.go deleted file mode 100644 index f4eb5f144e..0000000000 --- a/pkg/uroot/uroot_test.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2018 the u-root Authors. All rights reserved -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uroot - -import ( - "fmt" - "os" - "path/filepath" - "syscall" - "testing" - - "github.com/u-root/gobusybox/src/pkg/golang" - "github.com/u-root/u-root/pkg/cpio" - "github.com/u-root/u-root/pkg/ulog/ulogtest" - "github.com/u-root/u-root/pkg/uroot/builder" - itest "github.com/u-root/u-root/pkg/uroot/initramfs/test" -) - -type inMemArchive struct { - *cpio.Archive -} - -// Finish implements initramfs.Writer.Finish.
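An end-to-end, hedged sketch of the pkg/uroot API this change deletes, mirroring the test below; l, tmp, and w are placeholders for a ulog.Logger, a temporary directory, and an initramfs.Writer, and /bin/bash is only an illustrative host path:

opts := uroot.Opts{
	TempDir:      tmp,
	InitCmd:      "init",
	DefaultShell: "gosh",
	UrootSource:  ".",
	OutputFile:   w,
	// host:archive syntax, as documented for ExtraFiles above.
	ExtraFiles: []string{"/bin/bash:bin/bash"},
	Commands: uroot.BusyBoxCmds(
		"github.com/u-root/u-root/cmds/core/init",
		"github.com/u-root/u-root/cmds/core/gosh",
	),
}
if err := uroot.CreateInitramfs(l, opts); err != nil {
	log.Fatal(err)
}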
-func (inMemArchive) Finish() error { return nil } - -func TestCreateInitramfs(t *testing.T) { - dir := t.TempDir() - syscall.Umask(0) - - urootpath, err := filepath.Abs("../../") - if err != nil { - t.Fatalf("failure to set up test: %v", err) - } - - tmp777 := filepath.Join(dir, "tmp777") - if err := os.MkdirAll(tmp777, 0o777); err != nil { - t.Error(err) - } - - l := ulogtest.Logger{TB: t} - - for i, tt := range []struct { - name string - opts Opts - want string - validators []itest.ArchiveValidator - }{ - { - name: "BB archive with ls and init", - opts: Opts{ - Env: golang.Default(golang.DisableCGO()), - TempDir: dir, - ExtraFiles: nil, - UseExistingInit: false, - InitCmd: "init", - DefaultShell: "ls", - UrootSource: urootpath, - Commands: []Commands{ - { - Builder: builder.BusyBox, - Packages: []string{ - "github.com/u-root/u-root/cmds/core/init", - "github.com/u-root/u-root/cmds/core/ls", - }, - }, - }, - }, - want: "", - validators: []itest.ArchiveValidator{ - itest.HasFile{"bbin/bb"}, - itest.HasRecord{cpio.Symlink("bbin/init", "bb")}, - itest.HasRecord{cpio.Symlink("bbin/ls", "bb")}, - itest.HasRecord{cpio.Symlink("bin/defaultsh", "../bbin/ls")}, - itest.HasRecord{cpio.Symlink("bin/sh", "../bbin/ls")}, - }, - }, - { - name: "no temp dir", - opts: Opts{ - InitCmd: "init", - DefaultShell: "", - }, - want: "temp dir \"\" must exist: stat : no such file or directory", - validators: []itest.ArchiveValidator{ - itest.IsEmpty{}, - }, - }, - { - name: "no commands", - opts: Opts{ - TempDir: dir, - }, - want: "", - validators: []itest.ArchiveValidator{ - itest.MissingFile{"bbin/bb"}, - }, - }, - { - name: "init specified, but not in commands", - opts: Opts{ - Env: golang.Default(golang.DisableCGO()), - TempDir: dir, - DefaultShell: "zoocar", - InitCmd: "foobar", - UrootSource: urootpath, - Commands: []Commands{ - { - Builder: builder.Binary, - Packages: []string{ - "github.com/u-root/u-root/cmds/core/ls", - }, - }, - }, - }, - want: "could not create symlink from \"init\" to \"foobar\": command or path \"foobar\" not included in u-root build: specify -initcmd=\"\" to ignore this error and build without an init (or, did you specify a list, and are you missing github.com/u-root/u-root/cmds/core/init?)", - validators: []itest.ArchiveValidator{ - itest.IsEmpty{}, - }, - }, - { - name: "init symlinked to absolute path", - opts: Opts{ - TempDir: dir, - InitCmd: "/bin/systemd", - }, - want: "", - validators: []itest.ArchiveValidator{ - itest.HasRecord{cpio.Symlink("init", "bin/systemd")}, - }, - }, - { - name: "multi-mode archive", - opts: Opts{ - Env: golang.Default(golang.DisableCGO()), - TempDir: dir, - ExtraFiles: nil, - UseExistingInit: false, - InitCmd: "init", - DefaultShell: "ls", - UrootSource: urootpath, - Commands: []Commands{ - { - Builder: builder.BusyBox, - Packages: []string{ - "github.com/u-root/u-root/cmds/core/init", - "github.com/u-root/u-root/cmds/core/ls", - }, - }, - { - Builder: builder.Binary, - Packages: []string{ - "github.com/u-root/u-root/cmds/core/cp", - "github.com/u-root/u-root/cmds/core/dd", - }, - }, - }, - }, - want: "", - validators: []itest.ArchiveValidator{ - itest.HasRecord{cpio.Symlink("init", "bbin/init")}, - - // bb mode. - itest.HasFile{"bbin/bb"}, - itest.HasRecord{cpio.Symlink("bbin/init", "bb")}, - itest.HasRecord{cpio.Symlink("bbin/ls", "bb")}, - itest.HasRecord{cpio.Symlink("bin/defaultsh", "../bbin/ls")}, - itest.HasRecord{cpio.Symlink("bin/sh", "../bbin/ls")}, - - // binary mode. 
- itest.HasFile{"bin/cp"}, - itest.HasFile{"bin/dd"}, - }, - }, - } { - t.Run(fmt.Sprintf("Test %d [%s]", i, tt.name), func(t *testing.T) { - archive := inMemArchive{cpio.InMemArchive()} - tt.opts.OutputFile = archive - // Compare error type or error string. - if err := CreateInitramfs(l, tt.opts); (err != nil && err.Error() != tt.want) || (len(tt.want) > 0 && err == nil) { - t.Errorf("CreateInitramfs(%v) = %v, want %v", tt.opts, err, tt.want) - } - - for _, v := range tt.validators { - if err := v.Validate(archive.Archive); err != nil { - t.Errorf("validator failed: %v / archive:\n%s", err, archive) - } - } - }) - } -} diff --git a/templates.go b/templates.go deleted file mode 100644 index a517e67097..0000000000 --- a/templates.go +++ /dev/null @@ -1,310 +0,0 @@ -// Copyright 2015-2020 the u-root Authors. All rights reserved -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -// TODO: make templates able to include other templates. -// e.g. "all" below should just say "core" and "boot". Use it to replace -// the old 'systemboot' template. -// Or just call it a day, now that we have the new directory structure, and dump the templates -// completely; that may be our best bet. -var templates = map[string][]string{ - "all": { - "github.com/u-root/u-root/cmds/core/*", - "github.com/u-root/u-root/cmds/boot/*boot*", - }, - "boot": { - "github.com/u-root/u-root/cmds/boot/*boot*", - }, - // Absolutely everything, including experimental commands. - "world": { - "github.com/u-root/u-root/cmds/*/*", - }, - // Core should be things you don't want to live without. - "core": { - "github.com/u-root/u-root/cmds/core/*", - }, - // Minimal should be things you can't live without. - "minimal": { - "github.com/u-root/u-root/cmds/core/blkid", - "github.com/u-root/u-root/cmds/core/cat", - "github.com/u-root/u-root/cmds/core/chmod", - "github.com/u-root/u-root/cmds/core/cmp", - "github.com/u-root/u-root/cmds/core/cp", - "github.com/u-root/u-root/cmds/core/date", - "github.com/u-root/u-root/cmds/core/dd", - "github.com/u-root/u-root/cmds/core/df", - "github.com/u-root/u-root/cmds/core/dhclient", - "github.com/u-root/u-root/cmds/core/dmesg", - "github.com/u-root/u-root/cmds/core/echo", - "github.com/u-root/u-root/cmds/core/find", - "github.com/u-root/u-root/cmds/core/free", - "github.com/u-root/u-root/cmds/core/gosh", - "github.com/u-root/u-root/cmds/core/gpgv", - "github.com/u-root/u-root/cmds/core/grep", - "github.com/u-root/u-root/cmds/core/gzip", - "github.com/u-root/u-root/cmds/core/hostname", - "github.com/u-root/u-root/cmds/core/id", - "github.com/u-root/u-root/cmds/core/init", - "github.com/u-root/u-root/cmds/core/insmod", - "github.com/u-root/u-root/cmds/core/io", - "github.com/u-root/u-root/cmds/core/ip", - "github.com/u-root/u-root/cmds/core/kexec", - "github.com/u-root/u-root/cmds/core/kill", - "github.com/u-root/u-root/cmds/core/ln", - "github.com/u-root/u-root/cmds/core/losetup", - "github.com/u-root/u-root/cmds/core/ls", - "github.com/u-root/u-root/cmds/core/lsmod", - "github.com/u-root/u-root/cmds/core/mkdir", - "github.com/u-root/u-root/cmds/core/mknod", - "github.com/u-root/u-root/cmds/core/mount", - "github.com/u-root/u-root/cmds/core/msr", - "github.com/u-root/u-root/cmds/core/mv", - "github.com/u-root/u-root/cmds/core/pci", - "github.com/u-root/u-root/cmds/core/ping", - "github.com/u-root/u-root/cmds/core/printenv", - "github.com/u-root/u-root/cmds/core/ps", - "github.com/u-root/u-root/cmds/core/pwd", - 
"github.com/u-root/u-root/cmds/core/readlink", - "github.com/u-root/u-root/cmds/core/rm", - "github.com/u-root/u-root/cmds/core/rmmod", - "github.com/u-root/u-root/cmds/core/seq", - "github.com/u-root/u-root/cmds/core/shutdown", - "github.com/u-root/u-root/cmds/core/sleep", - "github.com/u-root/u-root/cmds/core/sync", - "github.com/u-root/u-root/cmds/core/tail", - "github.com/u-root/u-root/cmds/core/tee", - "github.com/u-root/u-root/cmds/core/truncate", - "github.com/u-root/u-root/cmds/core/umount", - "github.com/u-root/u-root/cmds/core/uname", - "github.com/u-root/u-root/cmds/core/unshare", - "github.com/u-root/u-root/cmds/core/wc", - "github.com/u-root/u-root/cmds/core/wget", - "github.com/u-root/u-root/cmds/core/which", - }, - // embedded systems, like ARM based gadgets and SBCs - "embedded": { - "github.com/u-root/u-root/cmds/core/cat", - "github.com/u-root/u-root/cmds/core/cp", - "github.com/u-root/u-root/cmds/core/dd", - "github.com/u-root/u-root/cmds/core/dhclient", - "github.com/u-root/u-root/cmds/core/dmesg", - "github.com/u-root/u-root/cmds/core/echo", - "github.com/u-root/u-root/cmds/core/free", - "github.com/u-root/u-root/cmds/core/gosh", - "github.com/u-root/u-root/cmds/core/grep", - "github.com/u-root/u-root/cmds/core/init", - "github.com/u-root/u-root/cmds/core/insmod", - "github.com/u-root/u-root/cmds/core/ip", - "github.com/u-root/u-root/cmds/core/kexec", - "github.com/u-root/u-root/cmds/core/ln", - "github.com/u-root/u-root/cmds/core/ls", - "github.com/u-root/u-root/cmds/core/mkdir", - "github.com/u-root/u-root/cmds/core/mount", - "github.com/u-root/u-root/cmds/core/netcat", - "github.com/u-root/u-root/cmds/core/ping", - "github.com/u-root/u-root/cmds/core/rm", - "github.com/u-root/u-root/cmds/core/rmmod", - "github.com/u-root/u-root/cmds/core/shutdown", - "github.com/u-root/u-root/cmds/core/tail", - "github.com/u-root/u-root/cmds/core/tee", - "github.com/u-root/u-root/cmds/core/uname", - "github.com/u-root/u-root/cmds/core/wget", - }, - // coreboot-app minimal environment - "coreboot-app": { - "github.com/u-root/u-root/cmds/core/cat", - "github.com/u-root/u-root/cmds/exp/cbmem", - "github.com/u-root/u-root/cmds/core/chroot", - "github.com/u-root/u-root/cmds/core/cp", - "github.com/u-root/u-root/cmds/core/dd", - "github.com/u-root/u-root/cmds/core/dhclient", - "github.com/u-root/u-root/cmds/core/dmesg", - "github.com/u-root/u-root/cmds/core/find", - "github.com/u-root/u-root/cmds/core/gosh", - "github.com/u-root/u-root/cmds/core/grep", - "github.com/u-root/u-root/cmds/core/id", - "github.com/u-root/u-root/cmds/core/init", - "github.com/u-root/u-root/cmds/core/insmod", - "github.com/u-root/u-root/cmds/core/ip", - "github.com/u-root/u-root/cmds/core/kill", - "github.com/u-root/u-root/cmds/core/ls", - "github.com/u-root/u-root/cmds/core/mount", - "github.com/u-root/u-root/cmds/core/pci", - "github.com/u-root/u-root/cmds/core/ping", - "github.com/u-root/u-root/cmds/core/ps", - "github.com/u-root/u-root/cmds/core/pwd", - "github.com/u-root/u-root/cmds/core/rm", - "github.com/u-root/u-root/cmds/core/rmmod", - "github.com/u-root/u-root/cmds/core/shutdown", - "github.com/u-root/u-root/cmds/core/sshd", - "github.com/u-root/u-root/cmds/core/switch_root", - "github.com/u-root/u-root/cmds/core/tail", - "github.com/u-root/u-root/cmds/core/tee", - "github.com/u-root/u-root/cmds/core/uname", - "github.com/u-root/u-root/cmds/core/wget", - }, - "plan9": { - "github.com/u-root/u-root/cmds/core/*", - }, - // For a command to be uncommented, - // tinygo test should work on linux and darwin. 
- "tinygo": { - "github.com/u-root/u-root/cmds/core/backoff", - "github.com/u-root/u-root/cmds/core/base64", - "github.com/u-root/u-root/cmds/core/basename", - ////"github.com/u-root/u-root/cmds/core/bind", - //"github.com/u-root/u-root/cmds/core/blkid", - "github.com/u-root/u-root/cmds/core/cat", - "github.com/u-root/u-root/cmds/core/chmod", - //"github.com/u-root/u-root/cmds/core/chroot", credentials problem - "github.com/u-root/u-root/cmds/core/cmp", - "github.com/u-root/u-root/cmds/core/comm", - "github.com/u-root/u-root/cmds/core/cp", - //////"github.com/u-root/u-root/cmds/core/cpio", build constraints problem - "github.com/u-root/u-root/cmds/core/date", - "github.com/u-root/u-root/cmds/core/dd", - //////"github.com/u-root/u-root/cmds/core/df", - //////"github.com/u-root/u-root/cmds/core/dhclient", - "github.com/u-root/u-root/cmds/core/dirname", - //////"github.com/u-root/u-root/cmds/core/dmesg", - "github.com/u-root/u-root/cmds/core/echo", - //////"github.com/u-root/u-root/cmds/core/elvish", - "github.com/u-root/u-root/cmds/core/false", - //"github.com/u-root/u-root/cmds/core/find", - "github.com/u-root/u-root/cmds/core/free", - //"github.com/u-root/u-root/cmds/core/fusermount", - //////"github.com/u-root/u-root/cmds/core/gosh", - //"github.com/u-root/u-root/cmds/core/gpgv", - "github.com/u-root/u-root/cmds/core/gpt", - "github.com/u-root/u-root/cmds/core/grep", - //"github.com/u-root/u-root/cmds/core/gzip", - "github.com/u-root/u-root/cmds/core/hexdump", - //////"github.com/u-root/u-root/cmds/core/hostname", - //"github.com/u-root/u-root/cmds/core/hwclock", - //"github.com/u-root/u-root/cmds/core/id", - //////"github.com/u-root/u-root/cmds/core/init", - //"github.com/u-root/u-root/cmds/core/insmod", - //////"github.com/u-root/u-root/cmds/core/io", - //"github.com/u-root/u-root/cmds/core/ip", - //"github.com/u-root/u-root/cmds/core/kexec", - "github.com/u-root/u-root/cmds/core/kill", - //"github.com/u-root/u-root/cmds/core/lddfiles", - //"github.com/u-root/u-root/cmds/core/ln", - //"github.com/u-root/u-root/cmds/core/lockmsrs", - //"github.com/u-root/u-root/cmds/core/losetup", - "github.com/u-root/u-root/cmds/core/ls", - //"github.com/u-root/u-root/cmds/core/lsdrivers", - //"github.com/u-root/u-root/cmds/core/lsmod", - //"github.com/u-root/u-root/cmds/core/man", - //"github.com/u-root/u-root/cmds/core/md5sum", - "github.com/u-root/u-root/cmds/core/mkdir", - //////"github.com/u-root/u-root/cmds/core/mkfifo", - //////"github.com/u-root/u-root/cmds/core/mknod", - //"github.com/u-root/u-root/cmds/core/mktemp", - "github.com/u-root/u-root/cmds/core/more", - //"github.com/u-root/u-root/cmds/core/mount", - //"github.com/u-root/u-root/cmds/core/msr", - "github.com/u-root/u-root/cmds/core/mv", - //"github.com/u-root/u-root/cmds/core/netcat", - //////"github.com/u-root/u-root/cmds/core/ntpdate", - //"github.com/u-root/u-root/cmds/core/pci", - //"github.com/u-root/u-root/cmds/core/ping", - //"github.com/u-root/u-root/cmds/core/poweroff", - //"github.com/u-root/u-root/cmds/core/printenv", - "github.com/u-root/u-root/cmds/core/ps", - "github.com/u-root/u-root/cmds/core/pwd", - //"github.com/u-root/u-root/cmds/core/readlink", - "github.com/u-root/u-root/cmds/core/rm", - //"github.com/u-root/u-root/cmds/core/rmmod", - //"github.com/u-root/u-root/cmds/core/rsdp", - //"github.com/u-root/u-root/cmds/core/scp", - //"github.com/u-root/u-root/cmds/core/seq", - //"github.com/u-root/u-root/cmds/core/shasum", - //"github.com/u-root/u-root/cmds/core/shutdown", - //"github.com/u-root/u-root/cmds/core/sleep", 
- //"github.com/u-root/u-root/cmds/core/sluinit", - "github.com/u-root/u-root/cmds/core/sort", - //////"github.com/u-root/u-root/cmds/core/sshd", - //////"github.com/u-root/u-root/cmds/core/strace", - //"github.com/u-root/u-root/cmds/core/strings", - //////"github.com/u-root/u-root/cmds/core/stty", - //////"github.com/u-root/u-root/cmds/core/switch_root", - //"github.com/u-root/u-root/cmds/core/sync", - //"github.com/u-root/u-root/cmds/core/tail", - //"github.com/u-root/u-root/cmds/core/tar", - //"github.com/u-root/u-root/cmds/core/tee", - //"github.com/u-root/u-root/cmds/core/time", - //"github.com/u-root/u-root/cmds/core/timeout", - "github.com/u-root/u-root/cmds/core/tr", - "github.com/u-root/u-root/cmds/core/true", - //"github.com/u-root/u-root/cmds/core/truncate", - //"github.com/u-root/u-root/cmds/core/ts", - //"github.com/u-root/u-root/cmds/core/umount", - //////"github.com/u-root/u-root/cmds/core/uname", - "github.com/u-root/u-root/cmds/core/uniq", - //"github.com/u-root/u-root/cmds/core/unmount", - //"github.com/u-root/u-root/cmds/core/unshare", - //"github.com/u-root/u-root/cmds/core/uptime", - //"github.com/u-root/u-root/cmds/core/watchdog", - //"github.com/u-root/u-root/cmds/core/watchdogd", - "github.com/u-root/u-root/cmds/core/wc", - //"github.com/u-root/u-root/cmds/core/wget", - //////"github.com/u-root/u-root/cmds/core/which", - "github.com/u-root/u-root/cmds/core/yes", - //// One of these commands in exp tickles a busybox bug. - ////"github.com/u-root/u-root/cmds/exp/acpicat", - ////"github.com/u-root/u-root/cmds/exp/acpigrep", - ////"github.com/u-root/u-root/cmds/exp/ansi", - ////"github.com/u-root/u-root/cmds/exp/bootvars", - ////"github.com/u-root/u-root/cmds/exp/bzimage", - ////"github.com/u-root/u-root/cmds/exp/cbmem", - ////"github.com/u-root/u-root/cmds/exp/console", - ////"github.com/u-root/u-root/cmds/exp/crc", - ////"github.com/u-root/u-root/cmds/exp/disk_unlock", - ////"github.com/u-root/u-root/cmds/exp/dmidecode", - ////"github.com/u-root/u-root/cmds/exp/dumpebda", - ////"github.com/u-root/u-root/cmds/exp/ectool", - ////"github.com/u-root/u-root/cmds/exp/ed", - ////"github.com/u-root/u-root/cmds/exp/efivarfs", - ////"github.com/u-root/u-root/cmds/exp/esxiboot", - ////"github.com/u-root/u-root/cmds/exp/fbsplash", - ////"github.com/u-root/u-root/cmds/exp/fdtdump", - ////"github.com/u-root/u-root/cmds/exp/field", - ////"github.com/u-root/u-root/cmds/exp/fixrsdp", - ////"github.com/u-root/u-root/cmds/exp/forth", - ////"github.com/u-root/u-root/cmds/exp/freq", - ////"github.com/u-root/u-root/cmds/exp/getty", - ////"github.com/u-root/u-root/cmds/exp/gosh", - ////"github.com/u-root/u-root/cmds/exp/hdparm", - ////"github.com/u-root/u-root/cmds/exp/ipmidump", - ////"github.com/u-root/u-root/cmds/exp/kconf", - ////"github.com/u-root/u-root/cmds/exp/lsfabric", - ////"github.com/u-root/u-root/cmds/exp/madeye", - ////"github.com/u-root/u-root/cmds/exp/modprobe", - ////"github.com/u-root/u-root/cmds/exp/netbootxyz", - ////"github.com/u-root/u-root/cmds/exp/newsshd", - ////"github.com/u-root/u-root/cmds/exp/nvme_unlock", - ////"github.com/u-root/u-root/cmds/exp/page", - ////"github.com/u-root/u-root/cmds/exp/partprobe", - ////"github.com/u-root/u-root/cmds/exp/pflask", - ////"github.com/u-root/u-root/cmds/exp/pox", - ////"github.com/u-root/u-root/cmds/exp/pxeserver", - ////"github.com/u-root/u-root/cmds/exp/readpe", - ////"github.com/u-root/u-root/cmds/exp/run", - ////"github.com/u-root/u-root/cmds/exp/rush", - ////"github.com/u-root/u-root/cmds/exp/smbios_transfer", - 
////"github.com/u-root/u-root/cmds/exp/smn", - ////"github.com/u-root/u-root/cmds/exp/srvfiles", - ////"github.com/u-root/u-root/cmds/exp/ssh", - //////"github.com/u-root/u-root/cmds/exp/syscallfilter", - ////"github.com/u-root/u-root/cmds/exp/tac", - ////"github.com/u-root/u-root/cmds/exp/tcz", - ////"github.com/u-root/u-root/cmds/exp/uefiboot", - ////"github.com/u-root/u-root/cmds/exp/vboot", - ////"github.com/u-root/u-root/cmds/exp/watch", - ////"github.com/u-root/u-root/cmds/exp/zbi", - ////"github.com/u-root/u-root/cmds/exp/zimage", - }, -} diff --git a/tools/mkinitramfs/main.go b/tools/mkinitramfs/main.go deleted file mode 100644 index 086dba97a3..0000000000 --- a/tools/mkinitramfs/main.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2015-2018 the u-root Authors. All rights reserved -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// mkinitramfs creates a u-root initramfs given the list of files on the -// command line. -package mkinitramfs - -import ( - "flag" - "log" - "os" - - "github.com/u-root/u-root/pkg/uroot" - "github.com/u-root/u-root/pkg/uroot/initramfs" -) - -var outputFile = flag.String("o", "initramfs.cpio", "Initramfs output file") - -func main() { - flag.Parse() - - if flag.NArg() == 0 { - log.Fatalf("must specify at least one file to include in initramfs") - } - logger := log.New(os.Stderr, "", log.LstdFlags) - - // Open the target initramfs file. - w, err := initramfs.CPIO.OpenWriter(logger, *outputFile) - if err != nil { - log.Fatalf("failed to open cpio archive %q: %v", *outputFile, err) - } - - files := initramfs.NewFiles() - archive := &initramfs.Opts{ - Files: files, - OutputFile: w, - BaseArchive: uroot.DefaultRamfs().Reader(), - } - if err := uroot.ParseExtraFiles(logger, archive.Files, flag.Args(), false); err != nil { - log.Fatalf("failed to parse file names %v: %v", flag.Args(), err) - } - - if err := initramfs.Write(archive); err != nil { - log.Fatalf("failed to write archive %q: %v", *outputFile, err) - } -} diff --git a/u-root.go b/u-root.go index 7b5d38e3c0..ea6934b16a 100644 --- a/u-root.go +++ b/u-root.go @@ -2,154 +2,28 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// Command u-root builds CPIO archives with the given files and Go commands. package main import ( - "encoding/json" "errors" "flag" "fmt" "log" + "log/slog" "os" - "path" - "path/filepath" - "runtime" - "sort" - "strings" - "time" + "github.com/dustin/go-humanize" "github.com/u-root/gobusybox/src/pkg/golang" - "github.com/u-root/gobusybox/src/pkg/uflag" - "github.com/u-root/u-root/pkg/shlex" - "github.com/u-root/u-root/pkg/ulog" - "github.com/u-root/u-root/pkg/uroot" - "github.com/u-root/u-root/pkg/uroot/builder" - "github.com/u-root/u-root/pkg/uroot/initramfs" + "github.com/u-root/mkuimage/uimage" + "github.com/u-root/mkuimage/uimage/mkuimage" + "github.com/u-root/uio/llog" ) -// multiFlag is used for flags that support multiple invocations, e.g. -files -type multiFlag []string - -func (m *multiFlag) String() string { - return fmt.Sprint(*m) -} - -func (m *multiFlag) Set(value string) error { - *m = append(*m, value) - return nil -} - -// errors from the u-root command var ( - ErrEmptyFilesArg = errors.New("empty argument to -files") + errEmptyFilesArg = errors.New("empty argument to -files") ) -// Flags for u-root builder. 
-var ( - build, format, tmpDir, base, outputPath *string - uinitCmd, initCmd *string - defaultShell *string - useExistingInit *bool - noCommands *bool - extraFiles multiFlag - statsOutputPath *string - statsLabel *string - shellbang *bool - // For the new gobusybox support - usegobusybox *bool - genDir *string - // For the new "filepath only" logic - urootSourceDir *string -) - -func init() { - var sh string - switch golang.Default().GOOS { - case "plan9": - sh = "" - default: - sh = "gosh" - } - - build = flag.String("build", "gbb", "u-root build format (e.g. bb/gbb or binary).") - format = flag.String("format", "cpio", "Archival format.") - - tmpDir = flag.String("tmpdir", "", "Temporary directory to put binaries in.") - - base = flag.String("base", "", "Base archive to add files to. By default, this is a couple of directories like /bin, /etc, etc. u-root has a default internally supplied set of files; use base=/dev/null if you don't want any base files.") - useExistingInit = flag.Bool("useinit", false, "Use existing init from base archive (only if --base was specified).") - outputPath = flag.String("o", "", "Path to output initramfs file.") - - initCmd = flag.String("initcmd", "init", "Symlink target for /init. Can be an absolute path or a u-root command name. Use initcmd=\"\" if you don't want the symlink.") - uinitCmd = flag.String("uinitcmd", "", "Symlink target and arguments for /bin/uinit. Can be an absolute path or a u-root command name. Use uinitcmd=\"\" if you don't want the symlink. E.g. -uinitcmd=\"echo foobar\"") - defaultShell = flag.String("defaultsh", sh, "Default shell. Can be an absolute path or a u-root command name. Use defaultsh=\"\" if you don't want the symlink.") - - noCommands = flag.Bool("nocmd", false, "Build no Go commands; initramfs only") - - flag.Var(&extraFiles, "files", "Additional files, directories, and binaries (with their ldd dependencies) to add to archive. Can be specified multiple times.") - - shellbang = flag.Bool("shellbang", false, "Use #! instead of symlinks for busybox") - - statsOutputPath = flag.String("stats-output-path", "", "Write build stats to this file (JSON)") - statsLabel = flag.String("stats-label", "", "Use this statsLabel when writing stats") - - // Flags for the gobusybox, which we hope to move to, since it works with modules. - genDir = flag.String("gen-dir", "", "Directory to generate source in") - - // Flag for the new filepath only mode. This will be required to find the u-root commands and make templates work - // In almost every case, "." is fine. 
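	// Note: main (below) also reads the UROOT_SOURCE environment
	// variable and applies it when this flag is set to the empty string.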
- urootSourceDir = flag.String("uroot-source", ".", "Path to the locally checked out u-root source tree in case commands from there are desired.") -} - -type buildStats struct { - Label string `json:"label,omitempty"` - Time int64 `json:"time"` - Duration float64 `json:"duration"` - OutputSize int64 `json:"output_size"` -} - -func writeBuildStats(stats buildStats, path string) error { - var allStats []buildStats - if data, err := os.ReadFile(*statsOutputPath); err == nil { - json.Unmarshal(data, &allStats) - } - found := false - for i, s := range allStats { - if s.Label == stats.Label { - allStats[i] = stats - found = true - break - } - } - if !found { - allStats = append(allStats, stats) - sort.Slice(allStats, func(i, j int) bool { - return strings.Compare(allStats[i].Label, allStats[j].Label) == -1 - }) - } - data, err := json.MarshalIndent(allStats, "", " ") - if err != nil { - return err - } - if err := os.WriteFile(*statsOutputPath, data, 0o644); err != nil { - return err - } - return nil -} - -func generateLabel(env *golang.Environ) string { - var baseCmds []string - if len(flag.Args()) > 0 { - // Use the last component of the name to keep the label short - for _, e := range flag.Args() { - baseCmds = append(baseCmds, path.Base(e)) - } - } else { - baseCmds = []string{"core"} - } - return fmt.Sprintf("%s-%s-%s-%s", *build, env.GOOS, env.GOARCH, strings.Join(baseCmds, "_")) -} - // checkArgs checks for common mistakes that cause confusion. // 1. -files as the last argument // 2. -files followed by any switch, indicating a shell expansion problem @@ -166,14 +40,14 @@ func checkArgs(args ...string) error { } if args[len(args)-1] == "-files" { - return fmt.Errorf("last argument is -files:%w", ErrEmptyFilesArg) + return fmt.Errorf("last argument is -files:%w", errEmptyFilesArg) } // We know the last arg is not -files; scan the arguments for -files // followed by a switch. for i := 0; i < len(args)-1; i++ { if args[i] == "-files" && args[i+1][0] == '-' { - return fmt.Errorf("-files argument %d is followed by a switch: %w", i, ErrEmptyFilesArg) + return fmt.Errorf("-files argument %d is followed by a switch: %w", i, errEmptyFilesArg) } } @@ -181,249 +55,58 @@ func checkArgs(args ...string) error { } func main() { + log.SetFlags(log.Ltime) if err := checkArgs(os.Args...); err != nil { log.Fatal(err) } - gbbOpts := &golang.BuildOpts{} - gbbOpts.RegisterFlags(flag.CommandLine) - // Register an alias for -go-no-strip for backwards compatibility. - flag.CommandLine.BoolVar(&gbbOpts.NoStrip, "no-strip", false, "Build unstripped binaries") - - env := golang.Default() - env.RegisterFlags(flag.CommandLine) - tags := (*uflag.Strings)(&env.BuildTags) - flag.CommandLine.Var(tags, "tags", "Go build tags -- repeat the flag for multiple values") - - flag.Parse() - - l := log.New(os.Stderr, "", log.Ltime) - - if usrc := os.Getenv("UROOT_SOURCE"); usrc != "" && *urootSourceDir == "" { - *urootSourceDir = usrc - } - - if env.CgoEnabled { - l.Printf("Disabling CGO for u-root...") - env.CgoEnabled = false - } - l.Printf("Build environment: %s", env) - if env.GOOS != "linux" { - l.Printf("GOOS is not linux. Did you mean to set GOOS=linux?") - } - - start := time.Now() - - // Main is in a separate functions so defers run on return. 
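The checkArgs hunk above keeps the %w wrapping while renaming ErrEmptyFilesArg to errEmptyFilesArg, so errors.Is still matches the sentinel through the formatted message. A stand-alone illustration; the checkArgs body here is a cut-down stand-in, not a copy of the real function:

```go
package main

import (
	"errors"
	"fmt"
)

var errEmptyFilesArg = errors.New("empty argument to -files")

// checkArgs is a cut-down stand-in for the real function above.
func checkArgs(args ...string) error {
	if len(args) > 0 && args[len(args)-1] == "-files" {
		return fmt.Errorf("last argument is -files: %w", errEmptyFilesArg)
	}
	return nil
}

func main() {
	err := checkArgs("u-root", "-files")
	// %w preserves the sentinel, so tests can match the error class
	// without comparing strings.
	fmt.Println(errors.Is(err, errEmptyFilesArg)) // true
}
```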
- if err := Main(l, env, gbbOpts); err != nil { - l.Fatalf("Build error: %v", err) - } - - elapsed := time.Now().Sub(start) - - stats := buildStats{ - Label: *statsLabel, - Time: start.Unix(), - Duration: float64(elapsed.Milliseconds()) / 1000, - } - if stats.Label == "" { - stats.Label = generateLabel(env) - } - if stat, err := os.Stat(*outputPath); err == nil && stat.ModTime().After(start) { - l.Printf("Successfully built %q (size %d).", *outputPath, stat.Size()) - stats.OutputSize = stat.Size() - if *statsOutputPath != "" { - if err := writeBuildStats(stats, *statsOutputPath); err == nil { - l.Printf("Wrote stats to %q (label %q)", *statsOutputPath, stats.Label) - } else { - l.Printf("Failed to write stats to %s: %v", *statsOutputPath, err) - } - } - } -} - -var recommendedVersions = []string{ - "go1.20", - "go1.21", -} - -func isRecommendedVersion(v string) bool { - for _, r := range recommendedVersions { - if strings.HasPrefix(v, r) { - return true - } - } - return false -} - -func canFindSource(dir string) error { - d := filepath.Join(dir, "cmds", "core") - if _, err := os.Stat(d); err != nil { - return fmt.Errorf("can not build u-root in %q:%w (-uroot-source may be incorrect or not set)", *urootSourceDir, os.ErrNotExist) - } - return nil -} - -// Main is a separate function so defers are run on return, which they wouldn't -// on exit. -func Main(l ulog.Logger, env *golang.Environ, buildOpts *golang.BuildOpts) error { - v, err := env.Version() - if err != nil { - l.Printf("Could not get environment's Go version, using runtime's version: %v", err) - v = runtime.Version() - } - if !isRecommendedVersion(v) { - l.Printf(`WARNING: You are not using one of the recommended Go versions (have = %s, recommended = %v). - Some packages may not compile. - Go to https://golang.org/doc/install to find out how to install a newer version of Go, - or use https://godoc.org/golang.org/dl/%s to install an additional version of Go.`, - v, recommendedVersions, recommendedVersions[0]) - } - - archiver, err := initramfs.GetArchiver(*format) - if err != nil { - return err + env := golang.Default(golang.DisableCGO()) + f := &mkuimage.Flags{ + Commands: mkuimage.CommandFlags{Builder: "bb"}, + ArchiveFormat: "cpio", + OutputFile: defaultFile(env), } + f.RegisterFlags(flag.CommandLine) - // Open the target initramfs file. - if *outputPath == "" { - if len(env.GOOS) == 0 && len(env.GOARCH) == 0 { - return fmt.Errorf("passed no path, GOOS, and GOARCH to CPIOArchiver.OpenWriter") - } - *outputPath = fmt.Sprintf("/tmp/initramfs.%s_%s.cpio", env.GOOS, env.GOARCH) - } - w, err := archiver.OpenWriter(l, *outputPath) - if err != nil { - return err - } + l := llog.Default() + l.RegisterVerboseFlag(flag.CommandLine, "v", slog.LevelDebug) - var baseFile initramfs.Reader - if *base != "" { - bf, err := os.Open(*base) - if err != nil { - return err - } - defer bf.Close() - baseFile = archiver.Reader(bf) - } else { - baseFile = uroot.DefaultRamfs().Reader() - } + tf := &mkuimage.TemplateFlags{} + tf.RegisterFlags(flag.CommandLine) + flag.Parse() - tempDir := *tmpDir - if tempDir == "" { - var err error - tempDir, err = os.MkdirTemp("", "u-root") - if err != nil { - return err - } - defer os.RemoveAll(tempDir) - } else if _, err := os.Stat(tempDir); os.IsNotExist(err) { - if err := os.MkdirAll(tempDir, 0o755); err != nil { - return fmt.Errorf("temporary directory %q did not exist; tried to mkdir but failed: %v", tempDir, err) - } + // Set defaults. 
+ m := []uimage.Modifier{ + uimage.WithReplaceEnv(env), + uimage.WithBaseArchive(uimage.DefaultRamfs()), + uimage.WithCPIOOutput(defaultFile(env)), + uimage.WithInit("init"), } - - var ( - c []uroot.Commands - initCommand = *initCmd - ) - if !*noCommands { - var b builder.Builder - switch *build { - case "bb", "gbb": - l.Printf("NOTE: building with the new gobusybox; to get the old behavior check out commit 8b790de") - b = builder.GBBBuilder{ShellBang: *shellbang} - case "binary": - b = builder.BinaryBuilder{} - case "source": - return fmt.Errorf("source mode has been deprecated") - default: - return fmt.Errorf("could not find builder %q", *build) - } - - // Resolve globs into package imports. - // - // Currently allowed format: - // Paths to Go package directories; e.g. $GOPATH/src/github.com/u-root/u-root/cmds/* - // u-root templates; e.g. all, core, minimal (requires uroot-source be valid) - // Import paths of u-root commands; e.g. github.com/u-root/u-root/cmds/* (requires uroot-source) - var pkgs []string - for _, a := range flag.Args() { - p, ok := templates[a] - if !ok { - if !validateArg(a) { - l.Printf("%q is not a valid path, allowed are only existing relative or absolute file paths!", a) - continue - } - pkgs = append(pkgs, a) - continue - } - pkgs = append(pkgs, p...) - } - if len(pkgs) == 0 { - pkgs = []string{"github.com/u-root/u-root/cmds/core/*"} - } - - // The command-line tool only allows specifying one build mode - // right now. - c = append(c, uroot.Commands{ - Builder: b, - Packages: pkgs, - }) + if golang.Default().GOOS != "plan9" { + m = append(m, uimage.WithShell("gosh")) } - opts := uroot.Opts{ - Env: env, - Commands: c, - UrootSource: *urootSourceDir, - TempDir: tempDir, - ExtraFiles: extraFiles, - OutputFile: w, - BaseArchive: baseFile, - UseExistingInit: *useExistingInit, - InitCmd: initCommand, - DefaultShell: *defaultShell, - BuildOpts: buildOpts, + pkgs := flag.Args() + // Only add default packages if no config template was given. + // + // Otherwise, the template can't erase the default packages and all + // templates would be forced to use cmds/core/*. + if len(pkgs) == 0 && tf.Config == "" { + pkgs = []string{"github.com/u-root/u-root/cmds/core/*"} } - uinitArgs := shlex.Argv(*uinitCmd) - if len(uinitArgs) > 0 { - opts.UinitCmd = uinitArgs[0] + if err := mkuimage.CreateUimage(l, m, tf, f, pkgs); err != nil { + l.Errorf("mkuimage error: %v", err) + os.Exit(1) } - if len(uinitArgs) > 1 { - opts.UinitArgs = uinitArgs[1:] - } - return uroot.CreateInitramfs(l, opts) -} -func validateArg(arg string) bool { - // Do the simple thing first: stat the path. - // This saves incorrect diagnostics when the - // path is a perfectly valid relative path. 
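The rewritten main above replaces u-root's monolithic uroot.Opts with mkuimage's functional options (uimage.Modifier). A trimmed programmatic sketch of the same wiring, using only the constructors visible in this diff; flag registration and template handling are elided, so treat it as an assumption-laden sketch rather than the mkuimage API reference:

```go
package main

import (
	"os"

	"github.com/u-root/gobusybox/src/pkg/golang"
	"github.com/u-root/mkuimage/uimage"
	"github.com/u-root/mkuimage/uimage/mkuimage"
	"github.com/u-root/uio/llog"
)

func main() {
	env := golang.Default(golang.DisableCGO())
	l := llog.Default()

	// Flag defaults as set in the new main(); CLI registration elided.
	f := &mkuimage.Flags{
		Commands:      mkuimage.CommandFlags{Builder: "bb"},
		ArchiveFormat: "cpio",
		OutputFile:    "/tmp/initramfs.cpio",
	}
	m := []uimage.Modifier{
		uimage.WithReplaceEnv(env),
		uimage.WithBaseArchive(uimage.DefaultRamfs()),
		uimage.WithCPIOOutput(f.OutputFile),
		uimage.WithInit("init"),
		uimage.WithShell("gosh"), // skipped on plan9 in the real main
	}
	pkgs := []string{"github.com/u-root/u-root/cmds/core/*"}
	if err := mkuimage.CreateUimage(l, m, &mkuimage.TemplateFlags{}, f, pkgs); err != nil {
		l.Errorf("mkuimage error: %v", err)
		os.Exit(1)
	}
}
```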
- if _, err := os.Stat(arg); err == nil { - return true + if stat, err := os.Stat(f.OutputFile); err == nil && f.ArchiveFormat == "cpio" { + l.Infof("Successfully built %q (size %d bytes -- %s).", f.OutputFile, stat.Size(), humanize.IBytes(uint64(stat.Size()))) } - if !checkPrefix(arg) { - paths, err := filepath.Glob(arg) - if err != nil { - return false - } - for _, path := range paths { - if !checkPrefix(path) { - return false - } - } - } - - return true } -func checkPrefix(arg string) bool { - prefixes := []string{".", "/", "-", "cmds", "github.com/u-root/u-root"} - for _, prefix := range prefixes { - if strings.HasPrefix(arg, prefix) { - return true - } +func defaultFile(env *golang.Environ) string { + if len(env.GOOS) == 0 || len(env.GOARCH) == 0 { + return "/tmp/initramfs.cpio" } - - return false + return fmt.Sprintf("/tmp/initramfs.%s_%s.cpio", env.GOOS, env.GOARCH) } diff --git a/uroot_test.go b/uroot_test.go index cadc8fc546..95a3698552 100644 --- a/uroot_test.go +++ b/uroot_test.go @@ -357,9 +357,9 @@ func TestCheckArgs(t *testing.T) { args []string err error }{ - {"-files is only arg", []string{"-files"}, ErrEmptyFilesArg}, - {"-files followed by -files", []string{"-files", "-files"}, ErrEmptyFilesArg}, - {"-files followed by any other switch", []string{"-files", "-abc"}, ErrEmptyFilesArg}, + {"-files is only arg", []string{"-files"}, errEmptyFilesArg}, + {"-files followed by -files", []string{"-files", "-files"}, errEmptyFilesArg}, + {"-files followed by any other switch", []string{"-files", "-abc"}, errEmptyFilesArg}, {"no args", []string{}, nil}, {"u-root alone", []string{"u-root"}, nil}, {"u-root with -files and other args", []string{"u-root", "-files", "/bin/bash", "core"}, nil}, diff --git a/vendor/github.com/google/goterm/AUTHORS b/vendor/github.com/google/goterm/AUTHORS deleted file mode 100644 index 15167cd746..0000000000 --- a/vendor/github.com/google/goterm/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/google/goterm/LICENSE b/vendor/github.com/google/goterm/LICENSE deleted file mode 100644 index 931520b99e..0000000000 --- a/vendor/github.com/google/goterm/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2015 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/google/goterm/term/color.go b/vendor/github.com/google/goterm/term/color.go deleted file mode 100644 index 4bba716073..0000000000 --- a/vendor/github.com/google/goterm/term/color.go +++ /dev/null @@ -1,677 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package term - -/* -Some simple functions to add colors and attributes to terminals. - -The base colors are types implementing the Stringer interface, this makes -it very simple to give a color to arbitrary strings. Also handy to have the raw string still -available for comparisons and such. - - g := Green("Green world") - fmt.Println("Hello",g) - fmt.Println(Red("Warning!")) - - var col fmt.Stringer - switch { - case atk == 0: - col = Blue("5 FADE OUT") - case atk < 4: - col = Green("4 DOUBLE TAKE") - case atk <10: - col = Yellow("3 ROUND HOUSE") - case atk <50: - col = Red("2 FAST PACE") - case atk >= 50: - col = Blinking("1 COCKED PISTOL") - } - fmt.Println("Defcon: ",col) -*/ - -import ( - "errors" - "fmt" - "math/rand" - "strconv" -) - -type stringer interface { - String() string -} - -// colorEnable toggles colors on/off. -var colorEnable = true - -// ColorEnable activates the terminal colors , this is the default. -func ColorEnable() { - colorEnable = true -} - -// ColorDisable disables the terminal colors. -func ColorDisable() { - colorEnable = false -} - -// Terminal Color and modifier codes -const ( - CSI = "\033[" - FgBlack = "30" - FgRed = "31" - FgGreen = "32" - FgYellow = "33" - FgBlue = "34" - FgMagenta = "35" - FgCyan = "36" - FgWhite = "37" - FgDefault = "39" - F256 = "38" - BgBlack = "40" - BgRed = "41" - BgGreen = "42" - BgYellow = "43" - BgBlue = "44" - BgMagenta = "45" - BgCyan = "46" - BgWhite = "47" - BgDefault = "49" - Bg256 = "48" - Blink = "5" - Ital = "3" - Underln = "4" - Faint = "2" - Bld = "1" - NoMode = "0" -) - -// Standard colors -// Foreground - -// Green implements the Stringer interface to print string foreground in Green color. -type Green string - -// Blue implements the Stringer interface to print string foreground in Blue color. -type Blue string - -// Red implements the Stringer interface to print string foreground in Red color. -type Red string - -// Yellow implements the Stringer interface to print string foreground in Yellow color. -type Yellow string - -// Magenta implements the Stringer interface to print string foreground in Magenta color. -type Magenta string - -// Cyan implements the Stringer interface to print string foreground in Cyan color. -type Cyan string - -// White implements the Stringer interface to print string foreground in White color. -type White string - -// Black implements the Stringer interface to print string foreground in Black color. -type Black string - -// Random implements the Stringer interface to print string foreground in Random color. 
-type Random string - -// Background - -// BGreen implements the Stringer interface to print string background in Green color. -type BGreen string - -// BBlue implements the Stringer interface to print string background in Blue color. -type BBlue string - -// BRed implements the Stringer interface to print string background in Red color. -type BRed string - -// BYellow implements the Stringer interface to print string background in Yellow color. -type BYellow string - -// BRandom implements the Stringer interface to print string background in Random color. -type BRandom string - -// BMagenta implements the Stringer interface to print string background in Magenta color. -type BMagenta string - -// BCyan implements the Stringer interface to print string background in Cyan color. -type BCyan string - -// BWhite implements the Stringer interface to print string background in White color. -type BWhite string - -// BBlack implements the Stringer interface to print string background in Black color. -type BBlack string - -// Set color - -// Color is the type returned by the colour setters to print any terminal colour. -type Color string - -// ColorRandom implements the Stringer interface to print string Random color. -type ColorRandom string - -// Color256Random implements the Stringer interface to print string random 256 color Term style. -type Color256Random string - -// Some modifiers - -// Blinking implements the Stringer interface to print string in Blinking mode. -type Blinking string - -// Underline implements the Stringer interface to print string in Underline mode. -type Underline string - -// Bold implements the Stringer interface to print string in Bold mode. -type Bold string - -//type Bright string -- Doesn't seem to work well - -// Italic implements the Stringer interface to print string foreground in Italic color. -type Italic string - -// colType takes all the base color types and generates proper modifiers. -func colType(col stringer) string { - nMode := FgDefault - var mode, res string - switch c := col.(type) { - case Black: - mode = FgBlack - res = string(c) - case Red: - mode = FgRed - res = string(c) - case Green: - mode = FgGreen - res = string(c) - case Yellow: - mode = FgYellow - res = string(c) - case Blue: - mode = FgBlue - res = string(c) - case Magenta: - mode = FgMagenta - res = string(c) - case Cyan: - mode = FgCyan - res = string(c) - case White: - mode = FgWhite - res = string(c) - case BBlack: - mode = BgBlack - res = string(c) - case BRed: - mode = BgRed - nMode = BgDefault - res = string(c) - case BGreen: - mode = BgGreen - nMode = BgDefault - res = string(c) - case BYellow: - mode = BgYellow - nMode = BgDefault - res = string(c) - case BBlue: - mode = BgBlue - nMode = BgDefault - res = string(c) - case BMagenta: - mode = BgMagenta - nMode = BgDefault - res = string(c) - case BCyan: - mode = BgCyan - nMode = BgDefault - res = string(c) - case BWhite: - mode = BgWhite - nMode = BgDefault - res = string(c) - case Blinking: - mode = Blink - nMode = NoMode - res = string(c) - case Italic: - mode = Ital - nMode = NoMode - res = string(c) - case Underline: - mode = Underln - nMode = NoMode - res = string(c) - case Bold: - nMode = NoMode - mode = Bld - res = string(c) - default: - return "unsupported type" - } - if !colorEnable { - return res - } - return CSI + mode + "m" + res + CSI + nMode + "m" -} - -// Stringers for all the base colors , just fill it in with something and print it -// Foreground - -// String implements the Stringer interface for type Green. 
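colType, deleted above, renders every color and attribute as CSI <code> m <text> CSI <reset> m. With the vendored goterm gone, the same effect needs nothing beyond raw ANSI SGR codes, which are terminal-standard rather than goterm-specific; a dependency-free sketch:

```go
package main

import "fmt"

const csi = "\033["

// colorize wraps s in an ANSI SGR sequence, the same shape goterm's
// colType produces: CSI <mode> m <text> CSI <reset> m.
func colorize(s, mode, reset string) string {
	return csi + mode + "m" + s + csi + reset + "m"
}

func main() {
	// 31 = red foreground, 39 = default foreground (see the constants above).
	fmt.Println(colorize("Warning!", "31", "39"))
}
```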
-func (c Green) String() string { - return colType(c) -} - -// Greenf returns a Green formatted string. -func Greenf(format string, a ...interface{}) string { - return colType(Green(fmt.Sprintf(format, a...))) -} - -// String implements the Stringer interface for type Blue. -func (c Blue) String() string { - return colType(c) -} - -// Bluef returns a Blue formatted string. -func Bluef(format string, a ...interface{}) string { - return colType(Blue(fmt.Sprintf(format, a...))) -} - -// String implements the Stringer interface for type Red. -func (c Red) String() string { - return colType(c) -} - -// Redf returns a Red formatted string. -func Redf(format string, a ...interface{}) string { - return colType(Red(fmt.Sprintf(format, a...))) -} - -// String implements the Stringer interface for type Yellow. -func (c Yellow) String() string { - return colType(c) -} - -// Yellowf returns a Yellow formatted string. -func Yellowf(format string, a ...interface{}) string { - return colType(Yellow(fmt.Sprintf(format, a...))) -} - -// String implements the Stringer interface for type Magenta. -func (c Magenta) String() string { - return colType(c) -} - -// Magentaf returns a Magenta formatted string. -func Magentaf(format string, a ...interface{}) string { - return colType(Magenta(fmt.Sprintf(format, a...))) -} - -// String implements the Stringer interface for type White. -func (c White) String() string { - return colType(c) -} - -// Whitef returns a White formatted string. -func Whitef(format string, a ...interface{}) string { - return colType(White(fmt.Sprintf(format, a...))) -} - -// String implements the Stringer interface for type Black. -func (c Black) String() string { - return colType(c) -} - -// Blackf returns a Black formatted string. -func Blackf(format string, a ...interface{}) string { - return colType(Black(fmt.Sprintf(format, a...))) -} - -// String implements the Stringer interface for type Cyan. -func (c Cyan) String() string { - return colType(c) -} - -// Cyanf returns a Cyan formatted string. -func Cyanf(format string, a ...interface{}) string { - return colType(Cyan(fmt.Sprintf(format, a...))) -} - -// Background - -// String implements the Stringer interface for type BGreen. -func (c BGreen) String() string { - return colType(c) -} - -// BGreenf returns a BGreen formatted string. -func BGreenf(format string, a ...interface{}) string { - return colType(BGreen(fmt.Sprintf(format, a...))) -} - -// String implements the Stringer interface for type BBlue. -func (c BBlue) String() string { - return colType(c) -} - -// BBluef returns a BBlue formatted string. -func BBluef(format string, a ...interface{}) string { - return colType(BBlue(fmt.Sprintf(format, a...))) -} - -// String implements the Stringer interface for type BRed. -func (c BRed) String() string { - return colType(c) -} - -// BRedf returns a BRed formatted string. -func BRedf(format string, a ...interface{}) string { - return colType(BRed(fmt.Sprintf(format, a...))) -} - -// String implements the Stringer interface for type BYellow. -func (c BYellow) String() string { - return colType(c) -} - -// BYellowf returns a BYellow formatted string. -func BYellowf(format string, a ...interface{}) string { - return colType(BYellow(fmt.Sprintf(format, a...))) -} - -// String implements the Stringer interface for type BMagenta. -func (c BMagenta) String() string { - return colType(c) -} - -// BMagentaf returns a BMagenta formatted string. 
-func BMagentaf(format string, a ...interface{}) string { - return colType(BMagenta(fmt.Sprintf(format, a...))) -} - -// String implements the Stringer interface for type BWhite. -func (c BWhite) String() string { - return colType(c) -} - -// BWhitef returns a BWhite formatted string. -func BWhitef(format string, a ...interface{}) string { - return colType(BWhite(fmt.Sprintf(format, a...))) -} - -// String implements the Stringer interface for type BBlack. -func (c BBlack) String() string { - return colType(c) -} - -// BBlackf returns a BBlack formatted string. -func BBlackf(format string, a ...interface{}) string { - return colType(BBlack(fmt.Sprintf(format, a...))) -} - -// String implements the Stringer interface for type BCyan. -func (c BCyan) String() string { - return colType(c) -} - -// BCyanf returns a BCyan formatted string. -func BCyanf(format string, a ...interface{}) string { - return colType(BCyan(fmt.Sprintf(format, a...))) -} - -// Modifier codes - -// String implements the Stringer interface for type Blinking. -func (c Blinking) String() string { - return colType(c) -} - -// String implements the Stringer interface for type Underline. -func (c Underline) String() string { - return colType(c) -} - -// String implements the Stringer interface for type Bold. -func (c Bold) String() string { - return colType(c) -} - -// String implements the Stringer interface for type Italic. -func (c Italic) String() string { - return colType(c) -} - -// NewColor gives a type Color back with specified fg/bg colors set that can -// be printed with anything using the Stringer iface. -func NewColor(str string, fg string, bg string) (Color, error) { - if fg != "" { - ifg, err := strconv.Atoi(fg) - if err != nil { - return Color(""), err - } - if ifg < 30 && ifg > 37 { - return Color(""), errors.New("fg: " + fg + "not a valid color 30-37") - } - } else { - fg = FgDefault - } - if bg != "" { - ibg, err := strconv.Atoi(bg) - if err != nil { - return Color(""), err - } - if ibg < 40 && ibg > 47 { - return Color(""), errors.New("fg: " + fg + "not a valid color 40-47") - } - } else { - bg = BgDefault - } - return Color(CSI + fg + ";" + bg + "m" + str + CSI + FgDefault + ";" + BgDefault + "m"), nil -} - -// String the stringer interface for all base color types. -func (c Color) String() string { - if !colorEnable { - clean := make([]byte, 0, len(c)) - src := []byte(c) - L1: - for i := 0; i < len(src); i++ { - // Shortest possible mod. - if len(src) < i+4 { - clean = append(clean, src[i:]...) - return string(clean) - } - if string(src[i:i+2]) == CSI { - // Save current index incase this is not a term mod code. - s := i - // skip forward to end of mod - for i += 2; i < len(src); i++ { - switch src[i] { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ';': - // Legal characters in a term mod code. - continue - case 'm': - // End of the term mod code. - continue L1 - default: - // Not a term mod code. - i = s - break - } - } - } - clean = append(clean, src[i]) - } - return string(clean) - } - return string(c) -} - -// NewColor256 gives a type Color back using Term 256 color that can be printed with anything using the Stringer iface. 
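The byte-by-byte scanner in Color.String above strips SGR sequences when colors are disabled. A regexp gives the same visible result in a few lines; note that this sketch removes every CSI...m run outright rather than re-validating each byte as the original does:

```go
package main

import (
	"fmt"
	"regexp"
)

// sgr matches CSI sequences ending in 'm' (colors and text attributes).
var sgr = regexp.MustCompile(`\x1b\[[0-9;]*m`)

func stripSGR(s string) string {
	return sgr.ReplaceAllString(s, "")
}

func main() {
	fmt.Println(stripSGR("\x1b[31mWarning!\x1b[39m")) // prints: Warning!
}
```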
-func NewColor256(str string, fg string, bg string) (Color, error) { - if fg != "" { - ifg, err := strconv.Atoi(fg) - if err != nil { - return Color(""), err - } - if ifg < 0 && ifg > 256 { - return Color(""), errors.New("fg: " + fg + " not a valid color 0-256") - } - } - if bg != "" { - ibg, err := strconv.Atoi(bg) - if err != nil { - return Color(""), err - } - if ibg < 0 && ibg > 256 { - return Color(""), errors.New("bg: " + bg + " not a valid color 0-256") - } - } - tstr := CSI + F256 + ";5;" + fg + ";" + Bg256 + ";5;" + bg + "m" - tstr += str - return Color(tstr + CSI + FgDefault + ";5;" + BgDefault + ";5;" + "m"), nil -} - -// NewColorRGB takes R G B and returns a ColorRGB type that can be printed by anything using the Stringer iface. -// Only Konsole to my knowledge that supports 24bit color -func NewColorRGB(str string, red uint8, green uint8, blue uint8) Color { - ired := strconv.Itoa(int(red)) - igreen := strconv.Itoa(int(green)) - iblue := strconv.Itoa(int(blue)) - tstr := CSI + F256 + ";2;" + ired + ";" + igreen + ";" + iblue + "m" - tstr += str - return Color(tstr + CSI + FgDefault + ";5;" + BgDefault + ";5;" + "m") -} - -// String is a random color stringer. -func (c ColorRandom) String() string { - if !colorEnable { - return string(c) - } - ifg := rand.Int()%8 + 30 - ibg := rand.Int()%8 + 40 - res := CSI + strconv.Itoa(ifg) + ";" + strconv.Itoa(ibg) + "m" - res += string(c) - res += CSI + strconv.Itoa(ifg) + ";" + strconv.Itoa(ibg) + "m" - return res -} - -// String gives a random fg color everytime it's printed. -func (c Random) String() string { - if !colorEnable { - return string(c) - } - ifg := int(rand.Int()%8 + 30) - res := CSI + strconv.Itoa(ifg) + "m" - res += string(c) + strconv.Itoa(int(ifg)) - res += CSI + FgDefault + "m" - return res -} - -// String gives a random bg color everytime it's printed. -func (c BRandom) String() string { - if !colorEnable { - return string(c) - } - ibg := rand.Int()%8 + 40 - res := CSI + strconv.Itoa(ibg) + "m" - res += string(c) + strconv.Itoa(ibg) - res += CSI + BgDefault + "m" - return res -} - -// NewCombo Takes a combination of modes and return a string with them all combined. -func NewCombo(s string, mods ...string) Color { - var col, bcol, mod bool - modstr := CSI - tracking := make(map[string]bool) - for _, m := range mods { - switch m { - case FgBlack, FgRed, FgGreen, FgYellow, FgBlue, FgMagenta, FgCyan, FgWhite: - if col { - continue - } - col = true - case BgBlack, BgRed, BgGreen, BgYellow, BgBlue, BgMagenta, BgCyan, BgWhite: - if bcol { - continue - } - bcol = true - case Bld, Faint, Ital, Underln, Blink: - if tracking[m] { - continue - } - tracking[m] = true - mod = true - default: - continue - } - modstr += m + ";" - } - end := CSI - if col { - end += FgDefault - } - if bcol { - if col { - end += ";" - } - end += BgDefault - } - if mod { - if col || bcol { - end += ";" - } - end += NoMode - } - end += "m" - modstr = modstr[:len(modstr)-1] + "m" - modstr += s - modstr += end - return Color(modstr) -} - -// TestTerm tries out most of the functions in this package and return -// a colourful string. Could be used to check what your terminal supports. 
-func TestTerm() string { - res := "Standard 8:\n" - res += "Fg:\t" - for c := 30; c < 38; c++ { - tres, _ := NewColor("#", strconv.Itoa(c), "") - res += tres.String() - } - res += "\nBg:\t" - for c := 40; c < 48; c++ { - tres, _ := NewColor(" ", "", strconv.Itoa(c)) - res += tres.String() - } - res += "\nStandard 16:\t" - for c := 0; c < 16; c++ { - tcol, _ := NewColor256(" ", "", strconv.Itoa(c)) - res += tcol.String() - } - res += "\n" - res += "256 Col:\n" - // 6x6x6 cubes are trendy - for row, base := 1, 0; row <= 6; row++ { - base = (row * 6) + 9 // Step over the first 16 base colors - for cubes := 1; cubes <= 6; cubes++ { - for column := 1; column <= 6; column++ { - tcol, _ := NewColor256(" ", "", strconv.Itoa(base+column)) - res += tcol.String() - } - base += 36 // 6 * 6 - } - res += "\n" - } - // Grayscale left. - res += "Grayscales:\n" - for c := 232; c <= 255; c++ { - tcol, _ := NewColor256(" ", "", strconv.Itoa(c)) - res += tcol.String() - } - return res -} diff --git a/vendor/github.com/google/goterm/term/ssh.go b/vendor/github.com/google/goterm/term/ssh.go deleted file mode 100644 index 2e7619cec6..0000000000 --- a/vendor/github.com/google/goterm/term/ssh.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - - -package term - -const ( - // Terminal attribute types. - sshIflag = iota - sshOflag - sshCflag - sshLflag - sshCchar - sshTspeed - sshNOP - - // SSH terminal attributes. - sshTTYOPEND = 0 - sshVINTR = 1 - sshVQUIT = 2 - sshVERASE = 3 - sshVKILL = 4 - sshVEOF = 5 - sshVEOL = 6 - sshVEOL2 = 7 - sshVSTART = 8 - sshVSTOP = 9 - sshVSUSP = 10 - sshVDSUSP = 11 - sshVREPRINT = 12 - sshVWERASE = 13 - sshVLNEXT = 14 - sshVFLUSH = 15 - sshVSWTCH = 16 - sshVSTATUS = 17 - sshVDISCARD = 18 - sshIGNPAR = 30 - sshPARMRK = 31 - sshINPCK = 32 - sshISTRIP = 33 - sshINLCR = 34 - sshIGNCR = 35 - sshICRNL = 36 - sshIUCLC = 37 - sshIXON = 38 - sshIXANY = 39 - sshIXOFF = 40 - sshIMAXBEL = 41 - sshISIG = 50 - sshICANON = 51 - sshXCASE = 52 - sshECHO = 53 - sshECHOE = 54 - sshECHOK = 55 - sshECHONL = 56 - sshNOFLSH = 57 - sshTOSTOP = 58 - sshIEXTEN = 59 - sshECHOCTL = 60 - sshECHOKE = 61 - sshPENDIN = 62 - sshOPOST = 70 - sshOLCUC = 71 - sshONLCR = 72 - sshOCRNL = 73 - sshONOCR = 74 - sshONLRET = 75 - sshCS7 = 90 - sshCS8 = 91 - sshPARENB = 92 - sshPARODD = 93 - sshTTYOPISPEED = 128 - sshTTYOPOSPEED = 129 -) - -var convertSSH = map[uint8]struct { - tType uint - native uint32 -}{ - sshTTYOPEND: {tType: sshNOP}, - sshVINTR: {tType: sshCchar, native: VINTR}, - sshVQUIT: {tType: sshCchar, native: VQUIT}, - sshVERASE: {tType: sshCchar, native: VERASE}, - sshVKILL: {tType: sshCchar, native: VKILL}, - sshVEOF: {tType: sshCchar, native: VEOF}, - sshVEOL: {tType: sshCchar, native: VEOL}, - sshVEOL2: {tType: sshCchar, native: VEOL2}, - sshVSTART: {tType: sshCchar, native: VSTART}, - sshVSTOP: {tType: sshCchar, native: VSTOP}, - sshVSUSP: {tType: sshCchar, native: VSUSP}, - sshVDSUSP: {tType: sshCchar, native: sshNOP}, - sshVREPRINT: {tType: sshCchar, native: VREPRINT}, - sshVWERASE: {tType: sshCchar, native: VWERASE}, - sshVLNEXT: {tType: sshCchar, native: VLNEXT}, - sshVFLUSH: {tType: sshNOP}, - sshVSWTCH: {tType: sshCchar, native: VSWTC}, - sshVSTATUS: {tType: sshNOP}, - sshVDISCARD: {tType: sshCchar, native: VDISCARD}, - sshIGNPAR: {tType: sshIflag, native: IGNPAR}, - sshPARMRK: {tType: sshIflag, native: PARMRK}, - sshINPCK: {tType: sshIflag, native: INPCK}, 
- sshISTRIP: {tType: sshIflag, native: ISTRIP}, - sshINLCR: {tType: sshIflag, native: INLCR}, - sshIGNCR: {tType: sshIflag, native: IGNCR}, - sshICRNL: {tType: sshIflag, native: ICRNL}, - sshIUCLC: {tType: sshIflag, native: IUCLC}, - sshIXON: {tType: sshIflag, native: IXON}, - sshIXANY: {tType: sshIflag, native: IXANY}, - sshIXOFF: {tType: sshIflag, native: IXOFF}, - sshIMAXBEL: {tType: sshIflag, native: IMAXBEL}, - sshISIG: {tType: sshLflag, native: ISIG}, - sshICANON: {tType: sshLflag, native: ICANON}, - sshXCASE: {tType: sshLflag, native: XCASE}, - sshECHO: {tType: sshLflag, native: ECHO}, - sshECHOE: {tType: sshLflag, native: ECHOE}, - sshECHOK: {tType: sshLflag, native: ECHOK}, - sshECHONL: {tType: sshLflag, native: ECHONL}, - sshNOFLSH: {tType: sshLflag, native: NOFLSH}, - sshTOSTOP: {tType: sshLflag, native: TOSTOP}, - sshIEXTEN: {tType: sshLflag, native: IEXTEN}, - sshECHOCTL: {tType: sshLflag, native: ECHOCTL}, - sshECHOKE: {tType: sshLflag, native: ECHOKE}, - sshPENDIN: {tType: sshNOP}, - sshOPOST: {tType: sshOflag, native: OPOST}, - sshOLCUC: {tType: sshOflag, native: OLCUC}, - sshONLCR: {tType: sshOflag, native: ONLCR}, - sshOCRNL: {tType: sshOflag, native: OCRNL}, - sshONOCR: {tType: sshOflag, native: ONOCR}, - sshONLRET: {tType: sshOflag, native: ONLRET}, - sshCS7: {tType: sshCflag, native: CS7}, - sshCS8: {tType: sshCflag, native: CS8}, - sshPARENB: {tType: sshCflag, native: PARENB}, - sshPARODD: {tType: sshCflag, native: PARODD}, - sshTTYOPISPEED: {tType: sshTspeed}, - sshTTYOPOSPEED: {tType: sshTspeed}, -} - -// ToSSH converts the Termios attributes to SSH attributes usable as ssh.TerminalModes. -func (t *Termios) ToSSH() map[uint8]uint32 { - sshModes := make(map[uint8]uint32, len(convertSSH)) - var flags uint32 - for sshID, tios := range convertSSH { - switch tios.tType { - case sshIflag: - flags = t.Iflag - case sshOflag: - flags = t.Oflag - case sshLflag: - flags = t.Lflag - case sshCflag: - flags = t.Cflag - case sshCchar: - sshModes[sshID] = uint32(t.Cc[tios.native]) - continue - case sshTspeed: - sshModes[sshTTYOPISPEED], sshModes[sshTTYOPOSPEED] = t.Ispeed, t.Ospeed - continue - default: - continue - } - var onOff uint32 - if tios.native&flags > 0 { - onOff = 1 - } - sshModes[sshID] = onOff - } - return sshModes -} - -// FromSSH converts SSH attributes to Termios attributes. -func (t *Termios) FromSSH(termModes map[uint8]uint32) { - var flags *uint32 - for sshID, val := range termModes { - switch convertSSH[sshID].tType { - case sshIflag: - flags = &t.Iflag - case sshOflag: - flags = &t.Oflag - case sshLflag: - flags = &t.Lflag - case sshCflag: - flags = &t.Cflag - case sshCchar: - t.Cc[convertSSH[sshID].native] = byte(val) - continue - case sshTspeed: - if sshID == sshTTYOPISPEED { - t.Ispeed = val - } else { - t.Ospeed = val - } - continue - default: - continue - } - if val > 0 { - *flags |= convertSSH[sshID].native - } else { - *flags &^= convertSSH[sshID].native - } - } -} diff --git a/vendor/github.com/google/goterm/term/termios.go b/vendor/github.com/google/goterm/term/termios.go deleted file mode 100644 index e1aee994f0..0000000000 --- a/vendor/github.com/google/goterm/term/termios.go +++ /dev/null @@ -1,392 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - - -/* -Package term implements a subset of the C termios library to interface with Terminals. 
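ToSSH, deleted above, produces exactly the map shape golang.org/x/crypto/ssh expects when requesting a remote PTY. A sketch of that hookup; establishing the *ssh.Session is elided, and the goterm import refers to the vendor copy this diff removes:

```go
package main

import (
	"log"
	"os"

	"golang.org/x/crypto/ssh"

	"github.com/google/goterm/term" // the vendor copy this diff removes
)

// requestPTY forwards the local terminal's modes to a remote PTY, which
// is what ToSSH exists for. sess must be an established session.
func requestPTY(sess *ssh.Session) error {
	tios, err := term.Attr(os.Stdin)
	if err != nil {
		return err
	}
	// ssh.TerminalModes is a map[uint8]uint32, the exact type ToSSH returns.
	return sess.RequestPty("xterm", 40, 80, ssh.TerminalModes(tios.ToSSH()))
}

func main() {
	log.Println("sketch: dial an SSH connection, open a session, then call requestPTY")
}
```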
- -This package allows the caller to get and set most Terminal capabilites -and sizes as well as create PTYs to enable writing things like script, -screen, tmux, and expect. - -The Termios type is used for setting/getting Terminal capabilities while -the PTY type is used for handling virtual terminals. - -Currently this part of this lib is Linux specific. - -Also implements a simple version of readline in pure Go and some Stringers -for terminal colors and attributes. -*/ -package term - -import ( - "errors" - "os" - "strconv" - "strings" - "syscall" - "unsafe" -) - -// IOCTL terminal stuff. -const ( - TCGETS = 0x5401 // TCGETS get terminal attributes - TCSETS = 0x5402 // TCSETS set terminal attributes - TIOCGWINSZ = 0x5413 // TIOCGWINSZ used to get the terminal window size - TIOCSWINSZ = 0x5414 // TIOCSWINSZ used to set the terminal window size - TIOCGPTN = 0x80045430 // TIOCGPTN IOCTL used to get the PTY number - TIOCSPTLCK = 0x40045431 // TIOCSPTLCK IOCT used to lock/unlock PTY - CBAUD = 0010017 // CBAUD Serial speed settings - CBAUDEX = 0010000 // CBAUDX Serial speed settings -) - -// INPUT handling terminal flags -// see 'man stty' for further info about most of the constants -const ( - IGNBRK = 0000001 // IGNBRK ignore break characters - BRKINT = 0000002 // BRKINT Break genereates an interrupt signal - IGNPAR = 0000004 // IGNPAR Ignore characters with parity errors - PARMRK = 0000010 // PARMRK Mark parity errors byte{ff,0} - INPCK = 0000020 // INPCK enable parity checking - ISTRIP = 0000040 // ISTRIP Clear 8th bit of input characters - INLCR = 0000100 // INLCR Translate LF => CR - IGNCR = 0000200 // IGNCR Ignore Carriage Return - ICRNL = 0000400 // ICRNL Translate CR => NL - IUCLC = 0001000 // IUCLC Translate uppercase to lowercase - IXON = 0002000 // IXON Enable flow control - IXANY = 0004000 // IXANY let any char restart input - IXOFF = 0010000 // IXOFF start sending start/stop chars - IMAXBEL = 0020000 // IMAXBEL Sound the bell and skip flushing input buffer - IUTF8 = 0040000 // IUTF8 assume input being utf-8 -) - -// OUTPUT treatment terminal flags -const ( - OPOST = 0000001 // OPOST post process output - OLCUC = 0000002 // OLCUC translate lower case to upper case - ONLCR = 0000004 // ONLCR Map NL -> CR-NL - OCRNL = 0000010 // OCRNL Map CR -> NL - ONOCR = 0000020 // ONOCR No CR at col 0 - ONLRET = 0000040 // ONLRET NL also do CR - OFILL = 0000100 // OFILL Fillchar for delay - OFDEL = 0000200 // OFDEL use delete instead of null -) - -// TERM control modes. 
-const ( - CSIZE = 0000060 // CSIZE used as mask when setting character size - CS5 = 0000000 // CS5 char size 5bits - CS6 = 0000020 // CS6 char size 6bits - CS7 = 0000040 // CS7 char size 7bits - CS8 = 0000060 // CS8 char size 8bits - CSTOPB = 0000100 // CSTOPB two stop bits - CREAD = 0000200 // CREAD enable input - PARENB = 0000400 // PARENB generate and expect parity bit - PARODD = 0001000 // PARODD set odd parity - HUPCL = 0002000 // HUPCL send HUP when last process closes term - CLOCAL = 0004000 // CLOCAL no modem control signals -) - -// TERM modes -const ( - ISIG = 0000001 // ISIG enable Interrupt,quit and suspend chars - ICANON = 0000002 // ICANON enable erase,kill ,werase and rprnt chars - XCASE = 0000004 // XCASE preceedes all uppercase chars with '\' - ECHO = 0000010 // ECHO echo input characters - ECHOE = 0000020 // ECHOE erase => BS - SPACE - BS - ECHOK = 0000040 // ECHOK add newline after kill char - ECHONL = 0000100 // ECHONL echo NL even without other characters - NOFLSH = 0000200 // NOFLSH no flush after interrupt and kill characters - TOSTOP = 0000400 // TOSTOP stop BG jobs trying to write to term - ECHOCTL = 0001000 // ECHOCTL will echo control characters as ^c - ECHOPRT = 0002000 // ECHOPRT will print erased characters between \ / - ECHOKE = 0004000 // ECHOKE kill all line considering ECHOPRT and ECHOE flags - IEXTEN = 0100000 // IEXTEN enable non POSIX special characters -) - -// Control characters -const ( - VINTR = 0 // VINTR char will send an interrupt signal - VQUIT = 1 // VQUIT char will send a quit signal - VERASE = 2 // VEREASE char will erase last typed char - VKILL = 3 // VKILL char will erase current line - VEOF = 4 // VEOF char will send EOF - VTIME = 5 // VTIME set read timeout in tenths of seconds - VMIN = 6 // VMIN set min characters for a complete read - VSWTC = 7 // VSWTC char will switch to a different shell layer - VSTART = 8 // VSTART char will restart output after stopping it - VSTOP = 9 // VSTOP char will stop output - VSUSP = 10 // VSUSP char will send a stop signal - VEOL = 11 // VEOL char will end the line - VREPRINT = 12 // VREPRINT will redraw the current line - VDISCARD = 13 // VDISCARD - VWERASE = 14 // VWERASE char will erase last word typed - VLNEXT = 15 // VLNEXT char will enter the next char quoted - VEOL2 = 16 // VEOL2 char alternate to end line - tNCCS = 32 // tNCCS Termios CC size -) - -// Termios merge of the C Terminal and Kernel termios structs. -type Termios struct { - Iflag uint32 // Iflag Handles the different Input modes - Oflag uint32 // Oflag For the different Output modes - Cflag uint32 // Cflag Control modes - Lflag uint32 // Lflag Local modes - Line byte // Line - Cc [tNCCS]byte // Cc Control characters. How to handle special Characters eg. Backspace being ^H or ^? and so on - Ispeed uint32 // Ispeed Hardly ever used speed of terminal - Ospeed uint32 // Ospeed " - Wz Winsize // Wz Terminal size information. -} - -// Winsize handle the terminal window size. -type Winsize struct { - WsRow uint16 // WsRow Terminal number of rows - WsCol uint16 // WsCol Terminal number of columns - WsXpixel uint16 // WsXpixel Terminal width in pixels - WsYpixel uint16 // WsYpixel Terminal height in pixels -} - -// PTY the PTY Master/Slave are always bundled together so makes sense to bundle here too. -// -// Slave - implements the virtual terminal functionality and the place you connect client applications -// Master - Things written to the Master are forwarded to the Slave terminal and the other way around. 
-// This gives reading from Master would give you nice line-by-line with no strange characters in -// Cooked() Mode and every char in Raw() mode. -// -// Since Slave is a virtual terminal it depends on the terminal settings ( in this lib the Termios ) what -// and when data is forwarded through the terminal. -// -// See 'man pty' for further info -type PTY struct { - Master *os.File // Master The Master part of the PTY - Slave *os.File // Slave The Slave part of the PTY -} - -// Raw Sets terminal t to raw mode. -// This gives that the terminal will do the absolut minimal of processing, pretty much send everything through. -// This is normally what Shells and such want since they have their own readline and movement code. -func (t *Termios) Raw() { - t.Iflag &^= IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON - // t.Iflag &^= BRKINT | ISTRIP | ICRNL | IXON // Stevens RAW - t.Oflag &^= OPOST - t.Lflag &^= ECHO | ECHONL | ICANON | ISIG | IEXTEN - t.Cflag &^= CSIZE | PARENB - t.Cflag |= CS8 - t.Cc[VMIN] = 1 - t.Cc[VTIME] = 0 -} - -// Cook Set the Terminal to Cooked mode. -// In this mode the Terminal process the information before sending it on to the application. -func (t *Termios) Cook() { - t.Iflag |= BRKINT | IGNPAR | ISTRIP | ICRNL | IXON - t.Oflag |= OPOST - t.Lflag |= ISIG | ICANON -} - -// Sane reset Term to sane values. -// Should be pretty much what the shell command "reset" does to the terminal. -func (t *Termios) Sane() { - t.Iflag &^= IGNBRK | INLCR | IGNCR | IUTF8 | IXOFF | IUCLC | IXANY - t.Iflag |= BRKINT | ICRNL | IMAXBEL - t.Oflag |= OPOST | ONLCR - t.Oflag &^= OLCUC | OCRNL | ONOCR | ONLRET - t.Cflag |= CREAD -} - -// Set Sets terminal t attributes on file. -func (t *Termios) Set(file *os.File) error { - fd := file.Fd() - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), uintptr(TCSETS), uintptr(unsafe.Pointer(t))) - if errno != 0 { - return errno - } - return nil -} - -// Attr Gets (terminal related) attributes from file. -func Attr(file *os.File) (Termios, error) { - var t Termios - fd := file.Fd() - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), uintptr(TCGETS), uintptr(unsafe.Pointer(&t))) - if errno != 0 { - return t, errno - } - t.Ispeed &= CBAUD | CBAUDEX - t.Ospeed &= CBAUD | CBAUDEX - return t, nil -} - -// Isatty returns true if file is a tty. -func Isatty(file *os.File) bool { - _, err := Attr(file) - return err == nil -} - -// GetPass reads password from a TTY with no echo. -func GetPass(prompt string, f *os.File, pbuf []byte) ([]byte, error) { - t, err := Attr(f) - if err != nil { - return nil, err - } - defer t.Set(f) - noecho := t - noecho.Lflag = noecho.Lflag &^ ECHO - if err := noecho.Set(f); err != nil { - return nil, err - } - b := make([]byte, 1, 1) - i := 0 - if _, err := f.Write([]byte(prompt)); err != nil { - return nil, err - } - for ; i < len(pbuf); i++ { - if _, err := f.Read(b); err != nil { - b[0] = 0 - clearbuf(pbuf[:i+1]) - } - if b[0] == '\n' || b[0] == '\r' { - return pbuf[:i], nil - } - pbuf[i] = b[0] - b[0] = 0 - } - clearbuf(pbuf[:i+1]) - return nil, errors.New("ran out of bufferspace") -} - -// clearbuf clears out the buffer incase we couldn't read the full password. -func clearbuf(b []byte) { - for i := range b { - b[i] = 0 - } -} - -// GetChar reads a single byte. -func GetChar(f *os.File) (b byte, err error) { - bs := make([]byte, 1, 1) - if _, err = f.Read(bs); err != nil { - return 0, err - } - return bs[0], err -} - -// PTSName return the name of the pty. 
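Attr, Raw, and Set above compose into the usual save/modify/restore sequence. A sketch using the deleted package's own API, valid against the code as it stood before this removal:

```go
package main

import (
	"log"
	"os"

	"github.com/google/goterm/term" // the vendor copy this diff removes
)

func main() {
	// Save current settings, switch to raw mode, and restore on exit.
	saved, err := term.Attr(os.Stdin)
	if err != nil {
		log.Fatalf("not a terminal? %v", err)
	}
	defer saved.Set(os.Stdin)

	raw := saved
	raw.Raw()
	if err := raw.Set(os.Stdin); err != nil {
		log.Fatalf("cannot enter raw mode: %v", err)
	}

	b, err := term.GetChar(os.Stdin) // one keypress: no echo, no line buffering
	if err != nil {
		log.Fatalf("read failed: %v", err)
	}
	log.Printf("got %q", b)
}
```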
-func (p *PTY) PTSName() (string, error) { - n, err := p.PTSNumber() - if err != nil { - return "", err - } - return "/dev/pts/" + strconv.Itoa(int(n)), nil -} - -// PTSNumber return the pty number. -func (p *PTY) PTSNumber() (uint, error) { - var ptyno uint - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(p.Master.Fd()), uintptr(TIOCGPTN), uintptr(unsafe.Pointer(&ptyno))) - if errno != 0 { - return 0, errno - } - return ptyno, nil -} - -// Winsz Fetches the current terminal windowsize. -// example handling changing window sizes with PTYs: -// -// import "os" -// import "os/signal" -// -// var sig = make(chan os.Signal,2) // Channel to listen for UNIX SIGNALS on -// signal.Notify(sig, syscall.SIGWINCH) // That'd be the window changing -// -// for { -// <-sig -// term.Winsz(os.Stdin) // We got signaled our terminal changed size so we read in the new value -// term.Setwinsz(pty.Slave) // Copy it to our virtual Terminal -// } -func (t *Termios) Winsz(file *os.File) error { - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(file.Fd()), uintptr(TIOCGWINSZ), uintptr(unsafe.Pointer(&t.Wz))) - if errno != 0 { - return errno - } - return nil -} - -// Setwinsz Sets the terminal window size. -func (t *Termios) Setwinsz(file *os.File) error { - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(file.Fd()), uintptr(TIOCSWINSZ), uintptr(unsafe.Pointer(&t.Wz))) - if errno != 0 { - return errno - } - return nil -} - -// OpenPTY Creates a new Master/Slave PTY pair. -func OpenPTY() (*PTY, error) { - // Opening ptmx gives you the FD of a brand new PTY - master, err := os.OpenFile("/dev/ptmx", os.O_RDWR, 0) - if err != nil { - return nil, err - } - - // unlock pty slave - var unlock int // 0 => Unlock - if _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(master.Fd()), uintptr(TIOCSPTLCK), uintptr(unsafe.Pointer(&unlock))); errno != 0 { - master.Close() - return nil, errno - } - - // get path of pts slave - pty := &PTY{Master: master} - slaveStr, err := pty.PTSName() - if err != nil { - master.Close() - return nil, err - } - - // open pty slave - pty.Slave, err = os.OpenFile(slaveStr, os.O_RDWR|syscall.O_NOCTTY, 0) - if err != nil { - master.Close() - return nil, err - } - - return pty, nil -} - -// Close closes the PTYs that OpenPTY created. -func (p *PTY) Close() error { - slaveErr := errors.New("Slave FD nil") - if p.Slave != nil { - slaveErr = p.Slave.Close() - } - masterErr := errors.New("Master FD nil") - if p.Master != nil { - masterErr = p.Master.Close() - } - if slaveErr != nil || masterErr != nil { - var errs []string - if slaveErr != nil { - errs = append(errs, "Slave: "+slaveErr.Error()) - } - if masterErr != nil { - errs = append(errs, "Master: "+masterErr.Error()) - } - return errors.New(strings.Join(errs, " ")) - } - return nil -} - -// ReadByte implements the io.ByteReader interface to read single char from the PTY. -func (p *PTY) ReadByte() (byte, error) { - bs := make([]byte, 1, 1) - _, err := p.Master.Read(bs) - return bs[0], err -} - -// GetChar fine old getchar() for a PTY. 
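OpenPTY pairs naturally with os/exec for script- and expect-style tools. A sketch that attaches a child's stdio to the slave end; Setsid and Setctty are standard syscall.SysProcAttr fields on Linux, and with stdin on fd 0 the slave becomes the child's controlling terminal:

```go
package main

import (
	"io"
	"log"
	"os"
	"os/exec"
	"syscall"

	"github.com/google/goterm/term" // the vendor copy this diff removes
)

func main() {
	pty, err := term.OpenPTY()
	if err != nil {
		log.Fatalf("OpenPTY: %v", err)
	}
	defer pty.Close()

	cmd := exec.Command("sh", "-c", "echo hello from a pty")
	cmd.Stdin, cmd.Stdout, cmd.Stderr = pty.Slave, pty.Slave, pty.Slave
	cmd.SysProcAttr = &syscall.SysProcAttr{Setsid: true, Setctty: true}
	if err := cmd.Start(); err != nil {
		log.Fatalf("start: %v", err)
	}

	// Echo the child's terminal output; in this sketch the copy may race
	// with process exit and drop trailing bytes.
	go io.Copy(os.Stdout, pty.Master)
	if err := cmd.Wait(); err != nil {
		log.Fatalf("wait: %v", err)
	}
}
```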
-func (p *PTY) GetChar() (byte, error) { - return p.ReadByte() -} diff --git a/vendor/github.com/hugelgupf/vmtest/vminit/shutdownafter/.gitignore b/vendor/github.com/hugelgupf/vmtest/vminit/shutdownafter/.gitignore new file mode 100644 index 0000000000..9283274e49 --- /dev/null +++ b/vendor/github.com/hugelgupf/vmtest/vminit/shutdownafter/.gitignore @@ -0,0 +1 @@ +shutdownafter diff --git a/vendor/github.com/hugelgupf/vmtest/vminit/shutdownafter/main_linux.go b/vendor/github.com/hugelgupf/vmtest/vminit/shutdownafter/main_linux.go new file mode 100644 index 0000000000..21fad68609 --- /dev/null +++ b/vendor/github.com/hugelgupf/vmtest/vminit/shutdownafter/main_linux.go @@ -0,0 +1,36 @@ +// Copyright 2024 the u-root Authors. All rights reserved +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Command shutdownafter runs a command given in args and shuts down. +package main + +import ( + "flag" + "log" + "os" + "os/exec" + + "golang.org/x/sys/unix" +) + +func run() error { + args := flag.Args() + if len(args) == 0 { + return nil + } + c := exec.Command(args[0], args[1:]...) + c.Stdout, c.Stderr = os.Stdout, os.Stderr + return c.Run() +} + +func main() { + flag.Parse() + if err := run(); err != nil { + log.Printf("Failed: %v", err) + } + + if err := unix.Reboot(unix.LINUX_REBOOT_CMD_POWER_OFF); err != nil { + log.Fatalf("Failed to shutdown: %v", err) + } +} diff --git a/vendor/github.com/hugelgupf/vmtest/vminit/vmmount/.gitignore b/vendor/github.com/hugelgupf/vmtest/vminit/vmmount/.gitignore new file mode 100644 index 0000000000..926e9c9487 --- /dev/null +++ b/vendor/github.com/hugelgupf/vmtest/vminit/vmmount/.gitignore @@ -0,0 +1 @@ +vmmount diff --git a/vendor/github.com/hugelgupf/vmtest/vminit/vmmount/main_linux.go b/vendor/github.com/hugelgupf/vmtest/vminit/vmmount/main_linux.go new file mode 100644 index 0000000000..1b97789054 --- /dev/null +++ b/vendor/github.com/hugelgupf/vmtest/vminit/vmmount/main_linux.go @@ -0,0 +1,56 @@ +// Copyright 2024 the u-root Authors. All rights reserved +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Command vmmount mounts 9P directories as defined by env vars, runs a +// command, and unmounts them. +// +// The 9P directories are mounted via virtio; their tags are derived from any +// env var that matches VMTEST_MOUNT9P_*=$tag. The mount location is +// /mount/9p/$tag. +package main + +import ( + "flag" + "log" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/hugelgupf/vmtest/guest" +) + +func run() error { + for _, v := range os.Environ() { + if !strings.HasPrefix(v, "VMTEST_MOUNT9P_") { + continue + } + + e := strings.SplitN(v, "=", 2) + mp, err := guest.Mount9PDir(filepath.Join("/mount/9p", e[1]), e[1]) + if err != nil { + log.Printf("Tried to mount 9P tag %s at /mount/9p/%s: %v", e[1], e[1], err) + } + defer func() { + if err := mp.Unmount(0); err != nil { + log.Printf("Failed to unmount: %v", err) + } + }() + } + + args := flag.Args() + if len(args) == 0 { + return nil + } + c := exec.Command(args[0], args[1:]...) 
+ c.Stdout, c.Stderr = os.Stdout, os.Stderr + return c.Run() +} + +func main() { + flag.Parse() + if err := run(); err != nil { + log.Printf("Failed: %v", err) + } +} diff --git a/vendor/github.com/u-root/gobusybox/src/pkg/bb/bb.go b/vendor/github.com/u-root/gobusybox/src/pkg/bb/bb.go index 224595bc95..7ddeb54d83 100644 --- a/vendor/github.com/u-root/gobusybox/src/pkg/bb/bb.go +++ b/vendor/github.com/u-root/gobusybox/src/pkg/bb/bb.go @@ -25,34 +25,21 @@ package bb import ( "fmt" - "io" "io/ioutil" "os" - "path" "path/filepath" - "regexp" "strings" - "github.com/google/goterm/term" - "golang.org/x/mod/modfile" + "golang.org/x/exp/maps" "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/packages" "github.com/u-root/gobusybox/src/pkg/bb/bbinternal" "github.com/u-root/gobusybox/src/pkg/bb/findpkg" "github.com/u-root/gobusybox/src/pkg/golang" - "github.com/u-root/uio/cp" "github.com/u-root/uio/ulog" ) -func listStrings(m map[string]struct{}) []string { - var l []string - for k := range m { - l = append(l, k) - } - return l -} - func checkDuplicate(cmds []*bbinternal.Package) error { seen := make(map[string]string) for _, cmd := range cmds { @@ -98,11 +85,6 @@ type Opts struct { // compiles the busybox binary. GoBuildOpts *golang.BuildOpts - // AllowMixedMode allows mixed mode (module / non-module) compilation. - // - // If this is done with GO111MODULE=on, - AllowMixedMode bool - // Generate the tree but don't build it. This is useful for systems // like Tamago which have their own way of building. GenerateOnly bool @@ -197,8 +179,8 @@ func BuildBusybox(l ulog.Logger, opts *Opts) (nerr error) { numNoModule++ } } - if !opts.AllowMixedMode && len(modules) > 0 && numNoModule > 0 { - return fmt.Errorf("gobusybox does not support mixed module/non-module compilation -- commands contain main modules %v", strings.Join(listStrings(modules), ", ")) + if len(modules) > 0 && numNoModule > 0 { + return fmt.Errorf("gobusybox does not support mixed module/non-module compilation -- commands contain main modules %v", strings.Join(maps.Keys(modules), ", ")) } // List of packages to import in the real main file. @@ -214,7 +196,7 @@ func BuildBusybox(l ulog.Logger, opts *Opts) (nerr error) { } // Collect and write dependencies into pkgDir. - if err := copyLocalDeps(l, opts.Env, bbDir, tmpDir, pkgDir, cmds); err != nil { + if err := copyAllDeps(l, opts.Env, bbDir, tmpDir, pkgDir, cmds); err != nil { return fmt.Errorf("collecting and putting dependencies in place failed: %v", err) } @@ -222,80 +204,37 @@ func BuildBusybox(l ulog.Logger, opts *Opts) (nerr error) { return fmt.Errorf("failed to write main.go: %v", err) } - // Get ready to compile bb. - if opts.Env.GO111MODULE == "off" || numNoModule > 0 { - opts.Env.GOPATH = tmpDir - } else { - // Run go mod tidy in order to get the go.sum file - // sorted. This likely requires people to be online. - // - // Sorting a go.sum file seems to be required now in order to - // get builds to work. Sorting is only necessary when we merge - // more than one go.sum file (i.e. we are compiling commands - // from more than one module, e.g. u-root and u-bmc). - // - // This can only be done after writeBBMain, as it reads what - // main.go depends on and prunes everything that isn't needed. - cmd := opts.Env.GoCmd("mod", "tidy") - cmd.Dir = bbDir - if o, err := cmd.CombinedOutput(); err != nil { - return fmt.Errorf("running `go mod tidy` on the generated busybox main package failed (%v): %s", err, o) - } - } - if opts.GenerateOnly { return nil } - // Compile bb. 
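In the bb.go hunk above, the hand-rolled listStrings helper gives way to golang.org/x/exp/maps.Keys. One caveat worth making explicit: key order is unspecified in both versions, so the module list in the mixed-mode error message is nondeterministic; sorting would pin it down. A minimal sketch:

```go
package main

import (
	"fmt"
	"sort"

	"golang.org/x/exp/maps"
)

func main() {
	modules := map[string]struct{}{
		"github.com/u-root/u-root": {},
		"github.com/u-bmc/u-bmc":   {},
	}
	// maps.Keys replaces the hand-rolled listStrings helper deleted above.
	// Iteration order is unspecified, so sort before user-facing output.
	names := maps.Keys(modules)
	sort.Strings(names)
	fmt.Println(names)
}
```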
- if err := opts.Env.BuildDir(bbDir, opts.BinaryPath, opts.GoBuildOpts); err != nil { - if opts.Env.GO111MODULE == "off" || numNoModule > 0 { - return &ErrGopathBuild{ - CmdDir: bbDir, - GOPATH: tmpDir, - Err: err, - } - } else { - return &ErrModuleBuild{ - CmdDir: bbDir, - Err: err, - } + // Get ready to compile bb. + buildEnv := opts.Env.Copy(golang.WithGO111MODULE("off"), golang.WithGOPATH(tmpDir), golang.WithMod("")) + if err := buildEnv.BuildDir(bbDir, opts.BinaryPath, opts.GoBuildOpts); err != nil { + return &ErrBuild{ + CmdDir: bbDir, + GOPATH: tmpDir, + Err: err, } } return nil } -// ErrModuleBuild is returned for a go build failure when modules were enabled. -type ErrModuleBuild struct { - CmdDir string - Err error -} - -// Unwrap implements error.Unwrap. -func (e *ErrModuleBuild) Unwrap() error { - return e.Err -} - -// Error implements error.Error. -func (e *ErrModuleBuild) Error() string { - return fmt.Sprintf("go build with modules failed: %v", e.Err) -} - -// ErrGopathBuild is returned for a go build failure when modules were disabled. -type ErrGopathBuild struct { +// ErrBuild is returned for a go build failure when modules were disabled. +type ErrBuild struct { CmdDir string GOPATH string Err error } // Unwrap implements error.Unwrap. -func (e *ErrGopathBuild) Unwrap() error { +func (e *ErrBuild) Unwrap() error { return e.Err } // Error implements error.Error. -func (e *ErrGopathBuild) Error() string { - return fmt.Sprintf("non-module go build failed: %v", e.Err) +func (e *ErrBuild) Error() string { + return fmt.Sprintf("`(cd %s && GOPATH=%s GO111MODULE=off go build)` failed: %v", e.CmdDir, e.GOPATH, e.Err) } // writeBBMain writes $TMPDIR/src/bb.u-root.com/bb/pkg/bbmain/register.go and @@ -339,293 +278,16 @@ func writeBBMain(bbDir, tmpDir string, bbImports []string) error { return nil } -func isReplacedModuleLocal(m *packages.Module) bool { - // From "replace directive": https://golang.org/ref/mod#go - // - // If the path on the right side of the arrow is an absolute or - // relative path (beginning with ./ or ../), it is interpreted as the - // local file path to the replacement module root directory, which - // must contain a go.mod file. The replacement version must be - // omitted in this case. - return strings.HasPrefix(m.Path, "./") || strings.HasPrefix(m.Path, "../") || strings.HasPrefix(m.Path, "/") -} - -// copyLocalGoMods copies the go.sum and go.mod of all modules that are locally -// present in the file system into the right place in the generated tree. -func copyLocalGoMods(pkgDir, bbDir string, modules map[string]*packages.Module) error { - copyGoMod := func(mod *packages.Module) error { - if mod == nil { - return nil - } - - if err := os.MkdirAll(filepath.Join(pkgDir, mod.Path), 0755); os.IsExist(err) { - return nil - } else if err != nil { - return err - } - - // Use the module file for all outside dependencies. - if err := cp.Copy(mod.GoMod, filepath.Join(pkgDir, mod.Path, "go.mod")); err != nil { - return err - } - - // As of Go 1.16, the Go build system expects an accurate - // go.sum in the main module directory. We build it by - // concatenating all constituent go.sums. - // - // If it doesn't exist, that's okay! - gosum := filepath.Join(filepath.Dir(mod.GoMod), "go.sum") - if err := cp.Copy(gosum, filepath.Join(pkgDir, mod.Path, "go.sum")); os.IsNotExist(err) { - // Modules without dependencies don't have or need a go.sum. 
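Collapsing ErrGopathBuild and ErrModuleBuild into one ErrBuild keeps the fields a caller needs to reproduce a failure by hand, and Unwrap keeps it compatible with errors.As. A sketch of the caller side; constructing a fully populated bb.Opts is elided, ulog.Log is assumed to be that package's default logger, and the field names come straight from this diff:

```go
package main

import (
	"errors"
	"log"

	"github.com/u-root/gobusybox/src/pkg/bb"
	"github.com/u-root/gobusybox/src/pkg/golang"
	"github.com/u-root/uio/ulog"
)

func main() {
	// Sketch only: a real Opts also names the commands to compile and
	// the output binary path.
	opts := &bb.Opts{Env: golang.Default()}
	err := bb.BuildBusybox(ulog.Log, opts)

	var eb *bb.ErrBuild
	if errors.As(err, &eb) {
		// CmdDir and GOPATH are exactly what you need to re-run the
		// failing `go build` by hand, as the new Error() string spells out.
		log.Printf("build failed in %s (GOPATH=%s): %v", eb.CmdDir, eb.GOPATH, eb.Err)
	} else if err != nil {
		log.Printf("build failed: %v", err)
	}
}
```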
- return nil - } else if err != nil { - return err - } - - gosumf, err := os.Open(gosum) - if err != nil { - return err - } - defer gosumf.Close() - f, err := os.OpenFile(filepath.Join(bbDir, "go.sum"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0755) - if err != nil { - return err - } - defer f.Close() - _, err = io.Copy(f, gosumf) - return err - } - - for modPath, mod := range modules { - if err := copyGoMod(mod); err != nil { - return fmt.Errorf("failed to copy go.mod for %s: %v", modPath, err) - } - } - return nil -} - -// findLocalModules finds all modules that are locally present on the file -// system and raises an error if any modules dependencies conflict with each -// other (e.g. because one module requests a different `replace` dependency -// than another). Conflicts generally only arise when a module is replaced by a -// local directory, as Go takes care of other conflicts using -// minimum-version-selection (MVS). -func findLocalModules(l ulog.Logger, mainPkgs []*bbinternal.Package) (map[string]*packages.Module, error) { - type localModule struct { - m *packages.Module - provenance string - } - - localModules := make(map[string]*localModule) - - // These are all modules that the user requested to compile commands - // from. They are assumed to be local directories. - for _, p := range mainPkgs { - if p.Pkg.Module != nil { - if _, ok := localModules[p.Pkg.Module.Path]; !ok { - localModules[p.Pkg.Module.Path] = &localModule{ - m: p.Pkg.Module, - provenance: p.Pkg.PkgPath, - } - } - } - } - - // This finds all modules that are `replace`d in go.mod files of the - // commands. - for _, p := range mainPkgs { - replacedModules := locallyReplacedModules(p.Pkg) - for modPath, module := range replacedModules { - if original, ok := localModules[modPath]; ok { - // Is this module different from one that a - // previous definition provided? - // - // This can happen if: - // - there are 2 modules that have conflicting - // replace directives: - // replace u-root => ../foo - // replace u-root => ../bar - // - // - there is a module that has a replace - // directive that doesn't match the directory - // the user requested commands from, e.g. - // ./makebb ~/u-root/cmds/core/ip ~/cpu/cmds/cpu - // but cpu/go.mod has - // replace u-root => ../foobar (which is not ~/u-root!) - // - // TODO: write a pretty log message for the - // user with a suggestion of what to do. - if original.m.Dir != module.Dir { - return nil, fmt.Errorf("two conflicting versions of module %s have been requested; one from %s, the other from %s's go.mod", - modPath, original.provenance, p.Pkg.Module.Path) - } - } else { - localModules[modPath] = &localModule{ - m: module, - provenance: fmt.Sprintf("%s's go.mod (%s)", p.Pkg.Module.Path, p.Pkg.Module.GoMod), - } - } - } - } - - // Look for versioning conflicts between all modules. - // - // Go through the entire dependency graph of every command the user - // requested. Every dependency has a version through go.mod files, and - // that version number may conflict with either a replace directive or - // the fact that the user requested to compile a command from the - // dependency module. - // - // E.g. if u-bmc depends on u-root @ v0.8.0, but we are also compiling - // u-root from a local directory, those are conflicting requirements. - var conflict bool - - // seen is a map of user-requested-command-module => - // local-dependency-module combination that a warning has already been - // printed about. 
- seen := map[string]map[string]struct{}{} - - for _, mainPkg := range mainPkgs { - // Initialize the inner seen map. - if mainPkg.Pkg.Module != nil { - if _, ok := seen[mainPkg.Pkg.Module.Path]; !ok { - seen[mainPkg.Pkg.Module.Path] = map[string]struct{}{} - } - } - - // Visit visits all packages in the dependency graph of the named package. - packages.Visit([]*packages.Package{mainPkg.Pkg}, nil, func(p *packages.Package) { - if p.Module == nil { - return - } - if _, ok := seen[mainPkg.Pkg.Module.Path][p.Module.Path]; ok { - return - } - - if lm, ok := localModules[p.Module.Path]; ok && lm.m.Dir != p.Module.Dir { - gbbstrict, set := os.LookupEnv("GBB_STRICT") - if set == false { - l.Printf("GBB_STRICT is not set.") - } - if gbbstrict != "1" { - if p.Module.Version == "" || lm.m.Version == "" || (p.Module.Replace != nil && isReplacedModuleLocal(p.Module.Replace)) || (lm.m.Replace != nil && isReplacedModuleLocal(lm.m.Replace)) { - seen[mainPkg.Pkg.Module.Path][p.Module.Path] = struct{}{} - l.Printf("[WARNING] %s depends on %s @ %s\n", mainPkg.Pkg.PkgPath, p.Module.Path, moduleVersionIdentifier(p.Module)) - l.Printf("\tUsing %s @ %s to build it.", lm.m.Path, moduleVersionIdentifier(lm.m)) - return - } - } - fmt.Fprintln(os.Stderr, "") - l.Printf("Conflicting module dependencies on %s:", p.Module.Path) - l.Printf(" %s depends on %s @ %s", mainPkg.Pkg.PkgPath, p.Module.Path, moduleVersionIdentifier(p.Module)) - l.Printf(" %s depends on %s @ %s", lm.provenance, lm.m.Path, moduleVersionIdentifier(lm.m)) - replacePath, err := filepath.Rel(mainPkg.Pkg.Module.Dir, lm.m.Dir) - if err != nil { - replacePath = lm.m.Dir - } - fmt.Fprintln(os.Stderr, "") - l.Printf("%s: add `replace %s => %s` to %s", term.Bold("Suggestion to resolve"), p.Module.Path, replacePath, mainPkg.Pkg.Module.GoMod) - fmt.Fprintln(os.Stderr, "") - conflict = true - - // Don't print this particular warning combo again. - seen[mainPkg.Pkg.Module.Path][p.Module.Path] = struct{}{} - } - }) - } - if conflict { - return nil, fmt.Errorf("conflicting module dependencies found") - } - - modules := make(map[string]*packages.Module) - for modPath, mod := range localModules { - modules[modPath] = mod.m - } - return modules, nil -} - -func moduleVersionIdentifier(m *packages.Module) string { - // This module was one of the commands requested by the user, and hence was a local directory. - if m.Version == "" { - return fmt.Sprintf("directory %s", m.Dir) - } - - // This module was replaced by some dependency to a local directory. - if m.Replace != nil && isReplacedModuleLocal(m.Replace) { - return fmt.Sprintf("directory %s", m.Replace.Path) - } - - return fmt.Sprintf("version %s", m.Version) -} - -// copyLocalDeps tries to suss out local files that need to be in the generated tree. -// -// It copies files from all dependency packages and modules that need to be in -// the generated tree, but NOT for the main commands that are going to be -// rewritten. -// -// It helps to have read https://golang.org/ref/mod when editing this function. -// -// Module-enabled Go programs resolve their dependencies in one of two ways: -// -// - versioned dependencies: via a version control system at a specific -// version, potentially remotely downloaded -// -// - locally: a module that is either `replace`d with a local file system -// directory, or a command that is being built from a module that is on the -// local file system (e.g. 
./makebb ../u-root/cmds/core/ip -- here, ../u-root -// will be a local directory module) -// -// Go minimum version selection (MVS) will take care of all versioned -// dependencies on its own. -// -// *We* have to take care of all files that are in local modules: everything -// required for compilation of the gobusybox within local modules has to be -// copied to the generated tree. -// -// For local dependencies, we copy all dependency packages' files over, as well -// as the local modules' go.sum and go.mod files. -// -// Then, in the generated tree's main module, we create a go.mod file with -// replace directives for all the local modules we just copied over. -func copyLocalDeps(l ulog.Logger, env *golang.Environ, bbDir, tmpDir, pkgDir string, mainPkgs []*bbinternal.Package) error { - localModules, err := findLocalModules(l, mainPkgs) - if err != nil { - return err - } - // Copy go.sum and go.mod files for all local modules to the generated tree. - if err := copyLocalGoMods(pkgDir, bbDir, localModules); err != nil { - return err - } - - // Find all packages that need to be copied over to the generated tree. - // - // This is going to be all the source code that - // (a) a requested command depends on (one of mainPkgs), AND - // (b) that is in one of the localModules. - var localDepPkgs []*packages.Package +func copyAllDeps(l ulog.Logger, env *golang.Environ, bbDir, tmpDir, pkgDir string, mainPkgs []*bbinternal.Package) error { + var deps []*packages.Package for _, p := range mainPkgs { - // Find all dependency packages that are *within* module boundaries for this package. - localDeps := collectDeps(p.Pkg, localModules) - localDepPkgs = append(localDepPkgs, localDeps...) + deps = append(deps, collectDeps(p.Pkg)...) } - // TODO(chrisko): We need to go through mainPkgs Module definitions to - // find exclude and replace directives, which only have an effect in - // the main module's go.mod, which will be the top-level go.mod we - // write. - // - // mainPkgs module files expect to be "the main module", since those - // are where Go compilation would normally occur. - // - // The top-level go.mod must have copies of the mainPkgs' modules' - // replace and exclude directives. If they conflict, we need to have a - // legible error message for the user. - // Copy local dependency packages into module directories at // tmpDir/src. seenIDs := make(map[string]struct{}) - for _, p := range localDepPkgs { + for _, p := range deps { if _, ok := seenIDs[p.ID]; !ok { if err := bbinternal.WritePkg(p, filepath.Join(pkgDir, p.PkgPath)); err != nil { return fmt.Errorf("writing package %s failed: %v", p, err) @@ -633,71 +295,9 @@ func copyLocalDeps(l ulog.Logger, env *golang.Environ, bbDir, tmpDir, pkgDir str seenIDs[p.ID] = struct{}{} } } - - // Avoid go.mod in the case of GO111MODULE=(auto|off) if there are no modules. - if env.GO111MODULE == "on" || len(localModules) > 0 { - // go.mod for the bb binary. - // - // Add local replace rules for all modules we're compiling. - // - // This is the only way to locally reference another modules' - // repository. Otherwise, go'll try to go online to get the source. - // - // The module name is something that'll never be online, lest Go - // decides to go on the internet. - var mod modfile.File - - mod.AddModuleStmt("bb.u-root.com/bb") - for mpath, module := range localModules { - v := module.Version - if len(v) == 0 { - // When we don't know the version, we can plug - // in a "Go-generated" version number to get - // past the validation in the compiler. 
- // - // We don't always do this because if the - // module path has a /v2 or /v3, Go expects the - // version number to match. So we use the - // module.Version when available, because it's - // the most accurate thing. - v = "v0.0.0" - } - if err := mod.AddRequire(mpath, v); err != nil { - return fmt.Errorf("could not add requiring %v to go.mod: %v", mpath, err) - } - if err := mod.AddReplace(mpath, "", path.Join("..", "..", mpath), ""); err != nil { - return fmt.Errorf("could not add replace rule for %v to go.mod: %v", mpath, err) - } - } - - gomod, err := mod.Format() - if err != nil { - return fmt.Errorf("could not generated go.mod: %v", err) - } - - // TODO(chrisko): add other go.mod files' replace and exclude - // directives. - // - // Warn the user if they are potentially incompatible. - if err := ioutil.WriteFile(filepath.Join(bbDir, "go.mod"), gomod, 0755); err != nil { - return err - } - return nil - } return nil } -func versionNum(mpath string) string { - last := path.Base(mpath) - if len(last) == 0 { - return "v0" - } - if matched, _ := regexp.Match("v[0-9]+", []byte(last)); matched { - return last - } - return "v0" -} - // deps recursively iterates through imports and returns the set of packages // for which filter returns true. func deps(p *packages.Package, filter func(p *packages.Package) bool) []*packages.Package { @@ -710,43 +310,7 @@ func deps(p *packages.Package, filter func(p *packages.Package) bool) []*package return pkgs } -func locallyReplacedModules(p *packages.Package) map[string]*packages.Module { - if p.Module == nil { - return nil - } - - m := make(map[string]*packages.Module) - // Collect all "local" dependency packages that are in `replace` - // directives somewhere, to be copied into the temporary directory - // structure later. - packages.Visit([]*packages.Package{p}, nil, func(pkg *packages.Package) { - if pkg.Module != nil && pkg.Module.Replace != nil && isReplacedModuleLocal(pkg.Module.Replace) { - m[pkg.Module.Path] = pkg.Module - } - }) - return m -} - -func collectDeps(p *packages.Package, localModules map[string]*packages.Module) []*packages.Package { - if p.Module != nil { - // Collect all "local" dependency packages, to be copied into - // the temporary directory structure later. - return deps(p, func(pkg *packages.Package) bool { - // Replaced modules can be local on the file system. - if pkg.Module != nil && pkg.Module.Replace != nil && isReplacedModuleLocal(pkg.Module.Replace) { - return true - } - - // Is this a dependency within a local module? - for modulePath := range localModules { - if strings.HasPrefix(pkg.PkgPath, modulePath) { - return true - } - } - return false - }) - } - +func collectDeps(p *packages.Package) []*packages.Package { // If modules are not enabled, we need a copy of *ALL* // non-standard-library dependencies in the temporary directory. return deps(p, func(pkg *packages.Package) bool { diff --git a/vendor/github.com/u-root/gobusybox/src/pkg/bb/findpkg/bb.go b/vendor/github.com/u-root/gobusybox/src/pkg/bb/findpkg/bb.go index 822b004511..37c55b1d3d 100644 --- a/vendor/github.com/u-root/gobusybox/src/pkg/bb/findpkg/bb.go +++ b/vendor/github.com/u-root/gobusybox/src/pkg/bb/findpkg/bb.go @@ -4,6 +4,9 @@ // Package findpkg finds packages from user-input strings that are either file // paths or Go package paths. +// +// findpkg supports globs and exclusions in addition to the normal `go list` +// syntax, as described in the [NewPackages] documentation.
package findpkg import ( @@ -19,120 +22,9 @@ import ( "github.com/u-root/gobusybox/src/pkg/golang" "github.com/u-root/uio/ulog" "golang.org/x/tools/go/packages" + "mvdan.cc/sh/v3/shell" ) -// modules returns a list of module directories => directories of packages -// inside that module as well as packages that have no discernible module. -// -// The module for a package is determined by the **first** parent directory -// that contains a go.mod. -func modules(filesystemPaths []string) (map[string][]string, []string) { - // list of module directory => directories of packages it likely contains - moduledPackages := make(map[string][]string) - var noModulePkgs []string - for _, fullPath := range filesystemPaths { - components := strings.Split(fullPath, "/") - - inModule := false - for i := len(components); i >= 1; i-- { - prefixPath := "/" + filepath.Join(components[:i]...) - if _, err := os.Stat(filepath.Join(prefixPath, "go.mod")); err == nil { - moduledPackages[prefixPath] = append(moduledPackages[prefixPath], fullPath) - inModule = true - break - } - } - if !inModule { - noModulePkgs = append(noModulePkgs, fullPath) - } - } - return moduledPackages, noModulePkgs -} - -func loadRelative(moduleDir string, pkgDirs []string, loadFunc func(moduleDir string, dirs []string) error) error { - // Make all paths relative, because packages.Load/`go list -json` does - // not like absolute paths when what's being looked up is outside of - // GOPATH. It's fine though when those paths are relative to the PWD. - // Don't ask me why... - // - // E.g. - // * package $HOME/u-root/cmds/core/ip: -: import "$HOME/u-root/cmds/core/ip": cannot import absolute path - var relPkgDirs []string - for _, pkgDir := range pkgDirs { - relPkgDir, err := filepath.Rel(moduleDir, pkgDir) - if err != nil { - return fmt.Errorf("Go package path %s is not relative to directory %s: %v", pkgDir, moduleDir, err) - } - - // N.B. `go list -json cmd/foo` is not the same as `go list -json ./cmd/foo`. - // - // The former looks for cmd/foo in $GOROOT or $GOPATH, while - // the latter looks in the relative directory ./cmd/foo. - relPkgDirs = append(relPkgDirs, "./"+relPkgDir) - } - return loadFunc(moduleDir, relPkgDirs) -} - -// Find each packages' module, and batch package queries together by module. -// -// Query all packages that don't have a module at all together, as well. -// -// Batching these queries saves a *lot* of time; on the order of -// several minutes for 30+ commands. -func batchFSPackages(l ulog.Logger, absPaths []string, loadFunc func(moduleDir string, dirs []string) error) error { - mods, noModulePkgDirs := modules(absPaths) - - for moduleDir, pkgDirs := range mods { - if err := loadRelative(moduleDir, pkgDirs, loadFunc); err != nil { - return err - } - } - - if len(noModulePkgDirs) > 0 { - if err := loadRelative(noModulePkgDirs[0], noModulePkgDirs, loadFunc); err != nil { - return err - } - } - return nil -} - -// We look up file system paths differently, because there is a big difference between -// -// go list -json ../../foobar -// -// and -// -// (cd ../../foobar && go list -json .) -// -// Namely, PWD determines which go.mod to use. We want each -// package to use its own go.mod, if it has one. -// -// The easiest implementation would be to do (cd $packageDir && go list -json -// .), however doing that N times is very expensive -- takes several minutes -// for 30 packages. So here, we figure out every module involved and do one -// query per module and one query for everything that isn't in a module. 
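That per-module batching can go away because the working directory now decides everything: whatever go.mod or go.work is visible from the environment's Dir governs one combined query. A sketch of the new single-query flow, using the WithWorkingDir option this diff adds to pkg/golang (the directory is illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/u-root/gobusybox/src/pkg/golang"
	"golang.org/x/tools/go/packages"
)

func main() {
	// The go.mod or go.work visible from this directory resolves every
	// pattern in a single Lookup; no one-query-per-module batching.
	env := golang.Default(golang.WithWorkingDir("/home/me/u-root"))
	pkgs, err := env.Lookup(packages.NeedName|packages.NeedFiles, "./cmds/core/...")
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pkgs {
		fmt.Println(p.PkgPath)
	}
}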
-func batchLoadFSPackages(l ulog.Logger, env *golang.Environ, absPaths []string) ([]*packages.Package, error) { - var allps []*packages.Package - - err := batchFSPackages(l, absPaths, func(moduleDir string, packageDirs []string) error { - pkgs, err := loadPkgs(env, moduleDir, packageDirs...) - if err != nil { - return fmt.Errorf("could not find packages in module %s: %v", moduleDir, err) - } - for _, pkg := range pkgs { - allps, err = addPkg(l, allps, pkg) - if err != nil { - return err - } - } - return nil - }) - if err != nil { - return nil, err - } - return allps, nil -} - func addPkg(l ulog.Logger, plist []*packages.Package, p *packages.Package) ([]*packages.Package, error) { if len(p.Errors) > 0 { var merr error @@ -147,9 +39,6 @@ func addPkg(l ulog.Logger, plist []*packages.Package, p *packages.Package) ([]*p } func newPackages(l ulog.Logger, genv *golang.Environ, env Env, patterns ...string) ([]*packages.Package, error) { - var goImportPaths []string - var filesystemPaths []string - // Two steps: // // 1. Resolve globs, filter packages with build constraints. @@ -166,36 +55,18 @@ func newPackages(l ulog.Logger, genv *golang.Environ, env Env, patterns ...strin } // Step 2. - for _, name := range paths { - // ResolveGlobs returns either an absolute file system path or - // a Go import path. - if strings.HasPrefix(name, "/") { - filesystemPaths = append(filesystemPaths, name) - } else { - goImportPaths = append(goImportPaths, name) - } + importPkgs, err := loadPkgs(genv, paths...) + if err != nil { + return nil, fmt.Errorf("failed to load package %v: %v", paths, err) } - - var ps []*packages.Package - if len(goImportPaths) > 0 { - importPkgs, err := loadPkgs(genv, env.WorkingDirectory, goImportPaths...) + var pkgs []*packages.Package + for _, p := range importPkgs { + pkgs, err = addPkg(l, pkgs, p) if err != nil { - return nil, fmt.Errorf("failed to load package %v: %v", goImportPaths, err) - } - for _, p := range importPkgs { - ps, err = addPkg(l, ps, p) - if err != nil { - return nil, err - } + return nil, err } } - - pkgs, err := batchLoadFSPackages(l, genv, filesystemPaths) - if err != nil { - return nil, fmt.Errorf("could not load packages from file system: %v", err) - } - ps = append(ps, pkgs...) - return ps, nil + return pkgs, nil } // NewPackages collects package metadata about all named packages. @@ -206,6 +77,10 @@ func newPackages(l ulog.Logger, genv *golang.Environ, env Env, patterns ...strin // constraints in env and logs a "Skipping package {}" statement about such // directories/packages. // +// All given names have to be resolvable by GOPATH or by Go modules. Generally, +// if `go list ` works, it should work here. (Except go list does not +// support globs or exclusions.) +// // Allowed formats for names: // // - relative and absolute paths including globs following Go's @@ -262,45 +137,9 @@ func NewPackages(l ulog.Logger, genv *golang.Environ, env Env, names ...string) return ips, nil } -func loadPkgs(env *golang.Environ, dir string, patterns ...string) ([]*packages.Package, error) { +func loadPkgs(env *golang.Environ, patterns ...string) ([]*packages.Package, error) { mode := packages.NeedName | packages.NeedImports | packages.NeedFiles | packages.NeedDeps | packages.NeedTypes | packages.NeedSyntax | packages.NeedTypesInfo | packages.NeedCompiledGoFiles | packages.NeedModule | packages.NeedEmbedFiles - return env.Lookup(mode, dir, patterns...) 
-} - -func filterDirectoryPaths(l ulog.Logger, env *golang.Environ, directories []string, excludes []string) ([]string, error) { - // Eligibility check: does each directory contain files that are - // compilable under the current GOROOT/GOPATH/GOOS/GOARCH and build - // tags? - // - // We filter this out first, because while packages.Load will give us - // an error for this, it is not distinguishable from other errors. We - // would like to give only a warning for these. - // - // This eligibility check requires Go 1.15, as before Go 1.15 the - // package loader would return an error "cannot find package" for - // packages not meeting build constraints. - var allps []*packages.Package - err := batchFSPackages(l, directories, func(moduleDir string, packageDirs []string) error { - pkgs, err := lookupPkgNameAndFiles(env, moduleDir, packageDirs...) - if err != nil { - return fmt.Errorf("could not look up packages %q: %v", packageDirs, err) - } - allps = append(allps, pkgs...) - return nil - }) - if err != nil { - return nil, err - } - - eligiblePkgs, err := checkEligibility(l, allps) - if err != nil { - return nil, err - } - var paths []string - for _, p := range eligiblePkgs { - paths = append(paths, filepath.Dir(p.GoFiles[0])) - } - return excludePaths(paths, excludes), nil + return env.Lookup(mode, patterns...) } func checkEligibility(l ulog.Logger, pkgs []*packages.Package) ([]*packages.Package, error) { @@ -349,8 +188,8 @@ func excludePaths(paths []string, exclusions []string) []string { } // Just looking up the stuff that doesn't take forever to parse. -func lookupPkgNameAndFiles(env *golang.Environ, dir string, patterns ...string) ([]*packages.Package, error) { - return env.Lookup(packages.NeedName|packages.NeedFiles, dir, patterns...) +func lookupPkgNameAndFiles(env *golang.Environ, patterns ...string) ([]*packages.Package, error) { + return env.Lookup(packages.NeedName|packages.NeedFiles, patterns...) } func couldBeGlob(s string) bool { @@ -361,7 +200,7 @@ func couldBeGlob(s string) bool { // Go command paths. It may return a list that contains errors. // // Precondition: couldBeGlob(pattern) is true -func lookupPkgsWithGlob(env *golang.Environ, wd string, pattern string) ([]*packages.Package, error) { +func lookupPkgsWithGlob(env *golang.Environ, pattern string) ([]*packages.Package, error) { elems := strings.Split(pattern, "/") globIndex := 0 @@ -374,7 +213,7 @@ func lookupPkgsWithGlob(env *golang.Environ, wd string, pattern string) ([]*pack nonGlobPath := strings.Join(append(elems[:globIndex], "..."), "/") - pkgs, err := lookupPkgNameAndFiles(env, wd, nonGlobPath) + pkgs, err := lookupPkgNameAndFiles(env, nonGlobPath) if err != nil { return nil, fmt.Errorf("%q is neither package or path/glob -- could not lookup %q (import path globs have to be within modules): %v", pattern, nonGlobPath, err) } @@ -394,7 +233,7 @@ func lookupPkgsWithGlob(env *golang.Environ, wd string, pattern string) ([]*pack // lookupCompilablePkgsWithGlob resolves Go package path globs to a realized // list of Go command paths. It filters out packages that have no files // matching our build constraints and other errors. -func lookupCompilablePkgsWithGlob(l ulog.Logger, env *golang.Environ, wd string, patterns ...string) ([]string, error) { +func lookupCompilablePkgsWithGlob(l ulog.Logger, env *golang.Environ, patterns ...string) ([]string, error) { var pkgs []*packages.Package // Batching saves time. Patterns with globs cannot be batched. 
// @@ -404,7 +243,7 @@ func lookupCompilablePkgsWithGlob(l ulog.Logger, env *golang.Environ, wd string, var batchedPatterns []string for _, pattern := range patterns { if couldBeGlob(pattern) { - ps, err := lookupPkgsWithGlob(env, wd, pattern) + ps, err := lookupPkgsWithGlob(env, pattern) if err != nil { return nil, err } @@ -414,7 +253,7 @@ func lookupCompilablePkgsWithGlob(l ulog.Logger, env *golang.Environ, wd string, } } if len(batchedPatterns) > 0 { - ps, err := lookupPkgNameAndFiles(env, wd, batchedPatterns...) + ps, err := lookupPkgNameAndFiles(env, batchedPatterns...) if err != nil { return nil, err } @@ -432,13 +271,13 @@ func lookupCompilablePkgsWithGlob(l ulog.Logger, env *golang.Environ, wd string, return paths, nil } -func filterGoPaths(l ulog.Logger, env *golang.Environ, wd string, gopathIncludes, gopathExcludes []string) ([]string, error) { - goInc, err := lookupCompilablePkgsWithGlob(l, env, wd, gopathIncludes...) +func filterGoPaths(l ulog.Logger, env *golang.Environ, gopathIncludes, gopathExcludes []string) ([]string, error) { + goInc, err := lookupCompilablePkgsWithGlob(l, env, gopathIncludes...) if err != nil { return nil, err } - goExc, err := lookupCompilablePkgsWithGlob(l, env, wd, gopathExcludes...) + goExc, err := lookupCompilablePkgsWithGlob(l, env, gopathExcludes...) if err != nil { return nil, err } @@ -510,15 +349,10 @@ type Env struct { // // The default is to use UROOT_SOURCE env var. URootSource string - - // WorkingDirectory is the directory used for module-enabled `go list` - // lookups. The go.mod in this directory (or one of its parents) is - // used to resolve Go package paths. - WorkingDirectory string } func (e Env) String() string { - return fmt.Sprintf("GBB_PATH=%s UROOT_SOURCE=%s PWD=%s", strings.Join(e.GBBPath, ":"), e.URootSource, e.WorkingDirectory) + return fmt.Sprintf("GBB_PATH=%s UROOT_SOURCE=%s", strings.Join(e.GBBPath, ":"), e.URootSource) } // DefaultEnv is the default environment derived from environment variables and @@ -531,9 +365,8 @@ func DefaultEnv() Env { gbbPaths = strings.Split(gbbPath, ":") } return Env{ - GBBPath: gbbPaths, - URootSource: os.Getenv("UROOT_SOURCE"), - WorkingDirectory: "", + GBBPath: gbbPaths, + URootSource: os.Getenv("UROOT_SOURCE"), } } @@ -541,56 +374,61 @@ func DefaultEnv() Env { // include globs and returns a valid list of Go commands (either addressed by // Go package path or directory path). // -// It returns only directories that have Go files subject to +// It returns only packages that have Go files subject to // the build constraints in env and logs a "Skipping package {}" statement // about packages that are excluded due to build constraints. // -// ResolveGlobs always returns either an absolute file system path and -// normalized Go package paths. The return list may be mixed. +// ResolveGlobs always returns normalized Go package paths. +// +// ResolveGlobs should work in all cases that `go list` works. // // See NewPackages for allowed formats. 
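To make those formats concrete, a hedged sketch of a ResolveGlobs call mixing a glob include, a `-` exclusion, and a relative path (the command paths are examples only; ulog.Log is assumed to be the stdlib-backed default logger). Note that each pattern string may itself contain several space-separated fields, which ResolveGlobs splits shell-style:

package main

import (
	"fmt"
	"log"

	"github.com/u-root/gobusybox/src/pkg/bb/findpkg"
	"github.com/u-root/gobusybox/src/pkg/golang"
	"github.com/u-root/uio/ulog"
)

func main() {
	paths, err := findpkg.ResolveGlobs(ulog.Log, golang.Default(), findpkg.DefaultEnv(), []string{
		"github.com/u-root/u-root/cmds/core/*",               // Go path glob
		"-github.com/u-root/u-root/cmds/core/installcommand", // exclusion
		"./cmds/exp/*",                                       // relative path glob
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(paths)
}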
func ResolveGlobs(logger ulog.Logger, genv *golang.Environ, env Env, patterns []string) ([]string, error) { if genv == nil { return nil, fmt.Errorf("Go build environment must be specified") } - var dirIncludes []string - var dirExcludes []string - var gopathIncludes []string - var gopathExcludes []string + var includes []string + var excludes []string for _, pattern := range patterns { isExclude := strings.HasPrefix(pattern, "-") if isExclude { pattern = pattern[1:] } - if hasFileMatch, directories := findDirectoryMatches(logger, env, pattern); len(directories) > 0 { - if !isExclude { - dirIncludes = append(dirIncludes, directories...) - } else { - dirExcludes = append(dirExcludes, directories...) - } - } else if !hasFileMatch { - if !isExclude { - gopathIncludes = append(gopathIncludes, pattern) - } else { - gopathExcludes = append(gopathExcludes, pattern) + + fields, err := shell.Fields(pattern, func(_ string) string { + return "" + }) + if err != nil { + fields = []string{pattern} + } + for _, field := range fields { + if match, directories := findDirectoryMatches(logger, env, field); len(directories) > 0 { + if !isExclude { + includes = append(includes, directories...) + } else { + excludes = append(excludes, directories...) + } + } else if !match { + if !isExclude { + includes = append(includes, field) + } else { + excludes = append(excludes, field) + } } } } - directories, err := filterDirectoryPaths(logger, genv, dirIncludes, dirExcludes) - if err != nil { - return nil, err - } - - gopaths, err := filterGoPaths(logger, genv, env.WorkingDirectory, gopathIncludes, gopathExcludes) + paths, err := filterGoPaths(logger, genv, includes, excludes) if err != nil { + if strings.Contains(err.Error(), "go.mod file not found") { + return nil, fmt.Errorf("%w: gobusybox has removed previous multi-module functionality in favor of Go workspaces -- read https://github.com/u-root/gobusybox#path-resolution--multi-module-builds for more", err) + } return nil, err } - result := append(directories, gopaths...) - if len(result) == 0 { + if len(paths) == 0 { return nil, errNoMatch } - sort.Strings(result) - return result, nil + sort.Strings(paths) + return paths, nil } diff --git a/vendor/github.com/u-root/gobusybox/src/pkg/golang/build.go b/vendor/github.com/u-root/gobusybox/src/pkg/golang/build.go index 35bed508e9..ae4ba47bb5 100644 --- a/vendor/github.com/u-root/gobusybox/src/pkg/golang/build.go +++ b/vendor/github.com/u-root/gobusybox/src/pkg/golang/build.go @@ -38,17 +38,22 @@ type Environ struct { GBBDEBUG bool } +// Copy makes a copy of Environ with the given changes. +func (c *Environ) Copy(opts ...Opt) *Environ { + e := &Environ{ + Context: c.Context, + GO111MODULE: c.GO111MODULE, + Mod: c.Mod, + GBBDEBUG: c.GBBDEBUG, + } + e.Apply(opts...) + return e +} + // RegisterFlags registers flags for Environ. func (c *Environ) RegisterFlags(f *flag.FlagSet) { - arg := (*uflag.Strings)(&c.BuildTags) - f.Var(arg, "go-build-tags", "Go build tags") - - mod := (*string)(&c.Mod) - defMod := "" - if c.GO111MODULE != "off" { - defMod = "readonly" - } - f.StringVar(mod, "go-mod", defMod, "Value of -mod to go commands (allowed: (empty), vendor, mod, readonly)") + f.Var((*uflag.Strings)(&c.BuildTags), "go-build-tags", "Go build tags") + f.StringVar((*string)(&c.Mod), "go-mod", string(c.Mod), "Value of -mod to go commands (allowed: (empty), vendor, mod, readonly)") } // Valid returns an error if GOARCH, GOROOT, or GOOS are unset. 
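The Copy method introduced above is what BuildBusybox now leans on: derive a throwaway GOPATH-mode environment without mutating the caller's. A minimal sketch (paths are illustrative):

package example

import "github.com/u-root/gobusybox/src/pkg/golang"

func gopathEnv() *golang.Environ {
	base := golang.Default(golang.DisableCGO(), golang.WithGOARCH("arm64"))
	// Mirror the BuildBusybox call site: module mode off, GOPATH pointed
	// at the generated tree, -mod cleared. base itself stays untouched.
	return base.Copy(
		golang.WithGO111MODULE("off"),
		golang.WithGOPATH("/tmp/bb-tree"),
		golang.WithMod(""),
	)
}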
@@ -88,11 +93,31 @@ func DisableCGO() Opt { // WithGOARCH is an option that overrides GOARCH. func WithGOARCH(goarch string) Opt { + if goarch == "" { + return nil + } return func(c *Environ) { c.GOARCH = goarch } } +// WithGOOS is an option that overrides GOOS. +func WithGOOS(goos string) Opt { + if goos == "" { + return nil + } + return func(c *Environ) { + c.GOOS = goos + } +} + +// WithBuildTag is an option that appends build tags. +func WithBuildTag(tag ...string) Opt { + return func(c *Environ) { + c.BuildTags = append(c.BuildTags, tag...) + } +} + // WithGOPATH is an option that overrides GOPATH. func WithGOPATH(gopath string) Opt { return func(c *Environ) { @@ -114,6 +139,20 @@ func WithGO111MODULE(go111module string) Opt { } } +// WithMod is an option that overrides module behavior. +func WithMod(mod ModBehavior) Opt { + return func(c *Environ) { + c.Mod = mod + } +} + +// WithWorkingDir sets the working directory for calls to `go`. +func WithWorkingDir(wd string) Opt { + return func(c *Environ) { + c.Dir = wd + } +} + // Default is the default build environment comprised of the default GOPATH, // GOROOT, GOOS, GOARCH, and CGO_ENABLED values. func Default(opts ...Opt) *Environ { @@ -122,10 +161,6 @@ func Default(opts ...Opt) *Environ { GO111MODULE: os.Getenv("GO111MODULE"), GBBDEBUG: parseBool(os.Getenv("GBBDEBUG")), } - - if env.GO111MODULE != "off" { - env.Mod = ModReadonly - } env.Apply(opts...) return env } @@ -140,11 +175,11 @@ func (c *Environ) Apply(opts ...Opt) { } // Lookup looks up packages by patterns relative to dir, using the Go environment from c. -func (c *Environ) Lookup(mode packages.LoadMode, dir string, patterns ...string) ([]*packages.Package, error) { +func (c *Environ) Lookup(mode packages.LoadMode, patterns ...string) ([]*packages.Package, error) { cfg := &packages.Config{ Mode: mode, Env: append(os.Environ(), c.Env()...), - Dir: dir, + Dir: c.Dir, } if len(c.Context.BuildTags) > 0 { tags := fmt.Sprintf("-tags=%s", strings.Join(c.Context.BuildTags, ",")) @@ -164,6 +199,7 @@ func (c Environ) GoCmd(gocmd string, args ...string) *exec.Cmd { if c.GBBDEBUG { log.Printf("GBB Go invocation: %s %s %#v", c, goBin, args) } + cmd.Dir = c.Dir cmd.Env = append(os.Environ(), c.Env()...) return cmd } @@ -261,13 +297,10 @@ func (b *BuildOpts) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&b.NoStrip, "go-no-strip", false, "Do not strip symbols & Build ID from the binary (will not produce a reproducible binary)") f.BoolVar(&b.EnableInlining, "go-enable-inlining", false, "Enable inlining (will likely produce a larger binary)") f.BoolVar(&b.NoTrimPath, "go-no-trimpath", false, "Disable -trimpath (will not produce a reproducible binary)") - arg := (*uflag.Strings)(&b.ExtraArgs) - f.Var(arg, "go-extra-args", "Extra args to 'go build'") + f.Var((*uflag.Strings)(&b.ExtraArgs), "go-extra-args", "Extra args to 'go build'") } -// BuildDir compiles the package in the directory `dirPath`, writing the build -// object to `binaryPath`. -func (c Environ) BuildDir(dirPath string, binaryPath string, opts *BuildOpts) error { +func (c Environ) build(dirPath string, binaryPath string, pattern []string, opts *BuildOpts) error { args := []string{ // Force rebuilding of packages. "-a", @@ -302,14 +335,26 @@ func (c Environ) BuildDir(dirPath string, binaryPath string, opts *BuildOpts) er if len(c.BuildTags) > 0 { args = append(args, []string{"-tags", strings.Join(c.BuildTags, " ")}...) } - // We always set the working directory, so this is always '.'. 
- args = append(args, ".") + args = append(args, pattern...) cmd := c.GoCmd("build", args...) - cmd.Dir = dirPath + if dirPath != "" { + cmd.Dir = dirPath + } if o, err := cmd.CombinedOutput(); err != nil { return fmt.Errorf("error building go package in %q: %v, %v", dirPath, string(o), err) } return nil } + +// BuildDir compiles the package in the directory `dirPath`, writing the build +// object to `binaryPath`. +func (c Environ) BuildDir(dirPath string, binaryPath string, opts *BuildOpts) error { + return c.build(dirPath, binaryPath, []string{"."}, opts) +} + +// Build compiles the pattern. +func (c Environ) Build(binaryPath string, pattern []string, opts *BuildOpts) error { + return c.build("", binaryPath, pattern, opts) +} diff --git a/vendor/github.com/u-root/mkuimage/uimage/builder/binary.go b/vendor/github.com/u-root/mkuimage/uimage/builder/binary.go index 431b7f9902..9b1bd27fc1 100644 --- a/vendor/github.com/u-root/mkuimage/uimage/builder/binary.go +++ b/vendor/github.com/u-root/mkuimage/uimage/builder/binary.go @@ -9,33 +9,10 @@ import ( "path/filepath" "sync" - "github.com/u-root/gobusybox/src/pkg/golang" "github.com/u-root/mkuimage/uimage/initramfs" "github.com/u-root/uio/llog" - "golang.org/x/tools/go/packages" ) -func dirFor(env *golang.Environ, pkg string) (string, error) { - pkgs, err := env.Lookup(packages.NeedName|packages.NeedFiles, "", pkg) - if err != nil { - return "", fmt.Errorf("failed to look up package %q: %v", pkg, err) - } - - // One directory = one package in standard Go, so - // finding the first file's parent directory should - // find us the package directory. - var dir string - for _, p := range pkgs { - if len(p.GoFiles) > 0 { - dir = filepath.Dir(p.GoFiles[0]) - } - } - if dir == "" { - return "", fmt.Errorf("%w for %q", ErrNoGoFiles, pkg) - } - return dir, nil -} - // BinaryBuilder builds each Go command as a separate binary. // // BinaryBuilder is an implementation of Builder. @@ -68,14 +45,9 @@ func (b BinaryBuilder) Build(l *llog.Logger, af *initramfs.Files, opts Opts) err wg.Add(1) go func(p string) { defer wg.Done() - dir, err := dirFor(opts.Env, p) - if err != nil { - result <- err - return - } - result <- opts.Env.BuildDir( - dir, + result <- opts.Env.Build( filepath.Join(opts.TempDir, binaryDir, filepath.Base(p)), + []string{p}, opts.BuildOpts) }(pkg) } @@ -85,7 +57,7 @@ func (b BinaryBuilder) Build(l *llog.Logger, af *initramfs.Files, opts Opts) err for err := range result { if err != nil { - return err + return fmt.Errorf("%w: %w", ErrBinaryFailed, err) } } diff --git a/vendor/github.com/u-root/mkuimage/uimage/builder/builder.go b/vendor/github.com/u-root/mkuimage/uimage/builder/builder.go index 4ee070a0b9..e2bcc3252f 100644 --- a/vendor/github.com/u-root/mkuimage/uimage/builder/builder.go +++ b/vendor/github.com/u-root/mkuimage/uimage/builder/builder.go @@ -16,17 +16,18 @@ import ( var ( // Busybox is a shared GBBBuilder instance. - Busybox = GBBBuilder{} + Busybox = &GBBBuilder{} // Binary is a shared BinaryBuilder instance. - Binary = BinaryBuilder{} + Binary = &BinaryBuilder{} ) // Possible build errors. var ( + ErrBusyboxFailed = errors.New("gobusybox build failed") + ErrBinaryFailed = errors.New("binary build failed") ErrEnvMissing = errors.New("must specify Go build environment") ErrTempDirMissing = errors.New("must supply temporary directory for build") - ErrNoGoFiles = errors.New("could not find package directory") ) // Opts are options passed to the Builder.Build function. 
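BinaryBuilder (below) no longer needs a dirFor lookup because the new Environ.Build hands the package pattern straight to `go build`, which resolves it itself. A sketch of the equivalent single-command build (output path and import path are examples):

package example

import "github.com/u-root/gobusybox/src/pkg/golang"

func buildIP() error {
	env := golang.Default(golang.DisableCGO())
	// `go build` resolves the import path from the module context in
	// env.Dir; no package-directory lookup is needed first.
	return env.Build(
		"/tmp/initramfs.d/bbin/ip",
		[]string{"github.com/u-root/u-root/cmds/core/ip"},
		&golang.BuildOpts{},
	)
}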
diff --git a/vendor/github.com/u-root/mkuimage/uimage/builder/gbb.go b/vendor/github.com/u-root/mkuimage/uimage/builder/gbb.go index a6d7e79901..9b05bf2b60 100644 --- a/vendor/github.com/u-root/mkuimage/uimage/builder/gbb.go +++ b/vendor/github.com/u-root/mkuimage/uimage/builder/gbb.go @@ -5,7 +5,6 @@ package builder import ( - "errors" "fmt" "log/slog" "path" @@ -48,7 +47,7 @@ func (GBBBuilder) DefaultBinaryDir() string { } // Build is an implementation of Builder.Build for a busybox-like initramfs. -func (b GBBBuilder) Build(l *llog.Logger, af *initramfs.Files, opts Opts) error { +func (b *GBBBuilder) Build(l *llog.Logger, af *initramfs.Files, opts Opts) error { // Build the busybox binary. if len(opts.TempDir) == 0 { return ErrTempDirMissing @@ -71,21 +70,7 @@ func (b GBBBuilder) Build(l *llog.Logger, af *initramfs.Files, opts Opts) error GoBuildOpts: opts.BuildOpts, } if err := bb.BuildBusybox(l.AtLevel(slog.LevelInfo), bopts); err != nil { - // Return some instructions for the user; this is printed last in the u-root tool. - // - // TODO: yeah, this isn't a good way to do error handling. The - // error should be the thing that's returned, I just wanted - // that to be printed first, and the instructions for what to - // do about it to be last. - var errGopath *bb.ErrGopathBuild - var errGomod *bb.ErrModuleBuild - if errors.As(err, &errGopath) { - return fmt.Errorf("preserving bb generated source directory at %s due to error. To reproduce build, `cd %s` and `GO111MODULE=off GOPATH=%s go build`: %w", opts.TempDir, errGopath.CmdDir, errGopath.GOPATH, err) - } - if errors.As(err, &errGomod) { - return fmt.Errorf("preserving bb generated source directory at %s due to error. To debug build, `cd %s` and use `go build` to build, or `go mod [why|tidy|graph]` to debug dependencies, or `go list -m all` to list all dependency versions:\n%w", opts.TempDir, errGomod.CmdDir, err) - } - return fmt.Errorf("preserving bb generated source directory at %s due to error:\n%w", opts.TempDir, err) + return fmt.Errorf("%w: %w", ErrBusyboxFailed, err) } if err := af.AddFile(bbPath, "bbin/bb"); err != nil { diff --git a/vendor/github.com/u-root/mkuimage/uimage/mkuimage/cmd.go b/vendor/github.com/u-root/mkuimage/uimage/mkuimage/cmd.go new file mode 100644 index 0000000000..ecfb99b25d --- /dev/null +++ b/vendor/github.com/u-root/mkuimage/uimage/mkuimage/cmd.go @@ -0,0 +1,117 @@ +// Copyright 2024 the u-root Authors. All rights reserved +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mkuimage + +import ( + "errors" + "fmt" + "os" + "runtime" + "strings" + + "github.com/u-root/mkuimage/uimage" + "github.com/u-root/mkuimage/uimage/builder" + "github.com/u-root/mkuimage/uimage/templates" + "github.com/u-root/uio/llog" +) + +var recommendedVersions = []string{ + "go1.20", + "go1.21", + "go1.22", +} + +func isRecommendedVersion(v string) bool { + for _, r := range recommendedVersions { + if strings.HasPrefix(v, r) { + return true + } + } + return false +} + +func uimageOpts(l *llog.Logger, m []uimage.Modifier, tpl *templates.Templates, f *Flags, conf string, cmds []string) (*uimage.Opts, error) { + // Evaluate template first -- template settings may always be + // appended/overridden by further flag-based settings. + if conf != "" { + mods, err := tpl.Uimage(conf) + if err != nil { + return nil, err + } + l.Debugf("Config: %#v", tpl.Configs[conf]) + m = append(m, mods...) + } + + // Expand command templates. 
+ if tpl != nil { + cmds = tpl.CommandsFor(cmds...) + } + + more, err := f.Modifiers(cmds...) + if err != nil { + return nil, err + } + return uimage.OptionsFor(append(m, more...)...) +} + +// CreateUimage creates a uimage with the given base modifiers and flags, using args as the list of commands. +func CreateUimage(l *llog.Logger, base []uimage.Modifier, tf *TemplateFlags, f *Flags, args []string) error { + tpl, err := tf.Get() + if err != nil { + return fmt.Errorf("failed to get template: %w", err) + } + + keepTempDir := f.KeepTempDir + if f.TempDir == nil { + tempDir, err := os.MkdirTemp("", "u-root") + if err != nil { + return err + } + f.TempDir = &tempDir + defer func() { + if keepTempDir { + l.Infof("Keeping temp dir %s", tempDir) + } else { + os.RemoveAll(tempDir) + } + }() + } else if _, err := os.Stat(*f.TempDir); os.IsNotExist(err) { + if err := os.MkdirAll(*f.TempDir, 0o755); err != nil { + return fmt.Errorf("temporary directory %q did not exist; tried to mkdir but failed: %v", *f.TempDir, err) + } + } + + opts, err := uimageOpts(l, base, tpl, f, tf.Config, args) + if err != nil { + return err + } + + env := opts.Env + + l.Infof("Build environment: %s", env) + if env.GOOS != "linux" { + l.Warnf("GOOS is not linux. Did you mean to set GOOS=linux?") + } + + v, err := env.Version() + if err != nil { + l.Infof("Could not get environment's Go version, using runtime's version: %v", err) + v = runtime.Version() + } + if !isRecommendedVersion(v) { + l.Warnf(`You are not using one of the recommended Go versions (have = %s, recommended = %v). + Some packages may not compile. + Go to https://golang.org/doc/install to find out how to install a newer version of Go, + or use https://godoc.org/golang.org/dl/%s to install an additional version of Go.`, + v, recommendedVersions, recommendedVersions[0]) + } + + err = opts.Create(l) + if errors.Is(err, builder.ErrBusyboxFailed) { + l.Errorf("Preserving temp dir due to busybox build error") + keepTempDir = true + } + return err +} diff --git a/vendor/github.com/u-root/mkuimage/uimage/mkuimage/uflags.go b/vendor/github.com/u-root/mkuimage/uimage/mkuimage/uflags.go new file mode 100644 index 0000000000..100003dc3f --- /dev/null +++ b/vendor/github.com/u-root/mkuimage/uimage/mkuimage/uflags.go @@ -0,0 +1,204 @@ +// Copyright 2024 the u-root Authors. All rights reserved +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package mkuimage defines mkuimage flags and creation function. +package mkuimage + +import ( + "flag" + "fmt" + "os" + + "github.com/u-root/gobusybox/src/pkg/golang" + "github.com/u-root/gobusybox/src/pkg/uflag" + "github.com/u-root/mkuimage/uimage" + "github.com/u-root/mkuimage/uimage/builder" + "github.com/u-root/mkuimage/uimage/templates" +) + +type optionalStringVar struct { + s **string +} + +// Set implements flag.Value.Set. +func (o optionalStringVar) Set(s string) error { + *o.s = &s + return nil +} + +func (o *optionalStringVar) String() string { + if o == nil || o.s == nil || *(o.s) == nil { + return "" + } + return **(o.s) +} + +// CommandFlags are flags related to Go commands to be built by mkuimage. +type CommandFlags struct { + NoCommands bool + Builder string + ShellBang bool + Mod golang.ModBehavior + BuildTags []string + BuildOpts *golang.BuildOpts +} + +// RegisterFlags registers flags related to Go commands being built. +func (c *CommandFlags) RegisterFlags(f *flag.FlagSet) { + f.StringVar(&c.Builder, "build", c.Builder, "uimage command build format (e.g. 
bb/gbb or binary).") + f.BoolVar(&c.NoCommands, "nocmd", c.NoCommands, "Build no Go commands; initramfs only") + f.BoolVar(&c.ShellBang, "shellbang", c.ShellBang, "Use #! instead of symlinks for busybox") + if c.BuildOpts == nil { + c.BuildOpts = &golang.BuildOpts{} + } + c.BuildOpts.RegisterFlags(f) + // Register an alias for -go-no-strip for backwards compatibility. + f.BoolVar(&c.BuildOpts.NoStrip, "no-strip", false, "Build unstripped binaries") + + // Flags for golang.Environ. + f.StringVar((*string)(&c.Mod), "go-mod", string(c.Mod), "Value of -mod to go commands (allowed: (empty), vendor, mod, readonly)") + // Register an alias for -go-build-tags for backwards compatibility. + f.Var((*uflag.Strings)(&c.BuildTags), "tags", "Go build tags -- repeat the flag for multiple values") + f.Var((*uflag.Strings)(&c.BuildTags), "go-build-tags", "Go build tags -- repeat the flag for multiple values") +} + +// Modifiers turns the flag values into uimage modifiers. +func (c *CommandFlags) Modifiers(packages ...string) ([]uimage.Modifier, error) { + if c.NoCommands { + // Later modifiers may still add packages, so let's set the right environment. + return []uimage.Modifier{ + uimage.WithEnv(golang.WithBuildTag(c.BuildTags...), func(e *golang.Environ) { + e.Mod = c.Mod + }), + }, nil + } + + switch c.Builder { + case "bb", "gbb": + return []uimage.Modifier{ + uimage.WithEnv(golang.WithBuildTag(c.BuildTags...), func(e *golang.Environ) { + e.Mod = c.Mod + }), + uimage.WithBusyboxCommands(packages...), + uimage.WithShellBang(c.ShellBang), + uimage.WithBusyboxBuildOpts(c.BuildOpts), + }, nil + case "binary": + return []uimage.Modifier{ + uimage.WithEnv(golang.WithBuildTag(c.BuildTags...), func(e *golang.Environ) { + e.Mod = c.Mod + }), + uimage.WithCommands(c.BuildOpts, builder.Binary, packages...), + }, nil + default: + return nil, fmt.Errorf("%w: could not find binary builder format %q", os.ErrInvalid, c.Builder) + } +} + +// String can be used to fill in values for Init, Uinit, and Shell. +func String(s string) *string { + return &s +} + +// Flags are mkuimage command-line flags. +type Flags struct { + TempDir *string + KeepTempDir bool + + Init *string + Uinit *string + Shell *string + + Files []string + + BaseArchive string + ArchiveFormat string + OutputFile string + UseExistingInit bool + + Commands CommandFlags +} + +// Modifiers return uimage modifiers created from the flags. +func (f *Flags) Modifiers(packages ...string) ([]uimage.Modifier, error) { + m := []uimage.Modifier{ + uimage.WithFiles(f.Files...), + uimage.WithExistingInit(f.UseExistingInit), + } + if f.TempDir != nil { + m = append(m, uimage.WithTempDir(*f.TempDir)) + } + if f.BaseArchive != "" { + // ArchiveFormat does not determine this, as only CPIO is supported. + m = append(m, uimage.WithBaseFile(f.BaseArchive)) + } + if f.Init != nil { + m = append(m, uimage.WithInit(*f.Init)) + } + if f.Uinit != nil { + m = append(m, uimage.WithUinitCommand(*f.Uinit)) + } + if f.Shell != nil { + m = append(m, uimage.WithShell(*f.Shell)) + } + switch f.ArchiveFormat { + case "cpio": + m = append(m, uimage.WithCPIOOutput(f.OutputFile)) + case "dir": + m = append(m, uimage.WithOutputDir(f.OutputFile)) + default: + return nil, fmt.Errorf("%w: could not find output format %q", os.ErrInvalid, f.ArchiveFormat) + } + more, err := f.Commands.Modifiers(packages...) + if err != nil { + return nil, err + } + return append(m, more...), nil +} + +// RegisterFlags registers flags. 
+func (f *Flags) RegisterFlags(fs *flag.FlagSet) { + fs.Var(&optionalStringVar{&f.TempDir}, "tmp-dir", "Temporary directory to build binary and archive in. Deleted after build if --keep-tmp-dir is not set.") + fs.BoolVar(&f.KeepTempDir, "keep-tmp-dir", f.KeepTempDir, "Keep temporary directory after build") + + fs.Var(&optionalStringVar{&f.Init}, "initcmd", "Symlink target for /init. Can be an absolute path or a Go command name. Use initcmd=\"\" if you don't want the symlink.") + fs.Var(&optionalStringVar{&f.Uinit}, "uinitcmd", "Symlink target and arguments for /bin/uinit. Can be an absolute path or a Go command name, followed by command-line args. Use uinitcmd=\"\" if you don't want the symlink. E.g. -uinitcmd=\"echo foobar\"") + fs.Var(&optionalStringVar{&f.Shell}, "defaultsh", "Default shell. Can be an absolute path or a Go command name. Use defaultsh=\"\" if you don't want the symlink.") + + fs.Var((*uflag.Strings)(&f.Files), "files", "Additional files, directories, and binaries (with their ldd dependencies) to add to archive. Can be specified multiple times.") + + fs.StringVar(&f.BaseArchive, "base", f.BaseArchive, "Base archive to add files to. By default, this is a couple of directories like /bin, /etc, etc. Has a default internally supplied set of files; use base=/dev/null if you don't want any base files.") + fs.StringVar(&f.ArchiveFormat, "format", f.ArchiveFormat, "Archival input (for -base) and output (for -o) format.") + fs.StringVar(&f.OutputFile, "o", f.OutputFile, "Path to output initramfs file.") + fs.BoolVar(&f.UseExistingInit, "useinit", f.UseExistingInit, "Use existing init from base archive (only if --base was specified).") + fs.BoolVar(&f.UseExistingInit, "use-init", f.UseExistingInit, "Use existing init from base archive (only if --base was specified).") + + f.Commands.RegisterFlags(fs) +} + +// TemplateFlags are flags for uimage config templates. +type TemplateFlags struct { + File string + Config string +} + +// RegisterFlags registers template flags. +func (tc *TemplateFlags) RegisterFlags(f *flag.FlagSet) { + f.StringVar(&tc.Config, "config", "", "Config to pick from templates") + f.StringVar(&tc.File, "config-file", "", "Config file to read from (default: finds .mkuimage.yaml in cwd or parents)") +} + +// Get turns template flags into templates. +func (tc *TemplateFlags) Get() (*templates.Templates, error) { + if tc.File != "" { + return templates.TemplateFromFile(tc.File) + } + + tpl, err := templates.Template() + // Only complain about not finding a template if user requested a templated config. + if err != nil && tc.Config != "" { + return nil, err + } + return tpl, nil +} diff --git a/vendor/github.com/u-root/mkuimage/uimage/templates/templates.go b/vendor/github.com/u-root/mkuimage/uimage/templates/templates.go new file mode 100644 index 0000000000..f0fce96108 --- /dev/null +++ b/vendor/github.com/u-root/mkuimage/uimage/templates/templates.go @@ -0,0 +1,152 @@ +// Copyright 2024 the u-root Authors. All rights reserved +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package templates defines a uimage template configuration file parser. +package templates + +import ( + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/u-root/gobusybox/src/pkg/golang" + "github.com/u-root/mkuimage/uimage" + "gopkg.in/yaml.v3" +) + +// ErrTemplateNotExist is returned when the given config name did not exist. 
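Taken together, the flag types above reduce a mkuimage-style main to a few lines. A hedged sketch; the defaults chosen here are illustrative, and llog.Default is assumed from u-root/uio/llog:

package main

import (
	"flag"
	"log"
	"os"

	"github.com/u-root/mkuimage/uimage/mkuimage"
	"github.com/u-root/uio/llog"
)

func main() {
	f := &mkuimage.Flags{
		Commands:      mkuimage.CommandFlags{Builder: "bb"},
		ArchiveFormat: "cpio",
		OutputFile:    "/tmp/initramfs.cpio",
	}
	tf := &mkuimage.TemplateFlags{}

	fs := flag.NewFlagSet("mkuimage", flag.ExitOnError)
	f.RegisterFlags(fs)
	tf.RegisterFlags(fs)
	if err := fs.Parse(os.Args[1:]); err != nil {
		log.Fatal(err)
	}

	l := llog.Default()
	// Remaining args are command paths or template names, e.g. "core".
	if err := mkuimage.CreateUimage(l, nil, tf, f, fs.Args()); err != nil {
		l.Errorf("Build failed: %v", err)
		os.Exit(1)
	}
}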
+var ErrTemplateNotExist = errors.New("config template does not exist") + +func findConfigFile(name string) (string, error) { + dir, err := os.Getwd() + if err != nil { + return "", err + } + for dir != "/" { + p := filepath.Join(dir, name) + if _, err := os.Stat(p); err == nil { + return p, nil + } + dir = filepath.Dir(dir) + } + return "", fmt.Errorf("%w: could not find %s in current directory or any parent", os.ErrNotExist, name) +} + +// Command represents commands to build. +type Command struct { + // Builder is bb, gbb, or binary. + // + // Defaults to bb if not given. + Builder string + + // Commands are commands or template names. + Commands []string +} + +// Config is a mkuimage build configuration. +type Config struct { + GOOS string + GOARCH string + BuildTags []string `yaml:"build_tags"` + Commands []Command + Files []string + Init *string + Uinit *string + Shell *string +} + +// Templates are a set of mkuimage build configs and command templates. +type Templates struct { + Configs map[string]Config + + // Commands defines a set of command template name -> commands to expand. + Commands map[string][]string +} + +// Uimage returns the uimage modifiers for the given templated config name. +func (t *Templates) Uimage(config string) ([]uimage.Modifier, error) { + if config == "" { + return nil, nil + } + if t == nil { + return nil, fmt.Errorf("%w: no templates parsed", ErrTemplateNotExist) + } + c, ok := t.Configs[config] + if !ok { + return nil, fmt.Errorf("%w: %q", ErrTemplateNotExist, config) + } + m := []uimage.Modifier{ + uimage.WithFiles(c.Files...), + uimage.WithEnv( + golang.WithGOOS(c.GOOS), + golang.WithGOARCH(c.GOARCH), + golang.WithBuildTag(c.BuildTags...), + ), + } + if c.Init != nil { + m = append(m, uimage.WithInit(*c.Init)) + } + if c.Uinit != nil { + m = append(m, uimage.WithUinitCommand(*c.Uinit)) + } + if c.Shell != nil { + m = append(m, uimage.WithShell(*c.Shell)) + } + for _, cmds := range c.Commands { + switch cmds.Builder { + case "binary": + m = append(m, uimage.WithBinaryCommands(t.CommandsFor(cmds.Commands...)...)) + case "bb", "gbb": + fallthrough + default: + m = append(m, uimage.WithBusyboxCommands(t.CommandsFor(cmds.Commands...)...)) + } + } + return m, nil +} + +// CommandsFor expands commands according to command templates. +func (t *Templates) CommandsFor(names ...string) []string { + if t == nil { + return names + } + var c []string + for _, name := range names { + cmds, ok := t.Commands[name] + if ok { + c = append(c, cmds...) + } else { + c = append(c, name) + } + } + return c +} + +// TemplateFrom parses a template from bytes. +func TemplateFrom(b []byte) (*Templates, error) { + var tpl Templates + if err := yaml.Unmarshal(b, &tpl); err != nil { + return nil, err + } + return &tpl, nil +} + +// Template parses the first file named .mkuimage.yaml in the current directory or any of its parents. +func Template() (*Templates, error) { + p, err := findConfigFile(".mkuimage.yaml") + if err != nil { + return nil, fmt.Errorf("%w: no templates found", os.ErrNotExist) + } + return TemplateFromFile(p) +} + +// TemplateFromFile parses a template from the given file. 
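A hypothetical template, fed through TemplateFrom, shows how the YAML maps onto Configs and Commands; field names follow yaml's default lowercase mapping except the explicitly tagged build_tags:

package example

import (
	"log"

	"github.com/u-root/mkuimage/uimage/templates"
)

func netbootMods() {
	tpl, err := templates.TemplateFrom([]byte(`
commands:
  mycmds:
    - github.com/u-root/u-root/cmds/core/init
    - github.com/u-root/u-root/cmds/core/gosh

configs:
  netboot:
    goarch: arm64
    build_tags: [netgo]
    init: init
    shell: gosh
    commands:
      - builder: bb
        commands: [mycmds]
`))
	if err != nil {
		log.Fatal(err)
	}
	// "mycmds" expands via Templates.Commands; "netboot" selects the config.
	mods, err := tpl.Uimage("netboot")
	if err != nil {
		log.Fatal(err)
	}
	_ = mods
}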
+func TemplateFromFile(p string) (*Templates, error) { + b, err := os.ReadFile(p) + if err != nil { + return nil, err + } + return TemplateFrom(b) +} diff --git a/vendor/github.com/u-root/mkuimage/uimage/uimage.go b/vendor/github.com/u-root/mkuimage/uimage/uimage.go index edb2b2684e..bd5bc5cfcb 100644 --- a/vendor/github.com/u-root/mkuimage/uimage/uimage.go +++ b/vendor/github.com/u-root/mkuimage/uimage/uimage.go @@ -286,6 +286,14 @@ func WithSkipLDD() Modifier { } } +// WithReplaceEnv replaces the Go build environment. +func WithReplaceEnv(env *golang.Environ) Modifier { + return func(o *Opts) error { + o.Env = env + return nil + } +} + // WithEnv alters the Go build environment (e.g. build tags, GOARCH, GOOS env vars). func WithEnv(gopts ...golang.Opt) Modifier { return func(o *Opts) error { @@ -370,6 +378,55 @@ func WithBusyboxCommands(cmd ...string) Modifier { } } +// WithShellBang directs the busybox builder to use #! instead of symlinks. +func WithShellBang(b bool) Modifier { + return func(o *Opts) error { + for i, cmd := range o.Commands { + if _, ok := cmd.Builder.(*builder.GBBBuilder); ok { + // Make a copy, because the same object may + // have been used in other builds. + o.Commands[i].Builder = &builder.GBBBuilder{ + ShellBang: b, + } + return nil + } + } + + // Otherwise, add an empty builder with no packages. + // AddBusyboxCommands/WithBusyboxCommands will append to this. + // + // Yeah, it's a hack, sue me. + o.Commands = append(o.Commands, Commands{ + Builder: &builder.GBBBuilder{ShellBang: b}, + }) + return nil + } +} + +// WithBusyboxBuildOpts directs the busybox builder to use the given build opts. +// +// Overrides any previously defined build options. +func WithBusyboxBuildOpts(g *golang.BuildOpts) Modifier { + return func(o *Opts) error { + for i, cmd := range o.Commands { + if _, ok := cmd.Builder.(*builder.GBBBuilder); ok { + o.Commands[i].BuildOpts = g + return nil + } + } + + // Otherwise, add an empty builder with no packages. + // AddBusyboxCommands/WithBusyboxCommands will append to this. + // + // Yeah, it's a hack, sue me. + o.Commands = append(o.Commands, Commands{ + Builder: &builder.GBBBuilder{}, + BuildOpts: g, + }) + return nil + } +} + // WithBinaryCommands adds Go commands to compile as individual binaries and // add to the archive. // @@ -402,6 +459,16 @@ func WithOutput(w initramfs.WriteOpener) Modifier { } } +// WithExistingInit sets whether an existing init from BaseArchive should remain the init. +// +// If not, it will be renamed inito. +func WithExistingInit(use bool) Modifier { + return func(o *Opts) error { + o.UseExistingInit = use + return nil + } +} + // WithCPIOOutput sets the archive output file to be a CPIO created at the given path. func WithCPIOOutput(path string) Modifier { if path == "" { @@ -410,6 +477,11 @@ func WithCPIOOutput(path string) Modifier { return WithOutput(&initramfs.CPIOFile{Path: path}) } +// WithOutputDir sets the archive output to be in the given directory. +func WithOutputDir(path string) Modifier { + return WithOutput(&initramfs.Dir{Path: path}) +} + // WithBase is an existing initramfs to include in the resulting initramfs. func WithBase(base initramfs.ReadOpener) Modifier { return func(o *Opts) error { @@ -446,16 +518,17 @@ func WithBaseArchive(archive *cpio.Archive) Modifier { // If this is empty, no uinit symlink will be created, but a user may // still specify a command called uinit or include a /bin/uinit file. 
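The empty-string shortcut below is dropped because "" is now meaningful: it explicitly clears a previously set uinit rather than being a no-op. Since a Modifier is just a func(*Opts) error, the splitting is easy to see in isolation (a minimal sketch):

package example

import "github.com/u-root/mkuimage/uimage"

func uinitDemo() {
	var o uimage.Opts
	_ = uimage.WithUinitCommand("echo hello world")(&o)
	// o.UinitCmd == "echo", o.UinitArgs == []string{"hello", "world"}
	_ = uimage.WithUinitCommand("")(&o)
	// Now o.UinitCmd == "" and o.UinitArgs == nil: an override, not a no-op.
}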
func WithUinitCommand(cmd string) Modifier { - if cmd == "" { - return nil - } return func(opts *Opts) error { args := shlex.Split(cmd) if len(args) > 0 { opts.UinitCmd = args[0] + } else { + opts.UinitCmd = "" } if len(args) > 1 { opts.UinitArgs = args[1:] + } else { + opts.UinitArgs = nil } return nil } @@ -502,9 +575,6 @@ func WithShell(arg0 string) Modifier { // WithTempDir sets a temporary directory to use for building commands. func WithTempDir(dir string) Modifier { - if dir == "" { - return nil - } return func(o *Opts) error { o.TempDir = dir return nil @@ -542,6 +612,9 @@ func CreateInitramfs(l *llog.Logger, opts Opts) error { // Expand commands. for index, cmds := range opts.Commands { + if len(cmds.Packages) == 0 { + continue + } paths, err := findpkg.ResolveGlobs(l.AtLevel(slog.LevelInfo), env, lookupEnv, cmds.Packages) if err != nil { return fmt.Errorf("%w: %w", errResolvePackage, err) @@ -551,6 +624,9 @@ func CreateInitramfs(l *llog.Logger, opts Opts) error { // Add each build mode's commands to the archive. for _, cmds := range opts.Commands { + if len(cmds.Packages) == 0 { + continue + } builderTmpDir, err := os.MkdirTemp(opts.TempDir, "builder") if err != nil { return err @@ -569,7 +645,7 @@ func CreateInitramfs(l *llog.Logger, opts Opts) error { BinaryDir: cmds.TargetDir(), } if err := cmds.Builder.Build(l, files, bOpts); err != nil { - return fmt.Errorf("error building: %v", err) + return fmt.Errorf("error building: %w", err) } } @@ -771,7 +847,7 @@ func (o *Opts) AddCommands(c ...Commands) { // AddBusyboxCommands adds Go commands to the busybox build. func (o *Opts) AddBusyboxCommands(pkgs ...string) { for i, cmds := range o.Commands { - if cmds.Builder == builder.Busybox { + if _, ok := cmds.Builder.(*builder.GBBBuilder); ok { o.Commands[i].Packages = append(o.Commands[i].Packages, pkgs...) return } diff --git a/vendor/golang.org/x/exp/maps/maps.go b/vendor/golang.org/x/exp/maps/maps.go new file mode 100644 index 0000000000..ecc0dabb74 --- /dev/null +++ b/vendor/golang.org/x/exp/maps/maps.go @@ -0,0 +1,94 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package maps defines various functions useful with maps of any type. +package maps + +// Keys returns the keys of the map m. +// The keys will be in an indeterminate order. +func Keys[M ~map[K]V, K comparable, V any](m M) []K { + r := make([]K, 0, len(m)) + for k := range m { + r = append(r, k) + } + return r +} + +// Values returns the values of the map m. +// The values will be in an indeterminate order. +func Values[M ~map[K]V, K comparable, V any](m M) []V { + r := make([]V, 0, len(m)) + for _, v := range m { + r = append(r, v) + } + return r +} + +// Equal reports whether two maps contain the same key/value pairs. +// Values are compared using ==. +func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool { + if len(m1) != len(m2) { + return false + } + for k, v1 := range m1 { + if v2, ok := m2[k]; !ok || v1 != v2 { + return false + } + } + return true +} + +// EqualFunc is like Equal, but compares values using eq. +// Keys are still compared with ==. +func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool { + if len(m1) != len(m2) { + return false + } + for k, v1 := range m1 { + if v2, ok := m2[k]; !ok || !eq(v1, v2) { + return false + } + } + return true +} + +// Clear removes all entries from m, leaving it empty. 
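+//
+// Editor's note: on Go 1.21+ this matches the built-in clear(m) for maps.
+// A minimal illustration:
+//
+//	m := map[string]int{"a": 1, "b": 2}
+//	maps.Clear(m) // len(m) == 0 afterwards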
+func Clear[M ~map[K]V, K comparable, V any](m M) { + for k := range m { + delete(m, k) + } +} + +// Clone returns a copy of m. This is a shallow clone: +// the new keys and values are set using ordinary assignment. +func Clone[M ~map[K]V, K comparable, V any](m M) M { + // Preserve nil in case it matters. + if m == nil { + return nil + } + r := make(M, len(m)) + for k, v := range m { + r[k] = v + } + return r +} + +// Copy copies all key/value pairs in src adding them to dst. +// When a key in src is already present in dst, +// the value in dst will be overwritten by the value associated +// with the key in src. +func Copy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2) { + for k, v := range src { + dst[k] = v + } +} + +// DeleteFunc deletes any key/value pairs from m for which del returns true. +func DeleteFunc[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool) { + for k, v := range m { + if del(k, v) { + delete(m, k) + } + } +} diff --git a/vendor/golang.org/x/mod/modfile/print.go b/vendor/golang.org/x/mod/modfile/print.go deleted file mode 100644 index 2a0123d4b9..0000000000 --- a/vendor/golang.org/x/mod/modfile/print.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Module file printer. - -package modfile - -import ( - "bytes" - "fmt" - "strings" -) - -// Format returns a go.mod file as a byte slice, formatted in standard style. -func Format(f *FileSyntax) []byte { - pr := &printer{} - pr.file(f) - - // remove trailing blank lines - b := pr.Bytes() - for len(b) > 0 && b[len(b)-1] == '\n' && (len(b) == 1 || b[len(b)-2] == '\n') { - b = b[:len(b)-1] - } - return b -} - -// A printer collects the state during printing of a file or expression. -type printer struct { - bytes.Buffer // output buffer - comment []Comment // pending end-of-line comments - margin int // left margin (indent), a number of tabs -} - -// printf prints to the buffer. -func (p *printer) printf(format string, args ...interface{}) { - fmt.Fprintf(p, format, args...) -} - -// indent returns the position on the current line, in bytes, 0-indexed. -func (p *printer) indent() int { - b := p.Bytes() - n := 0 - for n < len(b) && b[len(b)-1-n] != '\n' { - n++ - } - return n -} - -// newline ends the current line, flushing end-of-line comments. -func (p *printer) newline() { - if len(p.comment) > 0 { - p.printf(" ") - for i, com := range p.comment { - if i > 0 { - p.trim() - p.printf("\n") - for i := 0; i < p.margin; i++ { - p.printf("\t") - } - } - p.printf("%s", strings.TrimSpace(com.Token)) - } - p.comment = p.comment[:0] - } - - p.trim() - if b := p.Bytes(); len(b) == 0 || (len(b) >= 2 && b[len(b)-1] == '\n' && b[len(b)-2] == '\n') { - // skip the blank line at top of file or after a blank line - } else { - p.printf("\n") - } - for i := 0; i < p.margin; i++ { - p.printf("\t") - } -} - -// trim removes trailing spaces and tabs from the current line. -func (p *printer) trim() { - // Remove trailing spaces and tabs from line we're about to end. - b := p.Bytes() - n := len(b) - for n > 0 && (b[n-1] == '\t' || b[n-1] == ' ') { - n-- - } - p.Truncate(n) -} - -// file formats the given file into the print buffer. 
-func (p *printer) file(f *FileSyntax) { - for _, com := range f.Before { - p.printf("%s", strings.TrimSpace(com.Token)) - p.newline() - } - - for i, stmt := range f.Stmt { - switch x := stmt.(type) { - case *CommentBlock: - // comments already handled - p.expr(x) - - default: - p.expr(x) - p.newline() - } - - for _, com := range stmt.Comment().After { - p.printf("%s", strings.TrimSpace(com.Token)) - p.newline() - } - - if i+1 < len(f.Stmt) { - p.newline() - } - } -} - -func (p *printer) expr(x Expr) { - // Emit line-comments preceding this expression. - if before := x.Comment().Before; len(before) > 0 { - // Want to print a line comment. - // Line comments must be at the current margin. - p.trim() - if p.indent() > 0 { - // There's other text on the line. Start a new line. - p.printf("\n") - } - // Re-indent to margin. - for i := 0; i < p.margin; i++ { - p.printf("\t") - } - for _, com := range before { - p.printf("%s", strings.TrimSpace(com.Token)) - p.newline() - } - } - - switch x := x.(type) { - default: - panic(fmt.Errorf("printer: unexpected type %T", x)) - - case *CommentBlock: - // done - - case *LParen: - p.printf("(") - case *RParen: - p.printf(")") - - case *Line: - p.tokens(x.Token) - - case *LineBlock: - p.tokens(x.Token) - p.printf(" ") - p.expr(&x.LParen) - p.margin++ - for _, l := range x.Line { - p.newline() - p.expr(l) - } - p.margin-- - p.newline() - p.expr(&x.RParen) - } - - // Queue end-of-line comments for printing when we - // reach the end of the line. - p.comment = append(p.comment, x.Comment().Suffix...) -} - -func (p *printer) tokens(tokens []string) { - sep := "" - for _, t := range tokens { - if t == "," || t == ")" || t == "]" || t == "}" { - sep = "" - } - p.printf("%s%s", sep, t) - sep = " " - if t == "(" || t == "[" || t == "{" { - sep = "" - } - } -} diff --git a/vendor/golang.org/x/mod/modfile/read.go b/vendor/golang.org/x/mod/modfile/read.go deleted file mode 100644 index 5b5bb5e115..0000000000 --- a/vendor/golang.org/x/mod/modfile/read.go +++ /dev/null @@ -1,958 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package modfile - -import ( - "bytes" - "errors" - "fmt" - "os" - "strconv" - "strings" - "unicode" - "unicode/utf8" -) - -// A Position describes an arbitrary source position in a file, including the -// file, line, column, and byte offset. -type Position struct { - Line int // line in input (starting at 1) - LineRune int // rune in line (starting at 1) - Byte int // byte in input (starting at 0) -} - -// add returns the position at the end of s, assuming it starts at p. -func (p Position) add(s string) Position { - p.Byte += len(s) - if n := strings.Count(s, "\n"); n > 0 { - p.Line += n - s = s[strings.LastIndex(s, "\n")+1:] - p.LineRune = 1 - } - p.LineRune += utf8.RuneCountInString(s) - return p -} - -// An Expr represents an input element. -type Expr interface { - // Span returns the start and end position of the expression, - // excluding leading or trailing comments. - Span() (start, end Position) - - // Comment returns the comments attached to the expression. - // This method would normally be named 'Comments' but that - // would interfere with embedding a type of the same name. - Comment() *Comments -} - -// A Comment represents a single // comment. 
-type Comment struct { - Start Position - Token string // without trailing newline - Suffix bool // an end of line (not whole line) comment -} - -// Comments collects the comments associated with an expression. -type Comments struct { - Before []Comment // whole-line comments before this expression - Suffix []Comment // end-of-line comments after this expression - - // For top-level expressions only, After lists whole-line - // comments following the expression. - After []Comment -} - -// Comment returns the receiver. This isn't useful by itself, but -// a [Comments] struct is embedded into all the expression -// implementation types, and this gives each of those a Comment -// method to satisfy the Expr interface. -func (c *Comments) Comment() *Comments { - return c -} - -// A FileSyntax represents an entire go.mod file. -type FileSyntax struct { - Name string // file path - Comments - Stmt []Expr -} - -func (x *FileSyntax) Span() (start, end Position) { - if len(x.Stmt) == 0 { - return - } - start, _ = x.Stmt[0].Span() - _, end = x.Stmt[len(x.Stmt)-1].Span() - return start, end -} - -// addLine adds a line containing the given tokens to the file. -// -// If the first token of the hint matches the first token of the -// line, the new line is added at the end of the block containing hint, -// extracting hint into a new block if it is not yet in one. -// -// If the hint is non-nil buts its first token does not match, -// the new line is added after the block containing hint -// (or hint itself, if not in a block). -// -// If no hint is provided, addLine appends the line to the end of -// the last block with a matching first token, -// or to the end of the file if no such block exists. -func (x *FileSyntax) addLine(hint Expr, tokens ...string) *Line { - if hint == nil { - // If no hint given, add to the last statement of the given type. - Loop: - for i := len(x.Stmt) - 1; i >= 0; i-- { - stmt := x.Stmt[i] - switch stmt := stmt.(type) { - case *Line: - if stmt.Token != nil && stmt.Token[0] == tokens[0] { - hint = stmt - break Loop - } - case *LineBlock: - if stmt.Token[0] == tokens[0] { - hint = stmt - break Loop - } - } - } - } - - newLineAfter := func(i int) *Line { - new := &Line{Token: tokens} - if i == len(x.Stmt) { - x.Stmt = append(x.Stmt, new) - } else { - x.Stmt = append(x.Stmt, nil) - copy(x.Stmt[i+2:], x.Stmt[i+1:]) - x.Stmt[i+1] = new - } - return new - } - - if hint != nil { - for i, stmt := range x.Stmt { - switch stmt := stmt.(type) { - case *Line: - if stmt == hint { - if stmt.Token == nil || stmt.Token[0] != tokens[0] { - return newLineAfter(i) - } - - // Convert line to line block. - stmt.InBlock = true - block := &LineBlock{Token: stmt.Token[:1], Line: []*Line{stmt}} - stmt.Token = stmt.Token[1:] - x.Stmt[i] = block - new := &Line{Token: tokens[1:], InBlock: true} - block.Line = append(block.Line, new) - return new - } - - case *LineBlock: - if stmt == hint { - if stmt.Token[0] != tokens[0] { - return newLineAfter(i) - } - - new := &Line{Token: tokens[1:], InBlock: true} - stmt.Line = append(stmt.Line, new) - return new - } - - for j, line := range stmt.Line { - if line == hint { - if stmt.Token[0] != tokens[0] { - return newLineAfter(i) - } - - // Add new line after hint within the block. 
- stmt.Line = append(stmt.Line, nil) - copy(stmt.Line[j+2:], stmt.Line[j+1:]) - new := &Line{Token: tokens[1:], InBlock: true} - stmt.Line[j+1] = new - return new - } - } - } - } - } - - new := &Line{Token: tokens} - x.Stmt = append(x.Stmt, new) - return new -} - -func (x *FileSyntax) updateLine(line *Line, tokens ...string) { - if line.InBlock { - tokens = tokens[1:] - } - line.Token = tokens -} - -// markRemoved modifies line so that it (and its end-of-line comment, if any) -// will be dropped by (*FileSyntax).Cleanup. -func (line *Line) markRemoved() { - line.Token = nil - line.Comments.Suffix = nil -} - -// Cleanup cleans up the file syntax x after any edit operations. -// To avoid quadratic behavior, (*Line).markRemoved marks the line as dead -// by setting line.Token = nil but does not remove it from the slice -// in which it appears. After edits have all been indicated, -// calling Cleanup cleans out the dead lines. -func (x *FileSyntax) Cleanup() { - w := 0 - for _, stmt := range x.Stmt { - switch stmt := stmt.(type) { - case *Line: - if stmt.Token == nil { - continue - } - case *LineBlock: - ww := 0 - for _, line := range stmt.Line { - if line.Token != nil { - stmt.Line[ww] = line - ww++ - } - } - if ww == 0 { - continue - } - if ww == 1 { - // Collapse block into single line. - line := &Line{ - Comments: Comments{ - Before: commentsAdd(stmt.Before, stmt.Line[0].Before), - Suffix: commentsAdd(stmt.Line[0].Suffix, stmt.Suffix), - After: commentsAdd(stmt.Line[0].After, stmt.After), - }, - Token: stringsAdd(stmt.Token, stmt.Line[0].Token), - } - x.Stmt[w] = line - w++ - continue - } - stmt.Line = stmt.Line[:ww] - } - x.Stmt[w] = stmt - w++ - } - x.Stmt = x.Stmt[:w] -} - -func commentsAdd(x, y []Comment) []Comment { - return append(x[:len(x):len(x)], y...) -} - -func stringsAdd(x, y []string) []string { - return append(x[:len(x):len(x)], y...) -} - -// A CommentBlock represents a top-level block of comments separate -// from any rule. -type CommentBlock struct { - Comments - Start Position -} - -func (x *CommentBlock) Span() (start, end Position) { - return x.Start, x.Start -} - -// A Line is a single line of tokens. -type Line struct { - Comments - Start Position - Token []string - InBlock bool - End Position -} - -func (x *Line) Span() (start, end Position) { - return x.Start, x.End -} - -// A LineBlock is a factored block of lines, like -// -// require ( -// "x" -// "y" -// ) -type LineBlock struct { - Comments - Start Position - LParen LParen - Token []string - Line []*Line - RParen RParen -} - -func (x *LineBlock) Span() (start, end Position) { - return x.Start, x.RParen.Pos.add(")") -} - -// An LParen represents the beginning of a parenthesized line block. -// It is a place to store suffix comments. -type LParen struct { - Comments - Pos Position -} - -func (x *LParen) Span() (start, end Position) { - return x.Pos, x.Pos.add(")") -} - -// An RParen represents the end of a parenthesized line block. -// It is a place to store whole-line (before) comments. -type RParen struct { - Comments - Pos Position -} - -func (x *RParen) Span() (start, end Position) { - return x.Pos, x.Pos.add(")") -} - -// An input represents a single input file being parsed. -type input struct { - // Lexing state. 
- filename string // name of input file, for errors - complete []byte // entire input - remaining []byte // remaining input - tokenStart []byte // token being scanned to end of input - token token // next token to be returned by lex, peek - pos Position // current input position - comments []Comment // accumulated comments - - // Parser state. - file *FileSyntax // returned top-level syntax tree - parseErrors ErrorList // errors encountered during parsing - - // Comment assignment state. - pre []Expr // all expressions, in preorder traversal - post []Expr // all expressions, in postorder traversal -} - -func newInput(filename string, data []byte) *input { - return &input{ - filename: filename, - complete: data, - remaining: data, - pos: Position{Line: 1, LineRune: 1, Byte: 0}, - } -} - -// parse parses the input file. -func parse(file string, data []byte) (f *FileSyntax, err error) { - // The parser panics for both routine errors like syntax errors - // and for programmer bugs like array index errors. - // Turn both into error returns. Catching bug panics is - // especially important when processing many files. - in := newInput(file, data) - defer func() { - if e := recover(); e != nil && e != &in.parseErrors { - in.parseErrors = append(in.parseErrors, Error{ - Filename: in.filename, - Pos: in.pos, - Err: fmt.Errorf("internal error: %v", e), - }) - } - if err == nil && len(in.parseErrors) > 0 { - err = in.parseErrors - } - }() - - // Prime the lexer by reading in the first token. It will be available - // in the next peek() or lex() call. - in.readToken() - - // Invoke the parser. - in.parseFile() - if len(in.parseErrors) > 0 { - return nil, in.parseErrors - } - in.file.Name = in.filename - - // Assign comments to nearby syntax. - in.assignComments() - - return in.file, nil -} - -// Error is called to report an error. -// Error does not return: it panics. -func (in *input) Error(s string) { - in.parseErrors = append(in.parseErrors, Error{ - Filename: in.filename, - Pos: in.pos, - Err: errors.New(s), - }) - panic(&in.parseErrors) -} - -// eof reports whether the input has reached end of file. -func (in *input) eof() bool { - return len(in.remaining) == 0 -} - -// peekRune returns the next rune in the input without consuming it. -func (in *input) peekRune() int { - if len(in.remaining) == 0 { - return 0 - } - r, _ := utf8.DecodeRune(in.remaining) - return int(r) -} - -// peekPrefix reports whether the remaining input begins with the given prefix. -func (in *input) peekPrefix(prefix string) bool { - // This is like bytes.HasPrefix(in.remaining, []byte(prefix)) - // but without the allocation of the []byte copy of prefix. - for i := 0; i < len(prefix); i++ { - if i >= len(in.remaining) || in.remaining[i] != prefix[i] { - return false - } - } - return true -} - -// readRune consumes and returns the next rune in the input. -func (in *input) readRune() int { - if len(in.remaining) == 0 { - in.Error("internal lexer error: readRune at EOF") - } - r, size := utf8.DecodeRune(in.remaining) - in.remaining = in.remaining[size:] - if r == '\n' { - in.pos.Line++ - in.pos.LineRune = 1 - } else { - in.pos.LineRune++ - } - in.pos.Byte += size - return int(r) -} - -type token struct { - kind tokenKind - pos Position - endPos Position - text string -} - -type tokenKind int - -const ( - _EOF tokenKind = -(iota + 1) - _EOLCOMMENT - _IDENT - _STRING - _COMMENT - - // newlines and punctuation tokens are allowed as ASCII codes. 
-) - -func (k tokenKind) isComment() bool { - return k == _COMMENT || k == _EOLCOMMENT -} - -// isEOL returns whether a token terminates a line. -func (k tokenKind) isEOL() bool { - return k == _EOF || k == _EOLCOMMENT || k == '\n' -} - -// startToken marks the beginning of the next input token. -// It must be followed by a call to endToken, once the token's text has -// been consumed using readRune. -func (in *input) startToken() { - in.tokenStart = in.remaining - in.token.text = "" - in.token.pos = in.pos -} - -// endToken marks the end of an input token. -// It records the actual token string in tok.text. -// A single trailing newline (LF or CRLF) will be removed from comment tokens. -func (in *input) endToken(kind tokenKind) { - in.token.kind = kind - text := string(in.tokenStart[:len(in.tokenStart)-len(in.remaining)]) - if kind.isComment() { - if strings.HasSuffix(text, "\r\n") { - text = text[:len(text)-2] - } else { - text = strings.TrimSuffix(text, "\n") - } - } - in.token.text = text - in.token.endPos = in.pos -} - -// peek returns the kind of the next token returned by lex. -func (in *input) peek() tokenKind { - return in.token.kind -} - -// lex is called from the parser to obtain the next input token. -func (in *input) lex() token { - tok := in.token - in.readToken() - return tok -} - -// readToken lexes the next token from the text and stores it in in.token. -func (in *input) readToken() { - // Skip past spaces, stopping at non-space or EOF. - for !in.eof() { - c := in.peekRune() - if c == ' ' || c == '\t' || c == '\r' { - in.readRune() - continue - } - - // Comment runs to end of line. - if in.peekPrefix("//") { - in.startToken() - - // Is this comment the only thing on its line? - // Find the last \n before this // and see if it's all - // spaces from there to here. - i := bytes.LastIndex(in.complete[:in.pos.Byte], []byte("\n")) - suffix := len(bytes.TrimSpace(in.complete[i+1:in.pos.Byte])) > 0 - in.readRune() - in.readRune() - - // Consume comment. - for len(in.remaining) > 0 && in.readRune() != '\n' { - } - - // If we are at top level (not in a statement), hand the comment to - // the parser as a _COMMENT token. The grammar is written - // to handle top-level comments itself. - if !suffix { - in.endToken(_COMMENT) - return - } - - // Otherwise, save comment for later attachment to syntax tree. - in.endToken(_EOLCOMMENT) - in.comments = append(in.comments, Comment{in.token.pos, in.token.text, suffix}) - return - } - - if in.peekPrefix("/*") { - in.Error("mod files must use // comments (not /* */ comments)") - } - - // Found non-space non-comment. - break - } - - // Found the beginning of the next token. - in.startToken() - - // End of file. - if in.eof() { - in.endToken(_EOF) - return - } - - // Punctuation tokens. - switch c := in.peekRune(); c { - case '\n', '(', ')', '[', ']', '{', '}', ',': - in.readRune() - in.endToken(tokenKind(c)) - return - - case '"', '`': // quoted string - quote := c - in.readRune() - for { - if in.eof() { - in.pos = in.token.pos - in.Error("unexpected EOF in string") - } - if in.peekRune() == '\n' { - in.Error("unexpected newline in string") - } - c := in.readRune() - if c == quote { - break - } - if c == '\\' && quote != '`' { - if in.eof() { - in.pos = in.token.pos - in.Error("unexpected EOF in string") - } - in.readRune() - } - } - in.endToken(_STRING) - return - } - - // Checked all punctuation. Must be identifier token. 
- if c := in.peekRune(); !isIdent(c) { - in.Error(fmt.Sprintf("unexpected input character %#q", c)) - } - - // Scan over identifier. - for isIdent(in.peekRune()) { - if in.peekPrefix("//") { - break - } - if in.peekPrefix("/*") { - in.Error("mod files must use // comments (not /* */ comments)") - } - in.readRune() - } - in.endToken(_IDENT) -} - -// isIdent reports whether c is an identifier rune. -// We treat most printable runes as identifier runes, except for a handful of -// ASCII punctuation characters. -func isIdent(c int) bool { - switch r := rune(c); r { - case ' ', '(', ')', '[', ']', '{', '}', ',': - return false - default: - return !unicode.IsSpace(r) && unicode.IsPrint(r) - } -} - -// Comment assignment. -// We build two lists of all subexpressions, preorder and postorder. -// The preorder list is ordered by start location, with outer expressions first. -// The postorder list is ordered by end location, with outer expressions last. -// We use the preorder list to assign each whole-line comment to the syntax -// immediately following it, and we use the postorder list to assign each -// end-of-line comment to the syntax immediately preceding it. - -// order walks the expression adding it and its subexpressions to the -// preorder and postorder lists. -func (in *input) order(x Expr) { - if x != nil { - in.pre = append(in.pre, x) - } - switch x := x.(type) { - default: - panic(fmt.Errorf("order: unexpected type %T", x)) - case nil: - // nothing - case *LParen, *RParen: - // nothing - case *CommentBlock: - // nothing - case *Line: - // nothing - case *FileSyntax: - for _, stmt := range x.Stmt { - in.order(stmt) - } - case *LineBlock: - in.order(&x.LParen) - for _, l := range x.Line { - in.order(l) - } - in.order(&x.RParen) - } - if x != nil { - in.post = append(in.post, x) - } -} - -// assignComments attaches comments to nearby syntax. -func (in *input) assignComments() { - const debug = false - - // Generate preorder and postorder lists. - in.order(in.file) - - // Split into whole-line comments and suffix comments. - var line, suffix []Comment - for _, com := range in.comments { - if com.Suffix { - suffix = append(suffix, com) - } else { - line = append(line, com) - } - } - - if debug { - for _, c := range line { - fmt.Fprintf(os.Stderr, "LINE %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte) - } - } - - // Assign line comments to syntax immediately following. - for _, x := range in.pre { - start, _ := x.Span() - if debug { - fmt.Fprintf(os.Stderr, "pre %T :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte) - } - xcom := x.Comment() - for len(line) > 0 && start.Byte >= line[0].Start.Byte { - if debug { - fmt.Fprintf(os.Stderr, "ASSIGN LINE %q #%d\n", line[0].Token, line[0].Start.Byte) - } - xcom.Before = append(xcom.Before, line[0]) - line = line[1:] - } - } - - // Remaining line comments go at end of file. - in.file.After = append(in.file.After, line...) - - if debug { - for _, c := range suffix { - fmt.Fprintf(os.Stderr, "SUFFIX %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte) - } - } - - // Assign suffix comments to syntax immediately before. - for i := len(in.post) - 1; i >= 0; i-- { - x := in.post[i] - - start, end := x.Span() - if debug { - fmt.Fprintf(os.Stderr, "post %T :%d:%d #%d :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte, end.Line, end.LineRune, end.Byte) - } - - // Do not assign suffix comments to end of line block or whole file. - // Instead assign them to the last element inside. 
- switch x.(type) { - case *FileSyntax: - continue - } - - // Do not assign suffix comments to something that starts - // on an earlier line, so that in - // - // x ( y - // z ) // comment - // - // we assign the comment to z and not to x ( ... ). - if start.Line != end.Line { - continue - } - xcom := x.Comment() - for len(suffix) > 0 && end.Byte <= suffix[len(suffix)-1].Start.Byte { - if debug { - fmt.Fprintf(os.Stderr, "ASSIGN SUFFIX %q #%d\n", suffix[len(suffix)-1].Token, suffix[len(suffix)-1].Start.Byte) - } - xcom.Suffix = append(xcom.Suffix, suffix[len(suffix)-1]) - suffix = suffix[:len(suffix)-1] - } - } - - // We assigned suffix comments in reverse. - // If multiple suffix comments were appended to the same - // expression node, they are now in reverse. Fix that. - for _, x := range in.post { - reverseComments(x.Comment().Suffix) - } - - // Remaining suffix comments go at beginning of file. - in.file.Before = append(in.file.Before, suffix...) -} - -// reverseComments reverses the []Comment list. -func reverseComments(list []Comment) { - for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 { - list[i], list[j] = list[j], list[i] - } -} - -func (in *input) parseFile() { - in.file = new(FileSyntax) - var cb *CommentBlock - for { - switch in.peek() { - case '\n': - in.lex() - if cb != nil { - in.file.Stmt = append(in.file.Stmt, cb) - cb = nil - } - case _COMMENT: - tok := in.lex() - if cb == nil { - cb = &CommentBlock{Start: tok.pos} - } - com := cb.Comment() - com.Before = append(com.Before, Comment{Start: tok.pos, Token: tok.text}) - case _EOF: - if cb != nil { - in.file.Stmt = append(in.file.Stmt, cb) - } - return - default: - in.parseStmt() - if cb != nil { - in.file.Stmt[len(in.file.Stmt)-1].Comment().Before = cb.Before - cb = nil - } - } - } -} - -func (in *input) parseStmt() { - tok := in.lex() - start := tok.pos - end := tok.endPos - tokens := []string{tok.text} - for { - tok := in.lex() - switch { - case tok.kind.isEOL(): - in.file.Stmt = append(in.file.Stmt, &Line{ - Start: start, - Token: tokens, - End: end, - }) - return - - case tok.kind == '(': - if next := in.peek(); next.isEOL() { - // Start of block: no more tokens on this line. - in.file.Stmt = append(in.file.Stmt, in.parseLineBlock(start, tokens, tok)) - return - } else if next == ')' { - rparen := in.lex() - if in.peek().isEOL() { - // Empty block. - in.lex() - in.file.Stmt = append(in.file.Stmt, &LineBlock{ - Start: start, - Token: tokens, - LParen: LParen{Pos: tok.pos}, - RParen: RParen{Pos: rparen.pos}, - }) - return - } - // '( )' in the middle of the line, not a block. - tokens = append(tokens, tok.text, rparen.text) - } else { - // '(' in the middle of the line, not a block. - tokens = append(tokens, tok.text) - } - - default: - tokens = append(tokens, tok.text) - end = tok.endPos - } - } -} - -func (in *input) parseLineBlock(start Position, token []string, lparen token) *LineBlock { - x := &LineBlock{ - Start: start, - Token: token, - LParen: LParen{Pos: lparen.pos}, - } - var comments []Comment - for { - switch in.peek() { - case _EOLCOMMENT: - // Suffix comment, will be attached later by assignComments. - in.lex() - case '\n': - // Blank line. Add an empty comment to preserve it. 
- in.lex() - if len(comments) == 0 && len(x.Line) > 0 || len(comments) > 0 && comments[len(comments)-1].Token != "" { - comments = append(comments, Comment{}) - } - case _COMMENT: - tok := in.lex() - comments = append(comments, Comment{Start: tok.pos, Token: tok.text}) - case _EOF: - in.Error(fmt.Sprintf("syntax error (unterminated block started at %s:%d:%d)", in.filename, x.Start.Line, x.Start.LineRune)) - case ')': - rparen := in.lex() - x.RParen.Before = comments - x.RParen.Pos = rparen.pos - if !in.peek().isEOL() { - in.Error("syntax error (expected newline after closing paren)") - } - in.lex() - return x - default: - l := in.parseLine() - x.Line = append(x.Line, l) - l.Comment().Before = comments - comments = nil - } - } -} - -func (in *input) parseLine() *Line { - tok := in.lex() - if tok.kind.isEOL() { - in.Error("internal parse error: parseLine at end of line") - } - start := tok.pos - end := tok.endPos - tokens := []string{tok.text} - for { - tok := in.lex() - if tok.kind.isEOL() { - return &Line{ - Start: start, - Token: tokens, - End: end, - InBlock: true, - } - } - tokens = append(tokens, tok.text) - end = tok.endPos - } -} - -var ( - slashSlash = []byte("//") - moduleStr = []byte("module") -) - -// ModulePath returns the module path from the gomod file text. -// If it cannot find a module path, it returns an empty string. -// It is tolerant of unrelated problems in the go.mod file. -func ModulePath(mod []byte) string { - for len(mod) > 0 { - line := mod - mod = nil - if i := bytes.IndexByte(line, '\n'); i >= 0 { - line, mod = line[:i], line[i+1:] - } - if i := bytes.Index(line, slashSlash); i >= 0 { - line = line[:i] - } - line = bytes.TrimSpace(line) - if !bytes.HasPrefix(line, moduleStr) { - continue - } - line = line[len(moduleStr):] - n := len(line) - line = bytes.TrimSpace(line) - if len(line) == n || len(line) == 0 { - continue - } - - if line[0] == '"' || line[0] == '`' { - p, err := strconv.Unquote(string(line)) - if err != nil { - return "" // malformed quoted string or multiline module path - } - return p - } - - return string(line) - } - return "" // missing module path -} diff --git a/vendor/golang.org/x/mod/modfile/rule.go b/vendor/golang.org/x/mod/modfile/rule.go deleted file mode 100644 index 35fd1f534c..0000000000 --- a/vendor/golang.org/x/mod/modfile/rule.go +++ /dev/null @@ -1,1663 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package modfile implements a parser and formatter for go.mod files. -// -// The go.mod syntax is described in -// https://pkg.go.dev/cmd/go/#hdr-The_go_mod_file. -// -// The [Parse] and [ParseLax] functions both parse a go.mod file and return an -// abstract syntax tree. ParseLax ignores unknown statements and may be used to -// parse go.mod files that may have been developed with newer versions of Go. -// -// The [File] struct returned by Parse and ParseLax represent an abstract -// go.mod file. File has several methods like [File.AddNewRequire] and -// [File.DropReplace] that can be used to programmatically edit a file. -// -// The [Format] function formats a File back to a byte slice which can be -// written to a file. -package modfile - -import ( - "errors" - "fmt" - "path/filepath" - "sort" - "strconv" - "strings" - "unicode" - - "golang.org/x/mod/internal/lazyregexp" - "golang.org/x/mod/module" - "golang.org/x/mod/semver" -) - -// A File is the parsed, interpreted form of a go.mod file. 
-type File struct { - Module *Module - Go *Go - Toolchain *Toolchain - Require []*Require - Exclude []*Exclude - Replace []*Replace - Retract []*Retract - - Syntax *FileSyntax -} - -// A Module is the module statement. -type Module struct { - Mod module.Version - Deprecated string - Syntax *Line -} - -// A Go is the go statement. -type Go struct { - Version string // "1.23" - Syntax *Line -} - -// A Toolchain is the toolchain statement. -type Toolchain struct { - Name string // "go1.21rc1" - Syntax *Line -} - -// An Exclude is a single exclude statement. -type Exclude struct { - Mod module.Version - Syntax *Line -} - -// A Replace is a single replace statement. -type Replace struct { - Old module.Version - New module.Version - Syntax *Line -} - -// A Retract is a single retract statement. -type Retract struct { - VersionInterval - Rationale string - Syntax *Line -} - -// A VersionInterval represents a range of versions with upper and lower bounds. -// Intervals are closed: both bounds are included. When Low is equal to High, -// the interval may refer to a single version ('v1.2.3') or an interval -// ('[v1.2.3, v1.2.3]'); both have the same representation. -type VersionInterval struct { - Low, High string -} - -// A Require is a single require statement. -type Require struct { - Mod module.Version - Indirect bool // has "// indirect" comment - Syntax *Line -} - -func (r *Require) markRemoved() { - r.Syntax.markRemoved() - *r = Require{} -} - -func (r *Require) setVersion(v string) { - r.Mod.Version = v - - if line := r.Syntax; len(line.Token) > 0 { - if line.InBlock { - // If the line is preceded by an empty line, remove it; see - // https://golang.org/issue/33779. - if len(line.Comments.Before) == 1 && len(line.Comments.Before[0].Token) == 0 { - line.Comments.Before = line.Comments.Before[:0] - } - if len(line.Token) >= 2 { // example.com v1.2.3 - line.Token[1] = v - } - } else { - if len(line.Token) >= 3 { // require example.com v1.2.3 - line.Token[2] = v - } - } - } -} - -// setIndirect sets line to have (or not have) a "// indirect" comment. -func (r *Require) setIndirect(indirect bool) { - r.Indirect = indirect - line := r.Syntax - if isIndirect(line) == indirect { - return - } - if indirect { - // Adding comment. - if len(line.Suffix) == 0 { - // New comment. - line.Suffix = []Comment{{Token: "// indirect", Suffix: true}} - return - } - - com := &line.Suffix[0] - text := strings.TrimSpace(strings.TrimPrefix(com.Token, string(slashSlash))) - if text == "" { - // Empty comment. - com.Token = "// indirect" - return - } - - // Insert at beginning of existing comment. - com.Token = "// indirect; " + text - return - } - - // Removing comment. - f := strings.TrimSpace(strings.TrimPrefix(line.Suffix[0].Token, string(slashSlash))) - if f == "indirect" { - // Remove whole comment. - line.Suffix = nil - return - } - - // Remove comment prefix. - com := &line.Suffix[0] - i := strings.Index(com.Token, "indirect;") - com.Token = "//" + com.Token[i+len("indirect;"):] -} - -// isIndirect reports whether line has a "// indirect" comment, -// meaning it is in go.mod only for its effect on indirect dependencies, -// so that it can be dropped entirely once the effective version of the -// indirect dependency reaches the given minimum version. 
-func isIndirect(line *Line) bool { - if len(line.Suffix) == 0 { - return false - } - f := strings.Fields(strings.TrimPrefix(line.Suffix[0].Token, string(slashSlash))) - return (len(f) == 1 && f[0] == "indirect" || len(f) > 1 && f[0] == "indirect;") -} - -func (f *File) AddModuleStmt(path string) error { - if f.Syntax == nil { - f.Syntax = new(FileSyntax) - } - if f.Module == nil { - f.Module = &Module{ - Mod: module.Version{Path: path}, - Syntax: f.Syntax.addLine(nil, "module", AutoQuote(path)), - } - } else { - f.Module.Mod.Path = path - f.Syntax.updateLine(f.Module.Syntax, "module", AutoQuote(path)) - } - return nil -} - -func (f *File) AddComment(text string) { - if f.Syntax == nil { - f.Syntax = new(FileSyntax) - } - f.Syntax.Stmt = append(f.Syntax.Stmt, &CommentBlock{ - Comments: Comments{ - Before: []Comment{ - { - Token: text, - }, - }, - }, - }) -} - -type VersionFixer func(path, version string) (string, error) - -// errDontFix is returned by a VersionFixer to indicate the version should be -// left alone, even if it's not canonical. -var dontFixRetract VersionFixer = func(_, vers string) (string, error) { - return vers, nil -} - -// Parse parses and returns a go.mod file. -// -// file is the name of the file, used in positions and errors. -// -// data is the content of the file. -// -// fix is an optional function that canonicalizes module versions. -// If fix is nil, all module versions must be canonical ([module.CanonicalVersion] -// must return the same string). -func Parse(file string, data []byte, fix VersionFixer) (*File, error) { - return parseToFile(file, data, fix, true) -} - -// ParseLax is like Parse but ignores unknown statements. -// It is used when parsing go.mod files other than the main module, -// under the theory that most statement types we add in the future will -// only apply in the main module, like exclude and replace, -// and so we get better gradual deployments if old go commands -// simply ignore those statements when found in go.mod files -// in dependencies. -func ParseLax(file string, data []byte, fix VersionFixer) (*File, error) { - return parseToFile(file, data, fix, false) -} - -func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (parsed *File, err error) { - fs, err := parse(file, data) - if err != nil { - return nil, err - } - f := &File{ - Syntax: fs, - } - var errs ErrorList - - // fix versions in retract directives after the file is parsed. - // We need the module path to fix versions, and it might be at the end. 
- defer func() { - oldLen := len(errs) - f.fixRetract(fix, &errs) - if len(errs) > oldLen { - parsed, err = nil, errs - } - }() - - for _, x := range fs.Stmt { - switch x := x.(type) { - case *Line: - f.add(&errs, nil, x, x.Token[0], x.Token[1:], fix, strict) - - case *LineBlock: - if len(x.Token) > 1 { - if strict { - errs = append(errs, Error{ - Filename: file, - Pos: x.Start, - Err: fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")), - }) - } - continue - } - switch x.Token[0] { - default: - if strict { - errs = append(errs, Error{ - Filename: file, - Pos: x.Start, - Err: fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")), - }) - } - continue - case "module", "require", "exclude", "replace", "retract": - for _, l := range x.Line { - f.add(&errs, x, l, x.Token[0], l.Token, fix, strict) - } - } - } - } - - if len(errs) > 0 { - return nil, errs - } - return f, nil -} - -var GoVersionRE = lazyregexp.New(`^([1-9][0-9]*)\.(0|[1-9][0-9]*)(\.(0|[1-9][0-9]*))?([a-z]+[0-9]+)?$`) -var laxGoVersionRE = lazyregexp.New(`^v?(([1-9][0-9]*)\.(0|[1-9][0-9]*))([^0-9].*)$`) - -// Toolchains must be named beginning with `go1`, -// like "go1.20.3" or "go1.20.3-gccgo". As a special case, "default" is also permitted. -var ToolchainRE = lazyregexp.New(`^default$|^go1($|\.)`) - -func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, args []string, fix VersionFixer, strict bool) { - // If strict is false, this module is a dependency. - // We ignore all unknown directives as well as main-module-only - // directives like replace and exclude. It will work better for - // forward compatibility if we can depend on modules that have unknown - // statements (presumed relevant only when acting as the main module) - // and simply ignore those statements. 
- if !strict { - switch verb { - case "go", "module", "retract", "require": - // want these even for dependency go.mods - default: - return - } - } - - wrapModPathError := func(modPath string, err error) { - *errs = append(*errs, Error{ - Filename: f.Syntax.Name, - Pos: line.Start, - ModPath: modPath, - Verb: verb, - Err: err, - }) - } - wrapError := func(err error) { - *errs = append(*errs, Error{ - Filename: f.Syntax.Name, - Pos: line.Start, - Err: err, - }) - } - errorf := func(format string, args ...interface{}) { - wrapError(fmt.Errorf(format, args...)) - } - - switch verb { - default: - errorf("unknown directive: %s", verb) - - case "go": - if f.Go != nil { - errorf("repeated go statement") - return - } - if len(args) != 1 { - errorf("go directive expects exactly one argument") - return - } else if !GoVersionRE.MatchString(args[0]) { - fixed := false - if !strict { - if m := laxGoVersionRE.FindStringSubmatch(args[0]); m != nil { - args[0] = m[1] - fixed = true - } - } - if !fixed { - errorf("invalid go version '%s': must match format 1.23.0", args[0]) - return - } - } - - f.Go = &Go{Syntax: line} - f.Go.Version = args[0] - - case "toolchain": - if f.Toolchain != nil { - errorf("repeated toolchain statement") - return - } - if len(args) != 1 { - errorf("toolchain directive expects exactly one argument") - return - } else if strict && !ToolchainRE.MatchString(args[0]) { - errorf("invalid toolchain version '%s': must match format go1.23.0 or local", args[0]) - return - } - f.Toolchain = &Toolchain{Syntax: line} - f.Toolchain.Name = args[0] - - case "module": - if f.Module != nil { - errorf("repeated module statement") - return - } - deprecated := parseDeprecation(block, line) - f.Module = &Module{ - Syntax: line, - Deprecated: deprecated, - } - if len(args) != 1 { - errorf("usage: module module/path") - return - } - s, err := parseString(&args[0]) - if err != nil { - errorf("invalid quoted string: %v", err) - return - } - f.Module.Mod = module.Version{Path: s} - - case "require", "exclude": - if len(args) != 2 { - errorf("usage: %s module/path v1.2.3", verb) - return - } - s, err := parseString(&args[0]) - if err != nil { - errorf("invalid quoted string: %v", err) - return - } - v, err := parseVersion(verb, s, &args[1], fix) - if err != nil { - wrapError(err) - return - } - pathMajor, err := modulePathMajor(s) - if err != nil { - wrapError(err) - return - } - if err := module.CheckPathMajor(v, pathMajor); err != nil { - wrapModPathError(s, err) - return - } - if verb == "require" { - f.Require = append(f.Require, &Require{ - Mod: module.Version{Path: s, Version: v}, - Syntax: line, - Indirect: isIndirect(line), - }) - } else { - f.Exclude = append(f.Exclude, &Exclude{ - Mod: module.Version{Path: s, Version: v}, - Syntax: line, - }) - } - - case "replace": - replace, wrappederr := parseReplace(f.Syntax.Name, line, verb, args, fix) - if wrappederr != nil { - *errs = append(*errs, *wrappederr) - return - } - f.Replace = append(f.Replace, replace) - - case "retract": - rationale := parseDirectiveComment(block, line) - vi, err := parseVersionInterval(verb, "", &args, dontFixRetract) - if err != nil { - if strict { - wrapError(err) - return - } else { - // Only report errors parsing intervals in the main module. We may - // support additional syntax in the future, such as open and half-open - // intervals. Those can't be supported now, because they break the - // go.mod parser, even in lax mode. 
- return - } - } - if len(args) > 0 && strict { - // In the future, there may be additional information after the version. - errorf("unexpected token after version: %q", args[0]) - return - } - retract := &Retract{ - VersionInterval: vi, - Rationale: rationale, - Syntax: line, - } - f.Retract = append(f.Retract, retract) - } -} - -func parseReplace(filename string, line *Line, verb string, args []string, fix VersionFixer) (*Replace, *Error) { - wrapModPathError := func(modPath string, err error) *Error { - return &Error{ - Filename: filename, - Pos: line.Start, - ModPath: modPath, - Verb: verb, - Err: err, - } - } - wrapError := func(err error) *Error { - return &Error{ - Filename: filename, - Pos: line.Start, - Err: err, - } - } - errorf := func(format string, args ...interface{}) *Error { - return wrapError(fmt.Errorf(format, args...)) - } - - arrow := 2 - if len(args) >= 2 && args[1] == "=>" { - arrow = 1 - } - if len(args) < arrow+2 || len(args) > arrow+3 || args[arrow] != "=>" { - return nil, errorf("usage: %s module/path [v1.2.3] => other/module v1.4\n\t or %s module/path [v1.2.3] => ../local/directory", verb, verb) - } - s, err := parseString(&args[0]) - if err != nil { - return nil, errorf("invalid quoted string: %v", err) - } - pathMajor, err := modulePathMajor(s) - if err != nil { - return nil, wrapModPathError(s, err) - - } - var v string - if arrow == 2 { - v, err = parseVersion(verb, s, &args[1], fix) - if err != nil { - return nil, wrapError(err) - } - if err := module.CheckPathMajor(v, pathMajor); err != nil { - return nil, wrapModPathError(s, err) - } - } - ns, err := parseString(&args[arrow+1]) - if err != nil { - return nil, errorf("invalid quoted string: %v", err) - } - nv := "" - if len(args) == arrow+2 { - if !IsDirectoryPath(ns) { - if strings.Contains(ns, "@") { - return nil, errorf("replacement module must match format 'path version', not 'path@version'") - } - return nil, errorf("replacement module without version must be directory path (rooted or starting with . or ..)") - } - if filepath.Separator == '/' && strings.Contains(ns, `\`) { - return nil, errorf("replacement directory appears to be Windows path (on a non-windows system)") - } - } - if len(args) == arrow+3 { - nv, err = parseVersion(verb, ns, &args[arrow+2], fix) - if err != nil { - return nil, wrapError(err) - } - if IsDirectoryPath(ns) { - return nil, errorf("replacement module directory path %q cannot have version", ns) - } - } - return &Replace{ - Old: module.Version{Path: s, Version: v}, - New: module.Version{Path: ns, Version: nv}, - Syntax: line, - }, nil -} - -// fixRetract applies fix to each retract directive in f, appending any errors -// to errs. -// -// Most versions are fixed as we parse the file, but for retract directives, -// the relevant module path is the one specified with the module directive, -// and that might appear at the end of the file (or not at all). 
-func (f *File) fixRetract(fix VersionFixer, errs *ErrorList) { - if fix == nil { - return - } - path := "" - if f.Module != nil { - path = f.Module.Mod.Path - } - var r *Retract - wrapError := func(err error) { - *errs = append(*errs, Error{ - Filename: f.Syntax.Name, - Pos: r.Syntax.Start, - Err: err, - }) - } - - for _, r = range f.Retract { - if path == "" { - wrapError(errors.New("no module directive found, so retract cannot be used")) - return // only print the first one of these - } - - args := r.Syntax.Token - if args[0] == "retract" { - args = args[1:] - } - vi, err := parseVersionInterval("retract", path, &args, fix) - if err != nil { - wrapError(err) - } - r.VersionInterval = vi - } -} - -func (f *WorkFile) add(errs *ErrorList, line *Line, verb string, args []string, fix VersionFixer) { - wrapError := func(err error) { - *errs = append(*errs, Error{ - Filename: f.Syntax.Name, - Pos: line.Start, - Err: err, - }) - } - errorf := func(format string, args ...interface{}) { - wrapError(fmt.Errorf(format, args...)) - } - - switch verb { - default: - errorf("unknown directive: %s", verb) - - case "go": - if f.Go != nil { - errorf("repeated go statement") - return - } - if len(args) != 1 { - errorf("go directive expects exactly one argument") - return - } else if !GoVersionRE.MatchString(args[0]) { - errorf("invalid go version '%s': must match format 1.23", args[0]) - return - } - - f.Go = &Go{Syntax: line} - f.Go.Version = args[0] - - case "toolchain": - if f.Toolchain != nil { - errorf("repeated toolchain statement") - return - } - if len(args) != 1 { - errorf("toolchain directive expects exactly one argument") - return - } else if !ToolchainRE.MatchString(args[0]) { - errorf("invalid toolchain version '%s': must match format go1.23 or local", args[0]) - return - } - - f.Toolchain = &Toolchain{Syntax: line} - f.Toolchain.Name = args[0] - - case "use": - if len(args) != 1 { - errorf("usage: %s local/dir", verb) - return - } - s, err := parseString(&args[0]) - if err != nil { - errorf("invalid quoted string: %v", err) - return - } - f.Use = append(f.Use, &Use{ - Path: s, - Syntax: line, - }) - - case "replace": - replace, wrappederr := parseReplace(f.Syntax.Name, line, verb, args, fix) - if wrappederr != nil { - *errs = append(*errs, *wrappederr) - return - } - f.Replace = append(f.Replace, replace) - } -} - -// IsDirectoryPath reports whether the given path should be interpreted as a directory path. -// Just like on the go command line, relative paths starting with a '.' or '..' path component -// and rooted paths are directory paths; the rest are module paths. -func IsDirectoryPath(ns string) bool { - // Because go.mod files can move from one system to another, - // we check all known path syntaxes, both Unix and Windows. - return ns == "." || strings.HasPrefix(ns, "./") || strings.HasPrefix(ns, `.\`) || - ns == ".." || strings.HasPrefix(ns, "../") || strings.HasPrefix(ns, `..\`) || - strings.HasPrefix(ns, "/") || strings.HasPrefix(ns, `\`) || - len(ns) >= 2 && ('A' <= ns[0] && ns[0] <= 'Z' || 'a' <= ns[0] && ns[0] <= 'z') && ns[1] == ':' -} - -// MustQuote reports whether s must be quoted in order to appear as -// a single token in a go.mod line. 
-func MustQuote(s string) bool { - for _, r := range s { - switch r { - case ' ', '"', '\'', '`': - return true - - case '(', ')', '[', ']', '{', '}', ',': - if len(s) > 1 { - return true - } - - default: - if !unicode.IsPrint(r) { - return true - } - } - } - return s == "" || strings.Contains(s, "//") || strings.Contains(s, "/*") -} - -// AutoQuote returns s or, if quoting is required for s to appear in a go.mod, -// the quotation of s. -func AutoQuote(s string) string { - if MustQuote(s) { - return strconv.Quote(s) - } - return s -} - -func parseVersionInterval(verb string, path string, args *[]string, fix VersionFixer) (VersionInterval, error) { - toks := *args - if len(toks) == 0 || toks[0] == "(" { - return VersionInterval{}, fmt.Errorf("expected '[' or version") - } - if toks[0] != "[" { - v, err := parseVersion(verb, path, &toks[0], fix) - if err != nil { - return VersionInterval{}, err - } - *args = toks[1:] - return VersionInterval{Low: v, High: v}, nil - } - toks = toks[1:] - - if len(toks) == 0 { - return VersionInterval{}, fmt.Errorf("expected version after '['") - } - low, err := parseVersion(verb, path, &toks[0], fix) - if err != nil { - return VersionInterval{}, err - } - toks = toks[1:] - - if len(toks) == 0 || toks[0] != "," { - return VersionInterval{}, fmt.Errorf("expected ',' after version") - } - toks = toks[1:] - - if len(toks) == 0 { - return VersionInterval{}, fmt.Errorf("expected version after ','") - } - high, err := parseVersion(verb, path, &toks[0], fix) - if err != nil { - return VersionInterval{}, err - } - toks = toks[1:] - - if len(toks) == 0 || toks[0] != "]" { - return VersionInterval{}, fmt.Errorf("expected ']' after version") - } - toks = toks[1:] - - *args = toks - return VersionInterval{Low: low, High: high}, nil -} - -func parseString(s *string) (string, error) { - t := *s - if strings.HasPrefix(t, `"`) { - var err error - if t, err = strconv.Unquote(t); err != nil { - return "", err - } - } else if strings.ContainsAny(t, "\"'`") { - // Other quotes are reserved both for possible future expansion - // and to avoid confusion. For example if someone types 'x' - // we want that to be a syntax error and not a literal x in literal quotation marks. - return "", fmt.Errorf("unquoted string cannot contain quote") - } - *s = AutoQuote(t) - return t, nil -} - -var deprecatedRE = lazyregexp.New(`(?s)(?:^|\n\n)Deprecated: *(.*?)(?:$|\n\n)`) - -// parseDeprecation extracts the text of comments on a "module" directive and -// extracts a deprecation message from that. -// -// A deprecation message is contained in a paragraph within a block of comments -// that starts with "Deprecated:" (case sensitive). The message runs until the -// end of the paragraph and does not include the "Deprecated:" prefix. If the -// comment block has multiple paragraphs that start with "Deprecated:", -// parseDeprecation returns the message from the first. -func parseDeprecation(block *LineBlock, line *Line) string { - text := parseDirectiveComment(block, line) - m := deprecatedRE.FindStringSubmatch(text) - if m == nil { - return "" - } - return m[1] -} - -// parseDirectiveComment extracts the text of comments on a directive. -// If the directive's line does not have comments and is part of a block that -// does have comments, the block's comments are used. 
-func parseDirectiveComment(block *LineBlock, line *Line) string { - comments := line.Comment() - if block != nil && len(comments.Before) == 0 && len(comments.Suffix) == 0 { - comments = block.Comment() - } - groups := [][]Comment{comments.Before, comments.Suffix} - var lines []string - for _, g := range groups { - for _, c := range g { - if !strings.HasPrefix(c.Token, "//") { - continue // blank line - } - lines = append(lines, strings.TrimSpace(strings.TrimPrefix(c.Token, "//"))) - } - } - return strings.Join(lines, "\n") -} - -type ErrorList []Error - -func (e ErrorList) Error() string { - errStrs := make([]string, len(e)) - for i, err := range e { - errStrs[i] = err.Error() - } - return strings.Join(errStrs, "\n") -} - -type Error struct { - Filename string - Pos Position - Verb string - ModPath string - Err error -} - -func (e *Error) Error() string { - var pos string - if e.Pos.LineRune > 1 { - // Don't print LineRune if it's 1 (beginning of line). - // It's always 1 except in scanner errors, which are rare. - pos = fmt.Sprintf("%s:%d:%d: ", e.Filename, e.Pos.Line, e.Pos.LineRune) - } else if e.Pos.Line > 0 { - pos = fmt.Sprintf("%s:%d: ", e.Filename, e.Pos.Line) - } else if e.Filename != "" { - pos = fmt.Sprintf("%s: ", e.Filename) - } - - var directive string - if e.ModPath != "" { - directive = fmt.Sprintf("%s %s: ", e.Verb, e.ModPath) - } else if e.Verb != "" { - directive = fmt.Sprintf("%s: ", e.Verb) - } - - return pos + directive + e.Err.Error() -} - -func (e *Error) Unwrap() error { return e.Err } - -func parseVersion(verb string, path string, s *string, fix VersionFixer) (string, error) { - t, err := parseString(s) - if err != nil { - return "", &Error{ - Verb: verb, - ModPath: path, - Err: &module.InvalidVersionError{ - Version: *s, - Err: err, - }, - } - } - if fix != nil { - fixed, err := fix(path, t) - if err != nil { - if err, ok := err.(*module.ModuleError); ok { - return "", &Error{ - Verb: verb, - ModPath: path, - Err: err.Err, - } - } - return "", err - } - t = fixed - } else { - cv := module.CanonicalVersion(t) - if cv == "" { - return "", &Error{ - Verb: verb, - ModPath: path, - Err: &module.InvalidVersionError{ - Version: t, - Err: errors.New("must be of the form v1.2.3"), - }, - } - } - t = cv - } - *s = t - return *s, nil -} - -func modulePathMajor(path string) (string, error) { - _, major, ok := module.SplitPathVersion(path) - if !ok { - return "", fmt.Errorf("invalid module path") - } - return major, nil -} - -func (f *File) Format() ([]byte, error) { - return Format(f.Syntax), nil -} - -// Cleanup cleans up the file f after any edit operations. -// To avoid quadratic behavior, modifications like [File.DropRequire] -// clear the entry but do not remove it from the slice. -// Cleanup cleans out all the cleared entries. 
-func (f *File) Cleanup() { - w := 0 - for _, r := range f.Require { - if r.Mod.Path != "" { - f.Require[w] = r - w++ - } - } - f.Require = f.Require[:w] - - w = 0 - for _, x := range f.Exclude { - if x.Mod.Path != "" { - f.Exclude[w] = x - w++ - } - } - f.Exclude = f.Exclude[:w] - - w = 0 - for _, r := range f.Replace { - if r.Old.Path != "" { - f.Replace[w] = r - w++ - } - } - f.Replace = f.Replace[:w] - - w = 0 - for _, r := range f.Retract { - if r.Low != "" || r.High != "" { - f.Retract[w] = r - w++ - } - } - f.Retract = f.Retract[:w] - - f.Syntax.Cleanup() -} - -func (f *File) AddGoStmt(version string) error { - if !GoVersionRE.MatchString(version) { - return fmt.Errorf("invalid language version %q", version) - } - if f.Go == nil { - var hint Expr - if f.Module != nil && f.Module.Syntax != nil { - hint = f.Module.Syntax - } - f.Go = &Go{ - Version: version, - Syntax: f.Syntax.addLine(hint, "go", version), - } - } else { - f.Go.Version = version - f.Syntax.updateLine(f.Go.Syntax, "go", version) - } - return nil -} - -// DropGoStmt deletes the go statement from the file. -func (f *File) DropGoStmt() { - if f.Go != nil { - f.Go.Syntax.markRemoved() - f.Go = nil - } -} - -// DropToolchainStmt deletes the toolchain statement from the file. -func (f *File) DropToolchainStmt() { - if f.Toolchain != nil { - f.Toolchain.Syntax.markRemoved() - f.Toolchain = nil - } -} - -func (f *File) AddToolchainStmt(name string) error { - if !ToolchainRE.MatchString(name) { - return fmt.Errorf("invalid toolchain name %q", name) - } - if f.Toolchain == nil { - var hint Expr - if f.Go != nil && f.Go.Syntax != nil { - hint = f.Go.Syntax - } else if f.Module != nil && f.Module.Syntax != nil { - hint = f.Module.Syntax - } - f.Toolchain = &Toolchain{ - Name: name, - Syntax: f.Syntax.addLine(hint, "toolchain", name), - } - } else { - f.Toolchain.Name = name - f.Syntax.updateLine(f.Toolchain.Syntax, "toolchain", name) - } - return nil -} - -// AddRequire sets the first require line for path to version vers, -// preserving any existing comments for that line and removing all -// other lines for path. -// -// If no line currently exists for path, AddRequire adds a new line -// at the end of the last require block. -func (f *File) AddRequire(path, vers string) error { - need := true - for _, r := range f.Require { - if r.Mod.Path == path { - if need { - r.Mod.Version = vers - f.Syntax.updateLine(r.Syntax, "require", AutoQuote(path), vers) - need = false - } else { - r.Syntax.markRemoved() - *r = Require{} - } - } - } - - if need { - f.AddNewRequire(path, vers, false) - } - return nil -} - -// AddNewRequire adds a new require line for path at version vers at the end of -// the last require block, regardless of any existing require lines for path. -func (f *File) AddNewRequire(path, vers string, indirect bool) { - line := f.Syntax.addLine(nil, "require", AutoQuote(path), vers) - r := &Require{ - Mod: module.Version{Path: path, Version: vers}, - Syntax: line, - } - r.setIndirect(indirect) - f.Require = append(f.Require, r) -} - -// SetRequire updates the requirements of f to contain exactly req, preserving -// the existing block structure and line comment contents (except for 'indirect' -// markings) for the first requirement on each named module path. -// -// The Syntax field is ignored for the requirements in req. -// -// Any requirements not already present in the file are added to the block -// containing the last require line. 
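The mutators in this stretch of the modfile API all follow one pattern: keep the typed fields and the underlying Syntax tree in step, defer compaction to Cleanup, and render with Format. A sketch of the typical edit cycle; modfile.Parse is the package's parsing entry point and is not shown in this hunk:

package main

import (
	"fmt"
	"log"

	"golang.org/x/mod/modfile"
)

func main() {
	data := []byte("module example.com/m\n\ngo 1.21\n")
	f, err := modfile.Parse("go.mod", data, nil) // nil VersionFixer: versions must already be canonical
	if err != nil {
		log.Fatal(err)
	}
	_ = f.AddGoStmt("1.22")                         // updates the existing go line in place
	_ = f.AddRequire("golang.org/x/mod", "v0.14.0") // reuses or appends a require line
	f.Cleanup()                                     // compacts entries cleared by Drop*/Set* edits
	out, err := f.Format()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", out)
}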
-// -// The requirements in req must specify at most one distinct version for each -// module path. -// -// If any existing requirements may be removed, the caller should call -// [File.Cleanup] after all edits are complete. -func (f *File) SetRequire(req []*Require) { - type elem struct { - version string - indirect bool - } - need := make(map[string]elem) - for _, r := range req { - if prev, dup := need[r.Mod.Path]; dup && prev.version != r.Mod.Version { - panic(fmt.Errorf("SetRequire called with conflicting versions for path %s (%s and %s)", r.Mod.Path, prev.version, r.Mod.Version)) - } - need[r.Mod.Path] = elem{r.Mod.Version, r.Indirect} - } - - // Update or delete the existing Require entries to preserve - // only the first for each module path in req. - for _, r := range f.Require { - e, ok := need[r.Mod.Path] - if ok { - r.setVersion(e.version) - r.setIndirect(e.indirect) - } else { - r.markRemoved() - } - delete(need, r.Mod.Path) - } - - // Add new entries in the last block of the file for any paths that weren't - // already present. - // - // This step is nondeterministic, but the final result will be deterministic - // because we will sort the block. - for path, e := range need { - f.AddNewRequire(path, e.version, e.indirect) - } - - f.SortBlocks() -} - -// SetRequireSeparateIndirect updates the requirements of f to contain the given -// requirements. Comment contents (except for 'indirect' markings) are retained -// from the first existing requirement for each module path. Like SetRequire, -// SetRequireSeparateIndirect adds requirements for new paths in req, -// updates the version and "// indirect" comment on existing requirements, -// and deletes requirements on paths not in req. Existing duplicate requirements -// are deleted. -// -// As its name suggests, SetRequireSeparateIndirect puts direct and indirect -// requirements into two separate blocks, one containing only direct -// requirements, and the other containing only indirect requirements. -// SetRequireSeparateIndirect may move requirements between these two blocks -// when their indirect markings change. However, SetRequireSeparateIndirect -// won't move requirements from other blocks, especially blocks with comments. -// -// If the file initially has one uncommented block of requirements, -// SetRequireSeparateIndirect will split it into a direct-only and indirect-only -// block. This aids in the transition to separate blocks. -func (f *File) SetRequireSeparateIndirect(req []*Require) { - // hasComments returns whether a line or block has comments - // other than "indirect". - hasComments := func(c Comments) bool { - return len(c.Before) > 0 || len(c.After) > 0 || len(c.Suffix) > 1 || - (len(c.Suffix) == 1 && - strings.TrimSpace(strings.TrimPrefix(c.Suffix[0].Token, string(slashSlash))) != "indirect") - } - - // moveReq adds r to block. If r was in another block, moveReq deletes - // it from that block and transfers its comments. - moveReq := func(r *Require, block *LineBlock) { - var line *Line - if r.Syntax == nil { - line = &Line{Token: []string{AutoQuote(r.Mod.Path), r.Mod.Version}} - r.Syntax = line - if r.Indirect { - r.setIndirect(true) - } - } else { - line = new(Line) - *line = *r.Syntax - if !line.InBlock && len(line.Token) > 0 && line.Token[0] == "require" { - line.Token = line.Token[1:] - } - r.Syntax.Token = nil // Cleanup will delete the old line. - r.Syntax = line - } - line.InBlock = true - block.Line = append(block.Line, line) - } - - // Examine existing require lines and blocks. 
- var ( - // We may insert new requirements into the last uncommented - // direct-only and indirect-only blocks. We may also move requirements - // to the opposite block if their indirect markings change. - lastDirectIndex = -1 - lastIndirectIndex = -1 - - // If there are no direct-only or indirect-only blocks, a new block may - // be inserted after the last require line or block. - lastRequireIndex = -1 - - // If there's only one require line or block, and it's uncommented, - // we'll move its requirements to the direct-only or indirect-only blocks. - requireLineOrBlockCount = 0 - - // Track the block each requirement belongs to (if any) so we can - // move them later. - lineToBlock = make(map[*Line]*LineBlock) - ) - for i, stmt := range f.Syntax.Stmt { - switch stmt := stmt.(type) { - case *Line: - if len(stmt.Token) == 0 || stmt.Token[0] != "require" { - continue - } - lastRequireIndex = i - requireLineOrBlockCount++ - if !hasComments(stmt.Comments) { - if isIndirect(stmt) { - lastIndirectIndex = i - } else { - lastDirectIndex = i - } - } - - case *LineBlock: - if len(stmt.Token) == 0 || stmt.Token[0] != "require" { - continue - } - lastRequireIndex = i - requireLineOrBlockCount++ - allDirect := len(stmt.Line) > 0 && !hasComments(stmt.Comments) - allIndirect := len(stmt.Line) > 0 && !hasComments(stmt.Comments) - for _, line := range stmt.Line { - lineToBlock[line] = stmt - if hasComments(line.Comments) { - allDirect = false - allIndirect = false - } else if isIndirect(line) { - allDirect = false - } else { - allIndirect = false - } - } - if allDirect { - lastDirectIndex = i - } - if allIndirect { - lastIndirectIndex = i - } - } - } - - oneFlatUncommentedBlock := requireLineOrBlockCount == 1 && - !hasComments(*f.Syntax.Stmt[lastRequireIndex].Comment()) - - // Create direct and indirect blocks if needed. Convert lines into blocks - // if needed. If we end up with an empty block or a one-line block, - // Cleanup will delete it or convert it to a line later. - insertBlock := func(i int) *LineBlock { - block := &LineBlock{Token: []string{"require"}} - f.Syntax.Stmt = append(f.Syntax.Stmt, nil) - copy(f.Syntax.Stmt[i+1:], f.Syntax.Stmt[i:]) - f.Syntax.Stmt[i] = block - return block - } - - ensureBlock := func(i int) *LineBlock { - switch stmt := f.Syntax.Stmt[i].(type) { - case *LineBlock: - return stmt - case *Line: - block := &LineBlock{ - Token: []string{"require"}, - Line: []*Line{stmt}, - } - stmt.Token = stmt.Token[1:] // remove "require" - stmt.InBlock = true - f.Syntax.Stmt[i] = block - return block - default: - panic(fmt.Sprintf("unexpected statement: %v", stmt)) - } - } - - var lastDirectBlock *LineBlock - if lastDirectIndex < 0 { - if lastIndirectIndex >= 0 { - lastDirectIndex = lastIndirectIndex - lastIndirectIndex++ - } else if lastRequireIndex >= 0 { - lastDirectIndex = lastRequireIndex + 1 - } else { - lastDirectIndex = len(f.Syntax.Stmt) - } - lastDirectBlock = insertBlock(lastDirectIndex) - } else { - lastDirectBlock = ensureBlock(lastDirectIndex) - } - - var lastIndirectBlock *LineBlock - if lastIndirectIndex < 0 { - lastIndirectIndex = lastDirectIndex + 1 - lastIndirectBlock = insertBlock(lastIndirectIndex) - } else { - lastIndirectBlock = ensureBlock(lastIndirectIndex) - } - - // Delete requirements we don't want anymore. - // Update versions and indirect comments on requirements we want to keep. 
- // If a requirement is in last{Direct,Indirect}Block with the wrong - // indirect marking after this, or if the requirement is in an single - // uncommented mixed block (oneFlatUncommentedBlock), move it to the - // correct block. - // - // Some blocks may be empty after this. Cleanup will remove them. - need := make(map[string]*Require) - for _, r := range req { - need[r.Mod.Path] = r - } - have := make(map[string]*Require) - for _, r := range f.Require { - path := r.Mod.Path - if need[path] == nil || have[path] != nil { - // Requirement not needed, or duplicate requirement. Delete. - r.markRemoved() - continue - } - have[r.Mod.Path] = r - r.setVersion(need[path].Mod.Version) - r.setIndirect(need[path].Indirect) - if need[path].Indirect && - (oneFlatUncommentedBlock || lineToBlock[r.Syntax] == lastDirectBlock) { - moveReq(r, lastIndirectBlock) - } else if !need[path].Indirect && - (oneFlatUncommentedBlock || lineToBlock[r.Syntax] == lastIndirectBlock) { - moveReq(r, lastDirectBlock) - } - } - - // Add new requirements. - for path, r := range need { - if have[path] == nil { - if r.Indirect { - moveReq(r, lastIndirectBlock) - } else { - moveReq(r, lastDirectBlock) - } - f.Require = append(f.Require, r) - } - } - - f.SortBlocks() -} - -func (f *File) DropRequire(path string) error { - for _, r := range f.Require { - if r.Mod.Path == path { - r.Syntax.markRemoved() - *r = Require{} - } - } - return nil -} - -// AddExclude adds a exclude statement to the mod file. Errors if the provided -// version is not a canonical version string -func (f *File) AddExclude(path, vers string) error { - if err := checkCanonicalVersion(path, vers); err != nil { - return err - } - - var hint *Line - for _, x := range f.Exclude { - if x.Mod.Path == path && x.Mod.Version == vers { - return nil - } - if x.Mod.Path == path { - hint = x.Syntax - } - } - - f.Exclude = append(f.Exclude, &Exclude{Mod: module.Version{Path: path, Version: vers}, Syntax: f.Syntax.addLine(hint, "exclude", AutoQuote(path), vers)}) - return nil -} - -func (f *File) DropExclude(path, vers string) error { - for _, x := range f.Exclude { - if x.Mod.Path == path && x.Mod.Version == vers { - x.Syntax.markRemoved() - *x = Exclude{} - } - } - return nil -} - -func (f *File) AddReplace(oldPath, oldVers, newPath, newVers string) error { - return addReplace(f.Syntax, &f.Replace, oldPath, oldVers, newPath, newVers) -} - -func addReplace(syntax *FileSyntax, replace *[]*Replace, oldPath, oldVers, newPath, newVers string) error { - need := true - old := module.Version{Path: oldPath, Version: oldVers} - new := module.Version{Path: newPath, Version: newVers} - tokens := []string{"replace", AutoQuote(oldPath)} - if oldVers != "" { - tokens = append(tokens, oldVers) - } - tokens = append(tokens, "=>", AutoQuote(newPath)) - if newVers != "" { - tokens = append(tokens, newVers) - } - - var hint *Line - for _, r := range *replace { - if r.Old.Path == oldPath && (oldVers == "" || r.Old.Version == oldVers) { - if need { - // Found replacement for old; update to use new. - r.New = new - syntax.updateLine(r.Syntax, tokens...) - need = false - continue - } - // Already added; delete other replacements for same. 
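The exclude and replace mutators are symmetric with the require ones: the Add functions reuse an existing line for the same path as a placement hint, and the Drop functions clear entries for Cleanup to sweep. A brief continuation of the sketch above, reusing its parsed f:

_ = f.AddExclude("example.com/broken", "v1.4.0")                       // no-op if already present
_ = f.AddReplace("example.com/a", "", "../a-fork", "")                 // directory replacement; versions are optional
_ = f.AddReplace("example.com/b", "v1.0.0", "example.com/b", "v1.0.1") // version-to-version replacement
_ = f.DropReplace("example.com/a", "")
f.Cleanup() // sweeps the entry cleared by DropReplace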
- r.Syntax.markRemoved() - *r = Replace{} - } - if r.Old.Path == oldPath { - hint = r.Syntax - } - } - if need { - *replace = append(*replace, &Replace{Old: old, New: new, Syntax: syntax.addLine(hint, tokens...)}) - } - return nil -} - -func (f *File) DropReplace(oldPath, oldVers string) error { - for _, r := range f.Replace { - if r.Old.Path == oldPath && r.Old.Version == oldVers { - r.Syntax.markRemoved() - *r = Replace{} - } - } - return nil -} - -// AddRetract adds a retract statement to the mod file. Errors if the provided -// version interval does not consist of canonical version strings -func (f *File) AddRetract(vi VersionInterval, rationale string) error { - var path string - if f.Module != nil { - path = f.Module.Mod.Path - } - if err := checkCanonicalVersion(path, vi.High); err != nil { - return err - } - if err := checkCanonicalVersion(path, vi.Low); err != nil { - return err - } - - r := &Retract{ - VersionInterval: vi, - } - if vi.Low == vi.High { - r.Syntax = f.Syntax.addLine(nil, "retract", AutoQuote(vi.Low)) - } else { - r.Syntax = f.Syntax.addLine(nil, "retract", "[", AutoQuote(vi.Low), ",", AutoQuote(vi.High), "]") - } - if rationale != "" { - for _, line := range strings.Split(rationale, "\n") { - com := Comment{Token: "// " + line} - r.Syntax.Comment().Before = append(r.Syntax.Comment().Before, com) - } - } - return nil -} - -func (f *File) DropRetract(vi VersionInterval) error { - for _, r := range f.Retract { - if r.VersionInterval == vi { - r.Syntax.markRemoved() - *r = Retract{} - } - } - return nil -} - -func (f *File) SortBlocks() { - f.removeDups() // otherwise sorting is unsafe - - // semanticSortForExcludeVersionV is the Go version (plus leading "v") at which - // lines in exclude blocks start to use semantic sort instead of lexicographic sort. - // See go.dev/issue/60028. - const semanticSortForExcludeVersionV = "v1.21" - useSemanticSortForExclude := f.Go != nil && semver.Compare("v"+f.Go.Version, semanticSortForExcludeVersionV) >= 0 - - for _, stmt := range f.Syntax.Stmt { - block, ok := stmt.(*LineBlock) - if !ok { - continue - } - less := lineLess - if block.Token[0] == "exclude" && useSemanticSortForExclude { - less = lineExcludeLess - } else if block.Token[0] == "retract" { - less = lineRetractLess - } - sort.SliceStable(block.Line, func(i, j int) bool { - return less(block.Line[i], block.Line[j]) - }) - } -} - -// removeDups removes duplicate exclude and replace directives. -// -// Earlier exclude directives take priority. -// -// Later replace directives take priority. -// -// require directives are not de-duplicated. That's left up to higher-level -// logic (MVS). -// -// retract directives are not de-duplicated since comments are -// meaningful, and versions may be retracted multiple times. -func (f *File) removeDups() { - removeDups(f.Syntax, &f.Exclude, &f.Replace) -} - -func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace) { - kill := make(map[*Line]bool) - - // Remove duplicate excludes. - if exclude != nil { - haveExclude := make(map[module.Version]bool) - for _, x := range *exclude { - if haveExclude[x.Mod] { - kill[x.Syntax] = true - continue - } - haveExclude[x.Mod] = true - } - var excl []*Exclude - for _, x := range *exclude { - if !kill[x.Syntax] { - excl = append(excl, x) - } - } - *exclude = excl - } - - // Remove duplicate replacements. - // Later replacements take priority over earlier ones. 
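AddRetract stores the rationale as "//" comment lines attached before the directive, which is one reason retract directives are never de-duplicated here: their comments are meaningful. Continuing the same sketch:

_ = f.AddRetract(modfile.VersionInterval{Low: "v1.0.0", High: "v1.0.0"}, "published in error")
_ = f.AddRetract(modfile.VersionInterval{Low: "v1.1.0", High: "v1.1.5"}, "breaks downstream builds")
f.SortBlocks() // retract blocks sort descending by interval via lineRetractLess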
- haveReplace := make(map[module.Version]bool) - for i := len(*replace) - 1; i >= 0; i-- { - x := (*replace)[i] - if haveReplace[x.Old] { - kill[x.Syntax] = true - continue - } - haveReplace[x.Old] = true - } - var repl []*Replace - for _, x := range *replace { - if !kill[x.Syntax] { - repl = append(repl, x) - } - } - *replace = repl - - // Duplicate require and retract directives are not removed. - - // Drop killed statements from the syntax tree. - var stmts []Expr - for _, stmt := range syntax.Stmt { - switch stmt := stmt.(type) { - case *Line: - if kill[stmt] { - continue - } - case *LineBlock: - var lines []*Line - for _, line := range stmt.Line { - if !kill[line] { - lines = append(lines, line) - } - } - stmt.Line = lines - if len(lines) == 0 { - continue - } - } - stmts = append(stmts, stmt) - } - syntax.Stmt = stmts -} - -// lineLess returns whether li should be sorted before lj. It sorts -// lexicographically without assigning any special meaning to tokens. -func lineLess(li, lj *Line) bool { - for k := 0; k < len(li.Token) && k < len(lj.Token); k++ { - if li.Token[k] != lj.Token[k] { - return li.Token[k] < lj.Token[k] - } - } - return len(li.Token) < len(lj.Token) -} - -// lineExcludeLess reports whether li should be sorted before lj for lines in -// an "exclude" block. -func lineExcludeLess(li, lj *Line) bool { - if len(li.Token) != 2 || len(lj.Token) != 2 { - // Not a known exclude specification. - // Fall back to sorting lexicographically. - return lineLess(li, lj) - } - // An exclude specification has two tokens: ModulePath and Version. - // Compare module path by string order and version by semver rules. - if pi, pj := li.Token[0], lj.Token[0]; pi != pj { - return pi < pj - } - return semver.Compare(li.Token[1], lj.Token[1]) < 0 -} - -// lineRetractLess returns whether li should be sorted before lj for lines in -// a "retract" block. It treats each line as a version interval. Single versions -// are compared as if they were intervals with the same low and high version. -// Intervals are sorted in descending order, first by low version, then by -// high version, using semver.Compare. -func lineRetractLess(li, lj *Line) bool { - interval := func(l *Line) VersionInterval { - if len(l.Token) == 1 { - return VersionInterval{Low: l.Token[0], High: l.Token[0]} - } else if len(l.Token) == 5 && l.Token[0] == "[" && l.Token[2] == "," && l.Token[4] == "]" { - return VersionInterval{Low: l.Token[1], High: l.Token[3]} - } else { - // Line in unknown format. Treat as an invalid version. - return VersionInterval{} - } - } - vii := interval(li) - vij := interval(lj) - if cmp := semver.Compare(vii.Low, vij.Low); cmp != 0 { - return cmp > 0 - } - return semver.Compare(vii.High, vij.High) > 0 -} - -// checkCanonicalVersion returns a non-nil error if vers is not a canonical -// version string or does not match the major version of path. -// -// If path is non-empty, the error text suggests a format with a major version -// corresponding to the path. 
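The semantic sort adopted for exclude blocks (and always used for retract intervals) matters because lexicographic comparison misorders multi-digit version components. A two-line illustration with the same golang.org/x/mod/semver package these comparators use:

fmt.Println(semver.Compare("v1.10.0", "v1.9.0")) // 1: v1.10.0 sorts after v1.9.0 semantically
fmt.Println("v1.10.0" < "v1.9.0")                // true: byte order puts v1.10.0 first, wrongly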
-func checkCanonicalVersion(path, vers string) error { - _, pathMajor, pathMajorOk := module.SplitPathVersion(path) - - if vers == "" || vers != module.CanonicalVersion(vers) { - if pathMajor == "" { - return &module.InvalidVersionError{ - Version: vers, - Err: fmt.Errorf("must be of the form v1.2.3"), - } - } - return &module.InvalidVersionError{ - Version: vers, - Err: fmt.Errorf("must be of the form %s.2.3", module.PathMajorPrefix(pathMajor)), - } - } - - if pathMajorOk { - if err := module.CheckPathMajor(vers, pathMajor); err != nil { - if pathMajor == "" { - // In this context, the user probably wrote "v2.3.4" when they meant - // "v2.3.4+incompatible". Suggest that instead of "v0 or v1". - return &module.InvalidVersionError{ - Version: vers, - Err: fmt.Errorf("should be %s+incompatible (or module %s/%v)", vers, path, semver.Major(vers)), - } - } - return err - } - } - - return nil -} diff --git a/vendor/golang.org/x/mod/modfile/work.go b/vendor/golang.org/x/mod/modfile/work.go deleted file mode 100644 index d7b99376eb..0000000000 --- a/vendor/golang.org/x/mod/modfile/work.go +++ /dev/null @@ -1,285 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package modfile - -import ( - "fmt" - "sort" - "strings" -) - -// A WorkFile is the parsed, interpreted form of a go.work file. -type WorkFile struct { - Go *Go - Toolchain *Toolchain - Use []*Use - Replace []*Replace - - Syntax *FileSyntax -} - -// A Use is a single directory statement. -type Use struct { - Path string // Use path of module. - ModulePath string // Module path in the comment. - Syntax *Line -} - -// ParseWork parses and returns a go.work file. -// -// file is the name of the file, used in positions and errors. -// -// data is the content of the file. -// -// fix is an optional function that canonicalizes module versions. -// If fix is nil, all module versions must be canonical ([module.CanonicalVersion] -// must return the same string). -func ParseWork(file string, data []byte, fix VersionFixer) (*WorkFile, error) { - fs, err := parse(file, data) - if err != nil { - return nil, err - } - f := &WorkFile{ - Syntax: fs, - } - var errs ErrorList - - for _, x := range fs.Stmt { - switch x := x.(type) { - case *Line: - f.add(&errs, x, x.Token[0], x.Token[1:], fix) - - case *LineBlock: - if len(x.Token) > 1 { - errs = append(errs, Error{ - Filename: file, - Pos: x.Start, - Err: fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")), - }) - continue - } - switch x.Token[0] { - default: - errs = append(errs, Error{ - Filename: file, - Pos: x.Start, - Err: fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")), - }) - continue - case "use", "replace": - for _, l := range x.Line { - f.add(&errs, l, x.Token[0], l.Token, fix) - } - } - } - } - - if len(errs) > 0 { - return nil, errs - } - return f, nil -} - -// Cleanup cleans up the file f after any edit operations. -// To avoid quadratic behavior, modifications like [WorkFile.DropRequire] -// clear the entry but do not remove it from the slice. -// Cleanup cleans out all the cleared entries. 
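For reference, the WorkFile API being dropped from the vendor tree here mirrors File for the go.work directive set (go, toolchain, use, replace). A compact sketch of its use, as defined in the removed file and assuming log and fmt are imported:

data := []byte("go 1.21\n\nuse ./tools\n")
wf, err := modfile.ParseWork("go.work", data, nil)
if err != nil {
	log.Fatal(err)
}
_ = wf.AddUse("./app", "example.com/app") // the module path is kept for a future comment (see the TODO below)
wf.Cleanup()
for _, u := range wf.Use {
	fmt.Println(u.Path) // ./tools, then ./app
}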
-func (f *WorkFile) Cleanup() { - w := 0 - for _, r := range f.Use { - if r.Path != "" { - f.Use[w] = r - w++ - } - } - f.Use = f.Use[:w] - - w = 0 - for _, r := range f.Replace { - if r.Old.Path != "" { - f.Replace[w] = r - w++ - } - } - f.Replace = f.Replace[:w] - - f.Syntax.Cleanup() -} - -func (f *WorkFile) AddGoStmt(version string) error { - if !GoVersionRE.MatchString(version) { - return fmt.Errorf("invalid language version %q", version) - } - if f.Go == nil { - stmt := &Line{Token: []string{"go", version}} - f.Go = &Go{ - Version: version, - Syntax: stmt, - } - // Find the first non-comment-only block and add - // the go statement before it. That will keep file comments at the top. - i := 0 - for i = 0; i < len(f.Syntax.Stmt); i++ { - if _, ok := f.Syntax.Stmt[i].(*CommentBlock); !ok { - break - } - } - f.Syntax.Stmt = append(append(f.Syntax.Stmt[:i:i], stmt), f.Syntax.Stmt[i:]...) - } else { - f.Go.Version = version - f.Syntax.updateLine(f.Go.Syntax, "go", version) - } - return nil -} - -func (f *WorkFile) AddToolchainStmt(name string) error { - if !ToolchainRE.MatchString(name) { - return fmt.Errorf("invalid toolchain name %q", name) - } - if f.Toolchain == nil { - stmt := &Line{Token: []string{"toolchain", name}} - f.Toolchain = &Toolchain{ - Name: name, - Syntax: stmt, - } - // Find the go line and add the toolchain line after it. - // Or else find the first non-comment-only block and add - // the toolchain line before it. That will keep file comments at the top. - i := 0 - for i = 0; i < len(f.Syntax.Stmt); i++ { - if line, ok := f.Syntax.Stmt[i].(*Line); ok && len(line.Token) > 0 && line.Token[0] == "go" { - i++ - goto Found - } - } - for i = 0; i < len(f.Syntax.Stmt); i++ { - if _, ok := f.Syntax.Stmt[i].(*CommentBlock); !ok { - break - } - } - Found: - f.Syntax.Stmt = append(append(f.Syntax.Stmt[:i:i], stmt), f.Syntax.Stmt[i:]...) - } else { - f.Toolchain.Name = name - f.Syntax.updateLine(f.Toolchain.Syntax, "toolchain", name) - } - return nil -} - -// DropGoStmt deletes the go statement from the file. -func (f *WorkFile) DropGoStmt() { - if f.Go != nil { - f.Go.Syntax.markRemoved() - f.Go = nil - } -} - -// DropToolchainStmt deletes the toolchain statement from the file. -func (f *WorkFile) DropToolchainStmt() { - if f.Toolchain != nil { - f.Toolchain.Syntax.markRemoved() - f.Toolchain = nil - } -} - -func (f *WorkFile) AddUse(diskPath, modulePath string) error { - need := true - for _, d := range f.Use { - if d.Path == diskPath { - if need { - d.ModulePath = modulePath - f.Syntax.updateLine(d.Syntax, "use", AutoQuote(diskPath)) - need = false - } else { - d.Syntax.markRemoved() - *d = Use{} - } - } - } - - if need { - f.AddNewUse(diskPath, modulePath) - } - return nil -} - -func (f *WorkFile) AddNewUse(diskPath, modulePath string) { - line := f.Syntax.addLine(nil, "use", AutoQuote(diskPath)) - f.Use = append(f.Use, &Use{Path: diskPath, ModulePath: modulePath, Syntax: line}) -} - -func (f *WorkFile) SetUse(dirs []*Use) { - need := make(map[string]string) - for _, d := range dirs { - need[d.Path] = d.ModulePath - } - - for _, d := range f.Use { - if modulePath, ok := need[d.Path]; ok { - d.ModulePath = modulePath - } else { - d.Syntax.markRemoved() - *d = Use{} - } - } - - // TODO(#45713): Add module path to comment. 
- - for diskPath, modulePath := range need { - f.AddNewUse(diskPath, modulePath) - } - f.SortBlocks() -} - -func (f *WorkFile) DropUse(path string) error { - for _, d := range f.Use { - if d.Path == path { - d.Syntax.markRemoved() - *d = Use{} - } - } - return nil -} - -func (f *WorkFile) AddReplace(oldPath, oldVers, newPath, newVers string) error { - return addReplace(f.Syntax, &f.Replace, oldPath, oldVers, newPath, newVers) -} - -func (f *WorkFile) DropReplace(oldPath, oldVers string) error { - for _, r := range f.Replace { - if r.Old.Path == oldPath && r.Old.Version == oldVers { - r.Syntax.markRemoved() - *r = Replace{} - } - } - return nil -} - -func (f *WorkFile) SortBlocks() { - f.removeDups() // otherwise sorting is unsafe - - for _, stmt := range f.Syntax.Stmt { - block, ok := stmt.(*LineBlock) - if !ok { - continue - } - sort.SliceStable(block.Line, func(i, j int) bool { - return lineLess(block.Line[i], block.Line[j]) - }) - } -} - -// removeDups removes duplicate replace directives. -// -// Later replace directives take priority. -// -// require directives are not de-duplicated. That's left up to higher-level -// logic (MVS). -// -// retract directives are not de-duplicated since comments are -// meaningful, and versions may be retracted multiple times. -func (f *WorkFile) removeDups() { - removeDups(f.Syntax, nil, &f.Replace) -} diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index c6492020ec..fdcaa974d2 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -584,7 +584,7 @@ ccflags="$@" $2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ || $2 ~ /^KEYCTL_/ || $2 ~ /^PERF_/ || - $2 ~ /^SECCOMP_MODE_/ || + $2 ~ /^SECCOMP_/ || $2 ~ /^SEEK_/ || $2 ~ /^SCHED_/ || $2 ~ /^SPLICE_/ || diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index a5d3ff8df9..36bf8399f4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1785,6 +1785,8 @@ const ( LANDLOCK_ACCESS_FS_REMOVE_FILE = 0x20 LANDLOCK_ACCESS_FS_TRUNCATE = 0x4000 LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 + LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 + LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 LINUX_REBOOT_CMD_CAD_OFF = 0x0 LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef @@ -2465,6 +2467,7 @@ const ( PR_MCE_KILL_GET = 0x22 PR_MCE_KILL_LATE = 0x0 PR_MCE_KILL_SET = 0x1 + PR_MDWE_NO_INHERIT = 0x2 PR_MDWE_REFUSE_EXEC_GAIN = 0x1 PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_ENABLE_MANAGEMENT = 0x2b @@ -2669,8 +2672,9 @@ const ( RTAX_FEATURES = 0xc RTAX_FEATURE_ALLFRAG = 0x8 RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf + RTAX_FEATURE_MASK = 0x1f RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TCP_USEC_TS = 0x10 RTAX_FEATURE_TIMESTAMP = 0x4 RTAX_HOPLIMIT = 0xa RTAX_INITCWND = 0xb @@ -2913,9 +2917,38 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d SC_LOG_FLUSH = 0x100000 + SECCOMP_ADDFD_FLAG_SEND = 0x2 + SECCOMP_ADDFD_FLAG_SETFD = 0x1 + SECCOMP_FILTER_FLAG_LOG = 0x2 + SECCOMP_FILTER_FLAG_NEW_LISTENER = 0x8 + SECCOMP_FILTER_FLAG_SPEC_ALLOW = 0x4 + SECCOMP_FILTER_FLAG_TSYNC = 0x1 + SECCOMP_FILTER_FLAG_TSYNC_ESRCH = 0x10 + SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV = 0x20 + SECCOMP_GET_ACTION_AVAIL = 0x2 + SECCOMP_GET_NOTIF_SIZES = 0x3 + SECCOMP_IOCTL_NOTIF_RECV = 0xc0502100 + SECCOMP_IOCTL_NOTIF_SEND = 0xc0182101 + SECCOMP_IOC_MAGIC = '!' 
SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 + SECCOMP_RET_ACTION = 0x7fff0000 + SECCOMP_RET_ACTION_FULL = 0xffff0000 + SECCOMP_RET_ALLOW = 0x7fff0000 + SECCOMP_RET_DATA = 0xffff + SECCOMP_RET_ERRNO = 0x50000 + SECCOMP_RET_KILL = 0x0 + SECCOMP_RET_KILL_PROCESS = 0x80000000 + SECCOMP_RET_KILL_THREAD = 0x0 + SECCOMP_RET_LOG = 0x7ffc0000 + SECCOMP_RET_TRACE = 0x7ff00000 + SECCOMP_RET_TRAP = 0x30000 + SECCOMP_RET_USER_NOTIF = 0x7fc00000 + SECCOMP_SET_MODE_FILTER = 0x1 + SECCOMP_SET_MODE_STRICT = 0x0 + SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP = 0x1 + SECCOMP_USER_NOTIF_FLAG_CONTINUE = 0x1 SECRETMEM_MAGIC = 0x5345434d SECURITYFS_MAGIC = 0x73636673 SEEK_CUR = 0x1 @@ -3075,6 +3108,7 @@ const ( SOL_TIPC = 0x10f SOL_TLS = 0x11a SOL_UDP = 0x11 + SOL_VSOCK = 0x11f SOL_X25 = 0x106 SOL_XDP = 0x11b SOMAXCONN = 0x1000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 4920821cf3..42ff8c3c1b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index a0c1e41127..dca436004f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -282,6 +282,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index c63985560f..5cca668ac3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -288,6 +288,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 47cc62e25c..d8cae6d153 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -278,6 +278,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 27ac4a09e2..28e39afdcb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -275,6 +275,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 
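These additions grow the generated SECCOMP_ set from the three mode values to the full filter and user-notification ABI (return actions, filter flags, and notification ioctls). The constants are raw kernel values; for orientation, strict mode can already be entered through the existing Prctl wrapper. A hedged sketch using only calls already present in x/sys/unix:

// After this succeeds, only read/write on already-open descriptors,
// _exit, and sigreturn are allowed; any other syscall kills the task
// (SECCOMP_MODE_STRICT semantics).
if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_STRICT, 0, 0, 0); err != nil {
	log.Fatalf("prctl(PR_SET_SECCOMP): %v", err)
}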
SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 54694642a5..cd66e92cb4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 3adb81d758..c1595eba78 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 2dfe98f0d1..ee9456b0da 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index f5398f84f0..8cfca81e1b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index c54f152d68..60b0deb3af 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -336,6 +336,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 76057dc72f..f90aa7281b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -340,6 +340,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 
e0c3725e2b..ba9e015033 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -340,6 +340,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 18f2813ed5..07cdfd6e9f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -272,6 +272,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 11619d4ec8..2f1dd214a7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -344,6 +344,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 396d994da7..f40519d901 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -335,6 +335,9 @@ const ( SCM_TIMESTAMPNS = 0x21 SCM_TXTIME = 0x3f SCM_WIFI_STATUS = 0x25 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x400000 SFD_NONBLOCK = 0x4000 SF_FP = 0x38 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index fcf3ecbdde..0cc3ce496e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -448,4 +448,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index f56dc2504a..856d92d69e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -371,4 +371,7 @@ const ( SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 974bf24676..8d467094cf 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -412,4 +412,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 39a2739e23..edc173244d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -315,4 +315,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index cf9c9d77e1..445eba2061 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -309,4 +309,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 10b7362ef4..adba01bca7 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -432,4 +432,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 SYS_FCHMODAT2 = 4452 + SYS_MAP_SHADOW_STACK = 4453 + SYS_FUTEX_WAKE = 4454 + SYS_FUTEX_WAIT = 4455 + SYS_FUTEX_REQUEUE = 4456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index cd4d8b4fd3..014c4e9c7a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -362,4 +362,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 SYS_FCHMODAT2 = 5452 + SYS_MAP_SHADOW_STACK = 5453 + SYS_FUTEX_WAKE = 5454 + SYS_FUTEX_WAIT = 5455 + SYS_FUTEX_REQUEUE = 5456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 2c0efca818..ccc97d74d0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -362,4 +362,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 SYS_FCHMODAT2 = 5452 + SYS_MAP_SHADOW_STACK = 5453 + SYS_FUTEX_WAKE = 5454 + SYS_FUTEX_WAIT = 5455 + SYS_FUTEX_REQUEUE = 5456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index a72e31d391..ec2b64a95d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -432,4 +432,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 SYS_FCHMODAT2 = 4452 + SYS_MAP_SHADOW_STACK = 4453 + SYS_FUTEX_WAKE = 4454 + SYS_FUTEX_WAIT = 4455 + SYS_FUTEX_REQUEUE = 4456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index c7d1e37471..21a839e338 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -439,4 +439,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index f4d4838c87..c11121ec3b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -411,4 +411,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index b64f0e5911..909b631fcb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -411,4 +411,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 95711195a0..e49bed16ea 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -316,4 +316,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index f94e943bc4..66017d2d32 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -377,4 +377,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index ba0c2bc515..47bab18dce 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -390,4 +390,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index bbf8399ff5..dc0c955eec 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -174,7 +174,8 @@ type FscryptPolicyV2 struct { Contents_encryption_mode uint8 Filenames_encryption_mode uint8 Flags uint8 - _ [4]uint8 + Log2_data_unit_size uint8 + _ [3]uint8 Master_key_identifier [16]uint8 } @@ -455,60 +456,63 @@ type Ucred struct { } type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 - Pacing_rate uint64 - Max_pacing_rate uint64 - Bytes_acked uint64 - Bytes_received uint64 - Segs_out uint32 - Segs_in uint32 - Notsent_bytes uint32 - Min_rtt uint32 - Data_segs_in uint32 - Data_segs_out uint32 - Delivery_rate uint64 - Busy_time uint64 - Rwnd_limited uint64 - Sndbuf_limited uint64 - Delivered 
uint32 - Delivered_ce uint32 - Bytes_sent uint64 - Bytes_retrans uint64 - Dsack_dups uint32 - Reord_seen uint32 - Rcv_ooopack uint32 - Snd_wnd uint32 - Rcv_wnd uint32 - Rehash uint32 + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 + Pacing_rate uint64 + Max_pacing_rate uint64 + Bytes_acked uint64 + Bytes_received uint64 + Segs_out uint32 + Segs_in uint32 + Notsent_bytes uint32 + Min_rtt uint32 + Data_segs_in uint32 + Data_segs_out uint32 + Delivery_rate uint64 + Busy_time uint64 + Rwnd_limited uint64 + Sndbuf_limited uint64 + Delivered uint32 + Delivered_ce uint32 + Bytes_sent uint64 + Bytes_retrans uint64 + Dsack_dups uint32 + Reord_seen uint32 + Rcv_ooopack uint32 + Snd_wnd uint32 + Rcv_wnd uint32 + Rehash uint32 + Total_rto uint16 + Total_rto_recoveries uint16 + Total_rto_time uint32 } type CanFilter struct { @@ -551,7 +555,7 @@ const ( SizeofIPv6MTUInfo = 0x20 SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc - SizeofTCPInfo = 0xf0 + SizeofTCPInfo = 0xf8 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -3399,7 +3403,7 @@ const ( DEVLINK_PORT_FN_ATTR_STATE = 0x2 DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 DEVLINK_PORT_FN_ATTR_CAPS = 0x4 - DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x4 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x5 ) type FsverityDigest struct { @@ -4183,7 +4187,8 @@ const ( ) type LandlockRulesetAttr struct { - Access_fs uint64 + Access_fs uint64 + Access_net uint64 } type LandlockPathBeneathAttr struct { @@ -5134,7 +5139,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x1b + NL80211_FREQUENCY_ATTR_MAX = 0x1c NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc @@ -5547,7 +5552,7 @@ const ( NL80211_REGDOM_TYPE_CUSTOM_WORLD = 0x2 NL80211_REGDOM_TYPE_INTERSECTION = 0x3 NL80211_REGDOM_TYPE_WORLD = 0x1 - NL80211_REG_RULE_ATTR_MAX = 0x7 + NL80211_REG_RULE_ATTR_MAX = 0x8 NL80211_REKEY_DATA_AKM = 0x4 NL80211_REKEY_DATA_KCK = 0x2 NL80211_REKEY_DATA_KEK = 0x1 diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go index b8ad192506..d4577a4238 100644 --- a/vendor/golang.org/x/sys/windows/env_windows.go +++ b/vendor/golang.org/x/sys/windows/env_windows.go @@ -37,14 +37,17 @@ func (token Token) Environ(inheritExisting bool) (env []string, err error) { return nil, err } defer DestroyEnvironmentBlock(block) - blockp := unsafe.Pointer(block) - for { - entry := UTF16PtrToString((*uint16)(blockp)) - if len(entry) == 0 { - break + size := unsafe.Sizeof(*block) + for *block != 0 { + // find NUL terminator + end := unsafe.Pointer(block) + for *(*uint16)(end) != 0 { + end = unsafe.Add(end, size) } - env = append(env, entry) - blockp = unsafe.Add(blockp, 2*(len(entry)+1)) + + entry := unsafe.Slice(block, (uintptr(end)-uintptr(unsafe.Pointer(block)))/size) + env = append(env, UTF16ToString(entry)) + block = (*uint16)(unsafe.Add(end, size)) } return env, nil } diff --git 
a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index ffb8708ccf..6395a031d4 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -125,8 +125,7 @@ func UTF16PtrToString(p *uint16) string { for ptr := unsafe.Pointer(p); *(*uint16)(ptr) != 0; n++ { ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p)) } - - return string(utf16.Decode(unsafe.Slice(p, n))) + return UTF16ToString(unsafe.Slice(p, n)) } func Getpagesize() int { return 4096 } diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index b2a0b7c6a6..a8d7b06ac0 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -15,22 +15,10 @@ Load passes most patterns directly to the underlying build tool. The default build tool is the go command. Its supported patterns are described at https://pkg.go.dev/cmd/go#hdr-Package_lists_and_patterns. +Other build systems may be supported by providing a "driver"; +see [The driver protocol]. -Load may be used in Go projects that use alternative build systems, by -installing an appropriate "driver" program for the build system and -specifying its location in the GOPACKAGESDRIVER environment variable. -For example, -https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration -explains how to use the driver for Bazel. -The driver program is responsible for interpreting patterns in its -preferred notation and reporting information about the packages that -they identify. -(See driverRequest and driverResponse types for the JSON -schema used by the protocol. -Though the protocol is supported, these types are currently unexported; -see #64608 for a proposal to publish them.) - -Regardless of driver, all patterns with the prefix "query=", where query is a +All patterns with the prefix "query=", where query is a non-empty string of letters from [a-z], are reserved and may be interpreted as query operators. @@ -86,7 +74,29 @@ for details. Most tools should pass their command-line arguments (after any flags) uninterpreted to [Load], so that it can interpret them according to the conventions of the underlying build system. + See the Example function for typical usage. + +# The driver protocol + +[Load] may be used to load Go packages even in Go projects that use +alternative build systems, by installing an appropriate "driver" +program for the build system and specifying its location in the +GOPACKAGESDRIVER environment variable. +For example, +https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration +explains how to use the driver for Bazel. + +The driver program is responsible for interpreting patterns in its +preferred notation and reporting information about the packages that +those patterns identify. Drivers must also support the special "file=" +and "pattern=" patterns described above. + +The patterns are provided as positional command-line arguments. A +JSON-encoded [DriverRequest] message providing additional information +is written to the driver's standard input. The driver must write a +JSON-encoded [DriverResponse] message to its standard output. (This +message differs from the JSON schema produced by 'go list'.) 
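As a concrete illustration (a sketch, not part of the protocol specification), a minimal driver that declines every query looks roughly like this, causing go/packages to fall back to 'go list':

	package main

	import (
		"encoding/json"
		"os"

		"golang.org/x/tools/go/packages"
	)

	func main() {
		var req packages.DriverRequest
		if err := json.NewDecoder(os.Stdin).Decode(&req); err != nil {
			os.Exit(1)
		}
		_ = os.Args[1:] // the query patterns arrive as positional arguments
		// NotHandled makes go/packages fall back to the next driver ('go list').
		json.NewEncoder(os.Stdout).Encode(&packages.DriverResponse{NotHandled: true})
	}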
*/ package packages // import "golang.org/x/tools/go/packages" diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index 7db1d1293a..4335c1eb14 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -2,12 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This file enables an external tool to intercept package requests. -// If the tool is present then its results are used in preference to -// the go list command. - package packages +// This file defines the protocol that enables an external "driver" +// tool to supply package metadata in place of 'go list'. + import ( "bytes" "encoding/json" @@ -17,31 +16,71 @@ import ( "strings" ) -// The Driver Protocol +// DriverRequest defines the schema of a request for package metadata +// from an external driver program. The JSON-encoded DriverRequest +// message is provided to the driver program's standard input. The +// query patterns are provided as command-line arguments. // -// The driver, given the inputs to a call to Load, returns metadata about the packages specified. -// This allows for different build systems to support go/packages by telling go/packages how the -// packages' source is organized. -// The driver is a binary, either specified by the GOPACKAGESDRIVER environment variable or in -// the path as gopackagesdriver. It's given the inputs to load in its argv. See the package -// documentation in doc.go for the full description of the patterns that need to be supported. -// A driver receives as a JSON-serialized driverRequest struct in standard input and will -// produce a JSON-serialized driverResponse (see definition in packages.go) in its standard output. - -// driverRequest is used to provide the portion of Load's Config that is needed by a driver. -type driverRequest struct { +// See the package documentation for an overview. +type DriverRequest struct { Mode LoadMode `json:"mode"` + // Env specifies the environment the underlying build system should be run in. Env []string `json:"env"` + // BuildFlags are flags that should be passed to the underlying build system. BuildFlags []string `json:"build_flags"` + // Tests specifies whether the patterns should also return test packages. Tests bool `json:"tests"` + // Overlay maps file paths (relative to the driver's working directory) to the byte contents // of overlay files. Overlay map[string][]byte `json:"overlay"` } +// DriverResponse defines the schema of a response from an external +// driver program, providing the results of a query for package +// metadata. The driver program must write a JSON-encoded +// DriverResponse message to its standard output. +// +// See the package documentation for an overview. +type DriverResponse struct { + // NotHandled is returned if the request can't be handled by the current + // driver. If an external driver returns a response with NotHandled, the + // rest of the DriverResponse is ignored, and go/packages will fallback + // to the next driver. If go/packages is extended in the future to support + // lists of multiple drivers, go/packages will fall back to the next driver. + NotHandled bool + + // Compiler and Arch are the arguments pass of types.SizesFor + // to get a types.Sizes to use when type checking. + Compiler string + Arch string + + // Roots is the set of package IDs that make up the root packages. 
+ // We have to encode this separately because when we encode a single package + // we cannot know if it is one of the roots as that requires knowledge of the + // graph it is part of. + Roots []string `json:",omitempty"` + + // Packages is the full set of packages in the graph. + // The packages are not connected into a graph. + // The Imports, if populated, will be stubs that only have their ID set. + // Imports will be connected and then type and syntax information added in a + // later pass (see refine). + Packages []*Package + + // GoVersion is the minor version number used by the driver + // (e.g. the go command on the PATH) when selecting .go files. + // Zero means unknown. + GoVersion int +} + +// driver is the type for functions that query the build system for the +// packages named by the patterns. +type driver func(cfg *Config, patterns ...string) (*DriverResponse, error) + // findExternalDriver returns the file path of a tool that supplies // the build system package structure, or "" if not found. // If GOPACKAGESDRIVER is set in the environment findExternalDriver returns its @@ -64,8 +103,8 @@ func findExternalDriver(cfg *Config) driver { return nil } } - return func(cfg *Config, words ...string) (*driverResponse, error) { - req, err := json.Marshal(driverRequest{ + return func(cfg *Config, words ...string) (*DriverResponse, error) { + req, err := json.Marshal(DriverRequest{ Mode: cfg.Mode, Env: cfg.Env, BuildFlags: cfg.BuildFlags, @@ -92,7 +131,7 @@ func findExternalDriver(cfg *Config) driver { fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd), stderr) } - var response driverResponse + var response DriverResponse if err := json.Unmarshal(buf.Bytes(), &response); err != nil { return nil, err } diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index cd375fbc3c..22305d9c90 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -35,23 +35,23 @@ type goTooOldError struct { error } -// responseDeduper wraps a driverResponse, deduplicating its contents. +// responseDeduper wraps a DriverResponse, deduplicating its contents. type responseDeduper struct { seenRoots map[string]bool seenPackages map[string]*Package - dr *driverResponse + dr *DriverResponse } func newDeduper() *responseDeduper { return &responseDeduper{ - dr: &driverResponse{}, + dr: &DriverResponse{}, seenRoots: map[string]bool{}, seenPackages: map[string]*Package{}, } } -// addAll fills in r with a driverResponse. -func (r *responseDeduper) addAll(dr *driverResponse) { +// addAll fills in r with a DriverResponse. +func (r *responseDeduper) addAll(dr *DriverResponse) { for _, pkg := range dr.Packages { r.addPackage(pkg) } @@ -128,7 +128,7 @@ func (state *golistState) mustGetEnv() map[string]string { // goListDriver uses the go list command to interpret the patterns and produce // the build system package structure. // See driver for more details. -func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { +func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error) { // Make sure that any asynchronous go commands are killed when we return. parentCtx := cfg.Context if parentCtx == nil { @@ -146,16 +146,18 @@ func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { } // Fill in response.Sizes asynchronously if necessary.
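The hunk that follows replaces a WaitGroup-plus-shared-variable arrangement for this asynchronous size query: the goroutine now reports its error on a channel, and a deferred receive folds that error into goListDriver's named err result, so every return path waits for the goroutine. A distilled sketch of the pattern, with hypothetical names:

```go
package main

import "fmt"

// fetch returns a result computed on the main path while a goroutine runs
// in the background; the deferred receive merges the goroutine's error into
// the named return value, and a background failure wins over a nil error.
func fetch() (result string, err error) {
	errCh := make(chan error)
	go func() {
		// ... asynchronous work would go here ...
		errCh <- nil // report the background error (nil on success)
	}()
	defer func() {
		if bgErr := <-errCh; bgErr != nil {
			err = bgErr
		}
	}()
	return "done", nil // err may still be overwritten by the deferred receive
}

func main() {
	fmt.Println(fetch())
}
```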
- var sizeserr error - var sizeswg sync.WaitGroup if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { - sizeswg.Add(1) + errCh := make(chan error) go func() { compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) - sizeserr = err response.dr.Compiler = compiler response.dr.Arch = arch - sizeswg.Done() + errCh <- err + }() + defer func() { + if sizesErr := <-errCh; sizesErr != nil { + err = sizesErr + } }() } @@ -208,10 +210,7 @@ extractQueries: } } - sizeswg.Wait() - if sizeserr != nil { - return nil, sizeserr - } + // (We may yet return an error due to defer.) return response.dr, nil } @@ -266,7 +265,7 @@ func (state *golistState) runContainsQueries(response *responseDeduper, queries // adhocPackage attempts to load or construct an ad-hoc package for a given // query, if the original call to the driver produced inadequate results. -func (state *golistState) adhocPackage(pattern, query string) (*driverResponse, error) { +func (state *golistState) adhocPackage(pattern, query string) (*DriverResponse, error) { response, err := state.createDriverResponse(query) if err != nil { return nil, err @@ -357,7 +356,7 @@ func otherFiles(p *jsonPackage) [][]string { // createDriverResponse uses the "go list" command to expand the pattern // words and return a response for the specified packages. -func (state *golistState) createDriverResponse(words ...string) (*driverResponse, error) { +func (state *golistState) createDriverResponse(words ...string) (*DriverResponse, error) { // go list uses the following identifiers in ImportPath and Imports: // // "p" -- importable package or main (command) @@ -384,7 +383,7 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse pkgs := make(map[string]*Package) additionalErrors := make(map[string][]Error) // Decode the JSON and convert it to Package form. - response := &driverResponse{ + response := &DriverResponse{ GoVersion: goVersion, } for dec := json.NewDecoder(buf); dec.More(); { diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 81e9e6a727..f33b0afc22 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -206,43 +206,6 @@ type Config struct { Overlay map[string][]byte } -// driver is the type for functions that query the build system for the -// packages named by the patterns. -type driver func(cfg *Config, patterns ...string) (*driverResponse, error) - -// driverResponse contains the results for a driver query. -type driverResponse struct { - // NotHandled is returned if the request can't be handled by the current - // driver. If an external driver returns a response with NotHandled, the - // rest of the driverResponse is ignored, and go/packages will fallback - // to the next driver. If go/packages is extended in the future to support - // lists of multiple drivers, go/packages will fall back to the next driver. - NotHandled bool - - // Compiler and Arch are the arguments pass of types.SizesFor - // to get a types.Sizes to use when type checking. - Compiler string - Arch string - - // Roots is the set of package IDs that make up the root packages. - // We have to encode this separately because when we encode a single package - // we cannot know if it is one of the roots as that requires knowledge of the - // graph it is part of. - Roots []string `json:",omitempty"` - - // Packages is the full set of packages in the graph. 
- // The packages are not connected into a graph. - // The Imports if populated will be stubs that only have their ID set. - // Imports will be connected and then type and syntax information added in a - // later pass (see refine). - Packages []*Package - - // GoVersion is the minor version number used by the driver - // (e.g. the go command on the PATH) when selecting .go files. - // Zero means unknown. - GoVersion int -} - // Load loads and returns the Go packages named by the given patterns. // // Config specifies loading options; @@ -291,7 +254,7 @@ func Load(cfg *Config, patterns ...string) ([]*Package, error) { // no external driver, or the driver returns a response with NotHandled set, // defaultDriver will fall back to the go list driver. // The boolean result indicates that an external driver handled the request. -func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, bool, error) { +func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, error) { if driver := findExternalDriver(cfg); driver != nil { response, err := driver(cfg, patterns...) if err != nil { @@ -303,7 +266,10 @@ func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, bool, erro } response, err := goListDriver(cfg, patterns...) - return response, false, err + if err != nil { + return nil, false, err + } + return response, false, nil } // A Package describes a loaded Go package. @@ -648,7 +614,7 @@ func newLoader(cfg *Config) *loader { // refine connects the supplied packages into a graph and then adds type // and syntax information as requested by the LoadMode. -func (ld *loader) refine(response *driverResponse) ([]*Package, error) { +func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { roots := response.Roots rootMap := make(map[string]int, len(roots)) for i, root := range roots { diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 9bde15e3bc..9fffa9ad05 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -224,6 +224,7 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte // Gather the relevant packages from the manifest. items := make([]GetPackagesItem, r.uint64()) + uniquePkgPaths := make(map[string]bool) for i := range items { pkgPathOff := r.uint64() pkgPath := p.stringAt(pkgPathOff) @@ -248,6 +249,12 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte } items[i].nameIndex = nameIndex + + uniquePkgPaths[pkgPath] = true + } + // Debugging #63822; hypothesis: there are duplicate PkgPaths. + if len(uniquePkgPaths) != len(items) { + reportf("found duplicate PkgPaths while reading export data manifest: %v", items) } // Request packages all at once from the client, diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go index 52f74e643b..8361515519 100644 --- a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go +++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go @@ -9,11 +9,13 @@ package gopathwalk import ( "bufio" "bytes" + "io" "io/fs" - "log" "os" "path/filepath" + "runtime" "strings" + "sync" "time" ) @@ -21,8 +23,13 @@ import ( type Options struct { // If Logf is non-nil, debug logging is enabled through this function. Logf func(format string, args ...interface{}) + // Search module caches. Also disables legacy goimports ignore rules. 
ModulesEnabled bool + + // Maximum number of concurrent calls to user-provided callbacks, + // or 0 for GOMAXPROCS. + Concurrency int } // RootType indicates the type of a Root. @@ -43,19 +50,28 @@ type Root struct { Type RootType } -// Walk walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. +// Walk concurrently walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. +// // For each package found, add will be called with the absolute // paths of the containing source directory and the package directory. +// +// Unlike filepath.WalkDir, Walk follows symbolic links +// (while guarding against cycles). func Walk(roots []Root, add func(root Root, dir string), opts Options) { WalkSkip(roots, add, func(Root, string) bool { return false }, opts) } -// WalkSkip walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. +// WalkSkip concurrently walks Go source directories ($GOROOT, $GOPATH, etc) to +// find packages. +// // For each package found, add will be called with the absolute // paths of the containing source directory and the package directory. // For each directory that will be scanned, skip will be called // with the absolute paths of the containing source directory and the directory. // If skip returns false on a directory it will be processed. +// +// Unlike filepath.WalkDir, WalkSkip follows symbolic links +// (while guarding against cycles). func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root, dir string) bool, opts Options) { for _, root := range roots { walkDir(root, add, skip, opts) @@ -64,45 +80,51 @@ func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root // walkDir creates a walker and starts fastwalk with this walker. func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) { + if opts.Logf == nil { + opts.Logf = func(format string, args ...interface{}) {} + } if _, err := os.Stat(root.Path); os.IsNotExist(err) { - if opts.Logf != nil { - opts.Logf("skipping nonexistent directory: %v", root.Path) - } + opts.Logf("skipping nonexistent directory: %v", root.Path) return } start := time.Now() - if opts.Logf != nil { - opts.Logf("scanning %s", root.Path) + opts.Logf("scanning %s", root.Path) + + concurrency := opts.Concurrency + if concurrency == 0 { + // The walk may be either CPU-bound or I/O-bound, depending on what the + // caller-supplied add function does and the details of the user's platform + // and machine. Rather than trying to fine-tune the concurrency level for a + // specific environment, we default to GOMAXPROCS: it is likely to be a good + // choice for a CPU-bound add function, and if it is instead I/O-bound, then + // dealing with I/O saturation is arguably the job of the kernel and/or + // runtime. (Oversaturating I/O seems unlikely to harm performance as badly + // as failing to saturate would.) + concurrency = runtime.GOMAXPROCS(0) } - w := &walker{ - root: root, - add: add, - skip: skip, - opts: opts, - added: make(map[string]bool), + root: root, + add: add, + skip: skip, + opts: opts, + sem: make(chan struct{}, concurrency), } w.init() - // Add a trailing path separator to cause filepath.WalkDir to traverse symlinks. + w.sem <- struct{}{} path := root.Path - if len(path) == 0 { - path = "." + string(filepath.Separator) - } else if !os.IsPathSeparator(path[len(path)-1]) { - path = path + string(filepath.Separator) + if path == "" { + path = "."
} - - if err := filepath.WalkDir(path, w.walk); err != nil { - logf := opts.Logf - if logf == nil { - logf = log.Printf - } - logf("scanning directory %v: %v", root.Path, err) + if fi, err := os.Lstat(path); err == nil { + w.walk(path, nil, fs.FileInfoToDirEntry(fi)) + } else { + w.opts.Logf("scanning directory %v: %v", root.Path, err) } + <-w.sem + w.walking.Wait() - if opts.Logf != nil { - opts.Logf("scanned %s in %v", root.Path, time.Since(start)) - } + opts.Logf("scanned %s in %v", root.Path, time.Since(start)) } // walker is the callback for fastwalk.Walk. @@ -112,10 +134,18 @@ type walker struct { skip func(Root, string) bool // The callback that will be invoked for every dir. dir is skipped if it returns true. opts Options // Options passed to Walk by the user. - pathSymlinks []os.FileInfo - ignoredDirs []string + walking sync.WaitGroup + sem chan struct{} // Channel of semaphore tokens; send to acquire, receive to release. + ignoredDirs []string - added map[string]bool + added sync.Map // map[string]bool +} + +// A symlinkList is a linked list of os.FileInfos for parent directories +// reached via symlinks. +type symlinkList struct { + info os.FileInfo + prev *symlinkList } // init initializes the walker based on its Options @@ -132,9 +162,7 @@ func (w *walker) init() { for _, p := range ignoredPaths { full := filepath.Join(w.root.Path, p) w.ignoredDirs = append(w.ignoredDirs, full) - if w.opts.Logf != nil { - w.opts.Logf("Directory added to ignore list: %s", full) - } + w.opts.Logf("Directory added to ignore list: %s", full) } } @@ -144,12 +172,10 @@ func (w *walker) init() { func (w *walker) getIgnoredDirs(path string) []string { file := filepath.Join(path, ".goimportsignore") slurp, err := os.ReadFile(file) - if w.opts.Logf != nil { - if err != nil { - w.opts.Logf("%v", err) - } else { - w.opts.Logf("Read %s", file) - } + if err != nil { + w.opts.Logf("%v", err) + } else { + w.opts.Logf("Read %s", file) } if err != nil { return nil @@ -183,149 +209,129 @@ func (w *walker) shouldSkipDir(dir string) bool { // walk walks through the given path. // -// Errors are logged if w.opts.Logf is non-nil, but otherwise ignored: -// walk returns only nil or fs.SkipDir. -func (w *walker) walk(path string, d fs.DirEntry, err error) error { - if err != nil { - // We have no way to report errors back through Walk or WalkSkip, - // so just log and ignore them. - if w.opts.Logf != nil { +// Errors are logged if w.opts.Logf is non-nil, but otherwise ignored. +func (w *walker) walk(path string, pathSymlinks *symlinkList, d fs.DirEntry) { + if d.Type()&os.ModeSymlink != 0 { + // Walk the symlink's target rather than the symlink itself. + // + // (Note that os.Stat, unlike the lower-level os.Readlink, + // follows arbitrarily many layers of symlinks, so it will eventually + // reach either a non-symlink or a nonexistent target.) + // + // TODO(bcmills): 'go list all' itself ignores symlinks within GOROOT/src + // and GOPATH/src. Do we really need to traverse them here? If so, why? + + fi, err := os.Stat(path) + if err != nil { w.opts.Logf("%v", err) + return + } + + // Avoid walking symlink cycles: if we have already followed a symlink to + // this directory as a parent of itself, don't follow it again. + // + // This doesn't catch the first time through a cycle, but it also minimizes + // the number of extra stat calls we make if we *don't* encounter a cycle. + // Since we don't actually expect to encounter symlink cycles in practice, + // this seems like the right tradeoff.
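The cycle guard above distills to very little code: each time the walk follows a symlink it pushes the target's FileInfo onto an immutable parent chain, and os.SameFile detects a revisit. A standalone sketch (the symlinkChain type mirrors symlinkList; the seen helper is hypothetical):

```go
package main

import (
	"fmt"
	"os"
)

// symlinkChain is a linked list of FileInfos for directories reached via
// symlinks, shared by recursive calls without copying a slice.
type symlinkChain struct {
	info os.FileInfo
	prev *symlinkChain
}

// seen reports whether fi identifies the same file as any ancestor on the
// chain, i.e. whether following this symlink would revisit a parent.
func seen(chain *symlinkChain, fi os.FileInfo) bool {
	for p := chain; p != nil; p = p.prev {
		if os.SameFile(fi, p.info) {
			return true
		}
	}
	return false
}

func main() {
	fi, err := os.Stat(".")
	if err != nil {
		panic(err)
	}
	chain := &symlinkChain{info: fi}
	fmt.Println(seen(chain, fi)) // true: "." is already on the chain
}
```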
+ for parent := pathSymlinks; parent != nil; parent = parent.prev { + if os.SameFile(fi, parent.info) { + return + } } - if d == nil { - // Nothing more to do: the error prevents us from knowing - // what path even represents. - return nil + + pathSymlinks = &symlinkList{ + info: fi, + prev: pathSymlinks, } + d = fs.FileInfoToDirEntry(fi) } if d.Type().IsRegular() { if !strings.HasSuffix(path, ".go") { - return nil + return } dir := filepath.Dir(path) if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) { // Doesn't make sense to have regular files // directly in your $GOPATH/src or $GOROOT/src. - return nil + // + // TODO(bcmills): there are many levels of directory within + // RootModuleCache where this also wouldn't make sense. + // Can we generalize this to any directory without a corresponding + // import path? + return } - if !w.added[dir] { + if _, dup := w.added.LoadOrStore(dir, true); !dup { w.add(w.root, dir) - w.added[dir] = true } - return nil } - if d.IsDir() { - base := filepath.Base(path) - if base == "" || base[0] == '.' || base[0] == '_' || - base == "testdata" || - (w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") || - (!w.opts.ModulesEnabled && base == "node_modules") { - return fs.SkipDir - } - if w.shouldSkipDir(path) { - return fs.SkipDir - } - return nil + if !d.IsDir() { + return } - if d.Type()&os.ModeSymlink != 0 { - // TODO(bcmills): 'go list all' itself ignores symlinks within GOROOT/src - // and GOPATH/src. Do we really need to traverse them here? If so, why? - - fi, err := os.Stat(path) - if err != nil || !fi.IsDir() { - // Not a directory. Just walk the file (or broken link) and be done. - return w.walk(path, fs.FileInfoToDirEntry(fi), err) - } - - // Avoid walking symlink cycles: if we have already followed a symlink to - // this directory as a parent of itself, don't follow it again. - // - // This doesn't catch the first time through a cycle, but it also minimizes - // the number of extra stat calls we make if we *don't* encounter a cycle. - // Since we don't actually expect to encounter symlink cycles in practice, - // this seems like the right tradeoff. - for _, parent := range w.pathSymlinks { - if os.SameFile(fi, parent) { - return nil - } - } + base := filepath.Base(path) + if base == "" || base[0] == '.' || base[0] == '_' || + base == "testdata" || + (w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") || + (!w.opts.ModulesEnabled && base == "node_modules") || + w.shouldSkipDir(path) { + return + } - w.pathSymlinks = append(w.pathSymlinks, fi) - defer func() { - w.pathSymlinks = w.pathSymlinks[:len(w.pathSymlinks)-1] - }() + // Read the directory and walk its entries. - // On some platforms the OS (or the Go os package) sometimes fails to - // resolve directory symlinks before a trailing slash - // (even though POSIX requires it to do so). - // - // On macOS that failure may be caused by a known libc/kernel bug; - // see https://go.dev/issue/59586. - // - // On Windows before Go 1.21, it may be caused by a bug in - // os.Lstat (fixed in https://go.dev/cl/463177). - // - // Since we need to handle this explicitly on broken platforms anyway, - // it is simplest to just always do that and not rely on POSIX pathname - // resolution to walk the directory (such as by calling WalkDir with - // a trailing slash appended to the path).
+ f, err := os.Open(path) + if err != nil { + w.opts.Logf("%v", err) + return + } + defer f.Close() + + for { + // We impose an arbitrary limit on the number of ReadDir results per + // directory to limit the amount of memory consumed for stale or upcoming + // directory entries. The limit trades off CPU (number of syscalls to read + // the whole directory) against RAM (reachable directory entries other than + // the one currently being processed). // - // Instead, we make a sequence of walk calls — directly and through - // recursive calls to filepath.WalkDir — simulating what WalkDir would do - // if the symlink were a regular directory. - - // First we call walk on the path as a directory - // (instead of a symlink). - err = w.walk(path, fs.FileInfoToDirEntry(fi), nil) - if err == fs.SkipDir { - return nil - } else if err != nil { - // This should be impossible, but handle it anyway in case - // walk is changed to return other errors. - return err - } - - // Now read the directory and walk its entries. - ents, err := os.ReadDir(path) + // Since we process the directories recursively, we will end up maintaining + // a slice of entries for each level of the directory tree. + // (Compare https://go.dev/issue/36197.) + ents, err := f.ReadDir(1024) if err != nil { - // Report the ReadDir error, as filepath.WalkDir would do. - err = w.walk(path, fs.FileInfoToDirEntry(fi), err) - if err == fs.SkipDir { - return nil - } else if err != nil { - return err // Again, should be impossible. + if err != io.EOF { + w.opts.Logf("%v", err) } - // Fall through and iterate over whatever entries we did manage to get. + break } for _, d := range ents { nextPath := filepath.Join(path, d.Name()) if d.IsDir() { - // We want to walk the whole directory tree rooted at nextPath, - // not just the single entry for the directory. - err := filepath.WalkDir(nextPath, w.walk) - if err != nil && w.opts.Logf != nil { - w.opts.Logf("%v", err) - } - } else { - err := w.walk(nextPath, d, nil) - if err == fs.SkipDir { - // Skip the rest of the entries in the parent directory of nextPath - // (that is, path itself). - break - } else if err != nil { - return err // Again, should be impossible. + select { + case w.sem <- struct{}{}: + // Got a new semaphore token, so we can traverse the directory concurrently. + d := d + w.walking.Add(1) + go func() { + defer func() { + <-w.sem + w.walking.Done() + }() + w.walk(nextPath, pathSymlinks, d) + }() + continue + + default: + // No tokens available, so traverse serially. } } + + w.walk(nextPath, pathSymlinks, d) } - return nil } - - // Not a file, regular directory, or symlink; skip. - return nil } diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index dd369c072e..6a18f63a44 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -13,6 +13,7 @@ import ( "go/build" "go/parser" "go/token" + "go/types" "io/fs" "io/ioutil" "os" @@ -700,20 +701,21 @@ func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) (map return result, nil } -func PrimeCache(ctx context.Context, env *ProcessEnv) error { +func PrimeCache(ctx context.Context, resolver Resolver) error { // Fully scan the disk for directories, but don't actually read any Go files. 
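Stepping back to the walker as a whole: its concurrency idiom is a buffered channel used as a counting semaphore, with a select-default fallback so that a caller that cannot get a token immediately does the work serially instead of queueing unboundedly. A minimal sketch of the same shape (the process function is hypothetical):

```go
package main

import (
	"fmt"
	"runtime"
	"sync"
)

func main() {
	sem := make(chan struct{}, runtime.GOMAXPROCS(0)) // semaphore tokens
	var wg sync.WaitGroup

	process := func(task int) { fmt.Println("processed", task) }

	for task := 0; task < 20; task++ {
		select {
		case sem <- struct{}{}:
			// A token is free: handle this task on a new goroutine.
			task := task
			wg.Add(1)
			go func() {
				defer func() { <-sem; wg.Done() }()
				process(task)
			}()
		default:
			// No token free: handle the task serially on this goroutine.
			process(task)
		}
	}
	wg.Wait()
}
```

This bounds the goroutine count at the semaphore's capacity (opts.Concurrency in the walker) without ever blocking the traversal itself.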
callback := &scanCallback{ - rootFound: func(gopathwalk.Root) bool { - return true + rootFound: func(root gopathwalk.Root) bool { + // See getCandidatePkgs: walking GOROOT is apparently expensive and + // unnecessary. + return root.Type != gopathwalk.RootGOROOT }, dirFound: func(pkg *pkg) bool { return false }, - packageNameLoaded: func(pkg *pkg) bool { - return false - }, + // packageNameLoaded and exportsLoaded must never be called. } - return getCandidatePkgs(ctx, callback, "", "", env) + + return resolver.scan(ctx, callback) } func candidateImportName(pkg *pkg) string { @@ -827,16 +829,45 @@ func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchP return getCandidatePkgs(ctx, callback, filename, filePkg, env) } -var requiredGoEnvVars = []string{"GO111MODULE", "GOFLAGS", "GOINSECURE", "GOMOD", "GOMODCACHE", "GONOPROXY", "GONOSUMDB", "GOPATH", "GOPROXY", "GOROOT", "GOSUMDB", "GOWORK"} +// TODO(rfindley): we should depend on GOOS and GOARCH, to provide accurate +// imports when doing cross-platform development. +var requiredGoEnvVars = []string{ + "GO111MODULE", + "GOFLAGS", + "GOINSECURE", + "GOMOD", + "GOMODCACHE", + "GONOPROXY", + "GONOSUMDB", + "GOPATH", + "GOPROXY", + "GOROOT", + "GOSUMDB", + "GOWORK", +} // ProcessEnv contains environment variables and settings that affect the use of // the go command, the go/build package, etc. +// +// ...a ProcessEnv *also* overwrites its Env along with derived state in the +// form of the resolver. And because it is lazily initialized, an env may just +// be broken and unusable, but there is no way for the caller to detect that: +// all queries will just fail. +// +// TODO(rfindley): refactor this package so that this type (perhaps renamed to +// just Env or Config) is an immutable configuration struct, to be exchanged +// for an initialized object via a constructor that returns an error. Perhaps +// the signature should be `func NewResolver(*Env) (*Resolver, error)`, where +// resolver is a concrete type used for resolving imports. Via this +// refactoring, we can avoid the need to call ProcessEnv.init and +// ProcessEnv.GoEnv everywhere, and implicitly fix all the places where +// these are misused. Also, we'd delegate to the caller the decision of how to +// handle a broken environment. type ProcessEnv struct { GocmdRunner *gocommand.Runner BuildFlags []string ModFlag string - ModFile string // SkipPathInScan returns true if the path should be skipped from scans of // the RootCurrentModule root type. The function argument is a clean, @@ -846,7 +877,7 @@ type ProcessEnv struct { // Env overrides the OS environment, and can be used to specify // GOPROXY, GO111MODULE, etc. PATH cannot be set here, because // exec.Command will not honor it. - // Specifying all of RequiredGoEnvVars avoids a call to `go env`. + // Specifying all of requiredGoEnvVars avoids a call to `go env`. Env map[string]string WorkingDir string @@ -854,9 +885,17 @@ type ProcessEnv struct { // If Logf is non-nil, debug logging is enabled through this function. Logf func(format string, args ...interface{}) - initialized bool + // If set, ModCache holds a shared cache of directory info to use across + // multiple ProcessEnvs. + ModCache *DirInfoCache - resolver Resolver + initialized bool // see TODO above + + // resolver and resolverErr are lazily evaluated (see GetResolver). + // This is unclean, but see the big TODO in the docstring for ProcessEnv + // above: for now, we can't be sure that the ProcessEnv is fully initialized.
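The resolver and resolverErr pair added here memoizes construction including failure: once the resolver fails to build, later queries return the same error rather than silently retrying. In miniature (all types below are hypothetical stand-ins):

```go
package main

import (
	"errors"
	"fmt"
)

type resolver interface{ scan() }

type env struct {
	// Both the constructed value and its error are cached, so a broken
	// environment fails the same way on every call.
	resolver    resolver
	resolverErr error
}

func (e *env) getResolver() (resolver, error) {
	if e.resolver == nil && e.resolverErr == nil {
		e.resolver, e.resolverErr = newResolver()
	}
	return e.resolver, e.resolverErr
}

// newResolver stands in for a constructor like newModuleResolver.
func newResolver() (resolver, error) {
	return nil, errors.New("broken environment")
}

func main() {
	e := &env{}
	_, err1 := e.getResolver()
	_, err2 := e.getResolver()
	fmt.Println(err1, err1 == err2) // broken environment true
}
```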
+ resolver Resolver + resolverErr error } func (e *ProcessEnv) goEnv() (map[string]string, error) { @@ -936,20 +975,31 @@ func (e *ProcessEnv) env() []string { } func (e *ProcessEnv) GetResolver() (Resolver, error) { - if e.resolver != nil { - return e.resolver, nil - } if err := e.init(); err != nil { return nil, err } - if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 { - e.resolver = newGopathResolver(e) - return e.resolver, nil + + if e.resolver == nil && e.resolverErr == nil { + // TODO(rfindley): we should only use a gopathResolver here if the working + // directory is actually *in* GOPATH. (I seem to recall an open gopls issue + // for this behavior, but I can't find it). + // + // For gopls, we can optionally explicitly choose a resolver type, since we + // already know the view type. + if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 { + e.resolver = newGopathResolver(e) + } else { + e.resolver, e.resolverErr = newModuleResolver(e, e.ModCache) + } } - e.resolver = newModuleResolver(e) - return e.resolver, nil + + return e.resolver, e.resolverErr } +// buildContext returns the build.Context to use for matching files. +// +// TODO(rfindley): support dynamic GOOS, GOARCH here, when doing cross-platform +// development. func (e *ProcessEnv) buildContext() (*build.Context, error) { ctx := build.Default goenv, err := e.goEnv() @@ -1029,15 +1079,23 @@ func addStdlibCandidates(pass *pass, refs references) error { type Resolver interface { // loadPackageNames loads the package names in importPaths. loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) + // scan works with callback to search for packages. See scanCallback for details. scan(ctx context.Context, callback *scanCallback) error + // loadExports returns the set of exported symbols in the package at dir. // loadExports may be called concurrently. loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) + // scoreImportPath returns the relevance for an import path. scoreImportPath(ctx context.Context, path string) float64 - ClearForNewScan() + // ClearForNewScan returns a new Resolver based on the receiver that has + // cleared its internal caches of directory contents. + // + // The new resolver should be primed and then set via + // [ProcessEnv.UpdateResolver]. + ClearForNewScan() Resolver } // A scanCallback controls a call to scan and receives its results. @@ -1120,7 +1178,7 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil go func(pkgName string, symbols map[string]bool) { defer wg.Done() - found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols, filename) + found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols) if err != nil { firstErrOnce.Do(func() { @@ -1151,6 +1209,17 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil }() for result := range results { + // Don't offer completions that would shadow predeclared + // names, such as github.com/coreos/etcd/error. + if types.Universe.Lookup(result.pkg.name) != nil { // predeclared + // Ideally we would skip this candidate only + // if the predeclared name is actually + // referenced by the file, but that's a lot + // trickier to compute and would still create + // an import that is likely to surprise the + // user before long. 
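The predeclared-name guard above is compact but worth spelling out: types.Universe is the scope containing Go's predeclared identifiers, so a non-nil Lookup on a candidate's package name (such as the error package in github.com/coreos/etcd/error) means the import would shadow one of them. Standalone:

```go
package main

import (
	"fmt"
	"go/types"
)

// shadowsPredeclared reports whether a package imported under this name
// would shadow a predeclared identifier such as error, len, or string.
func shadowsPredeclared(pkgName string) bool {
	return types.Universe.Lookup(pkgName) != nil
}

func main() {
	fmt.Println(shadowsPredeclared("error")) // true: "error" is predeclared
	fmt.Println(shadowsPredeclared("etcd"))  // false
}
```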
+ continue + } pass.addCandidate(result.imp, result.pkg) } return firstErr @@ -1193,31 +1262,22 @@ func ImportPathToAssumedName(importPath string) string { type gopathResolver struct { env *ProcessEnv walked bool - cache *dirInfoCache + cache *DirInfoCache scanSema chan struct{} // scanSema prevents concurrent scans. } func newGopathResolver(env *ProcessEnv) *gopathResolver { r := &gopathResolver{ - env: env, - cache: &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, - listeners: map[*int]cacheListener{}, - }, + env: env, + cache: NewDirInfoCache(), scanSema: make(chan struct{}, 1), } r.scanSema <- struct{}{} return r } -func (r *gopathResolver) ClearForNewScan() { - <-r.scanSema - r.cache = &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, - listeners: map[*int]cacheListener{}, - } - r.walked = false - r.scanSema <- struct{}{} +func (r *gopathResolver) ClearForNewScan() Resolver { + return newGopathResolver(r.env) } func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { @@ -1538,7 +1598,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl // findImport searches for a package with the given symbols. // If no package is found, findImport returns ("", false, nil) -func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool, filename string) (*pkg, error) { +func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool) (*pkg, error) { // Sort the candidates by their import package length, // assuming that shorter package names are better than long // ones. Note that this sorts by the de-vendored name, so diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go index 58e637b90f..660407548e 100644 --- a/vendor/golang.org/x/tools/internal/imports/imports.go +++ b/vendor/golang.org/x/tools/internal/imports/imports.go @@ -236,7 +236,7 @@ func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast src = src[:len(src)-len("}\n")] // Gofmt has also indented the function body one level. // Remove that indent. - src = bytes.Replace(src, []byte("\n\t"), []byte("\n"), -1) + src = bytes.ReplaceAll(src, []byte("\n\t"), []byte("\n")) return matchSpace(orig, src) } return file, adjust, nil diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go index 5f4d435d3c..3d0f38f6c2 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod.go +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -23,49 +23,88 @@ import ( "golang.org/x/tools/internal/gopathwalk" ) -// ModuleResolver implements resolver for modules using the go command as little -// as feasible. +// Notes(rfindley): ModuleResolver appears to be heavily optimized for scanning +// as fast as possible, which is desirable for a call to goimports from the +// command line, but it doesn't work as well for gopls, where it suffers from +// slow startup (golang/go#44863) and intermittent hanging (golang/go#59216), +// both caused by populating the cache, albeit in slightly different ways. +// +// A high level list of TODOs: +// - Optimize the scan itself, as there is some redundancy statting and +// reading go.mod files. +// - Invert the relationship between ProcessEnv and Resolver (see the +// docstring of ProcessEnv). +// - Make it easier to use an external resolver implementation. 
+// +// Smaller TODOs are annotated in the code below. + +// ModuleResolver implements the Resolver interface for a workspace using +// modules. +// +// A goal of the ModuleResolver is to invoke the Go command as little as +// possible. To this end, it runs the Go command only for listing module +// information (i.e. `go list -m -e -json ...`). Package scanning, the process +// of loading package information for the modules, is implemented internally +// via the scan method. +// +// It has two types of state: the state derived from the go command, which +// is populated by init, and the state derived from scans, which is populated +// via scan. A root is considered scanned if it has been walked to discover +// directories. However, if the scan did not require additional information +// from the directory (such as package name or exports), the directory +// information itself may be partially populated. It will be lazily filled in +// as needed by scans, using the scanCallback. type ModuleResolver struct { - env *ProcessEnv - moduleCacheDir string - dummyVendorMod *gocommand.ModuleJSON // If vendoring is enabled, the pseudo-module that represents the /vendor directory. - roots []gopathwalk.Root - scanSema chan struct{} // scanSema prevents concurrent scans and guards scannedRoots. - scannedRoots map[gopathwalk.Root]bool - - initialized bool - mains []*gocommand.ModuleJSON - mainByDir map[string]*gocommand.ModuleJSON - modsByModPath []*gocommand.ModuleJSON // All modules, ordered by # of path components in module Path... - modsByDir []*gocommand.ModuleJSON // ...or number of path components in their Dir. - - // moduleCacheCache stores information about the module cache. - moduleCacheCache *dirInfoCache - otherCache *dirInfoCache + env *ProcessEnv + + // Module state, populated during construction + dummyVendorMod *gocommand.ModuleJSON // if vendoring is enabled, a pseudo-module to represent the /vendor directory + moduleCacheDir string // GOMODCACHE, inferred from GOPATH if unset + roots []gopathwalk.Root // roots to scan, in approximate order of importance + mains []*gocommand.ModuleJSON // main modules + mainByDir map[string]*gocommand.ModuleJSON // module information by dir, to join with roots + modsByModPath []*gocommand.ModuleJSON // all modules, ordered by # of path components in their module path + modsByDir []*gocommand.ModuleJSON // ...or by the number of path components in their Dir. + + // Scanning state, populated by scan + + // scanSema prevents concurrent scans, and guards scannedRoots and the cache + // fields below (though the caches themselves are concurrency safe). + // Receive to acquire, send to release. + scanSema chan struct{} + scannedRoots map[gopathwalk.Root]bool // if true, root has been walked + + // Caches of directory info, populated by scans and scan callbacks + // + // moduleCacheCache stores cached information about roots in the module + // cache, which are immutable and therefore do not need to be invalidated. + // + // otherCache stores information about all other roots (even GOROOT), which + // may change. + moduleCacheCache *DirInfoCache + otherCache *DirInfoCache } -func newModuleResolver(e *ProcessEnv) *ModuleResolver { +// newModuleResolver returns a new module-aware goimports resolver. +// +// Note: use caution when modifying this constructor: changes must also be +// reflected in ModuleResolver.ClearForNewScan. 
+func newModuleResolver(e *ProcessEnv, moduleCacheCache *DirInfoCache) (*ModuleResolver, error) { r := &ModuleResolver{ env: e, scanSema: make(chan struct{}, 1), } - r.scanSema <- struct{}{} - return r -} - -func (r *ModuleResolver) init() error { - if r.initialized { - return nil - } + r.scanSema <- struct{}{} // release goenv, err := r.env.goEnv() if err != nil { - return err + return nil, err } + + // TODO(rfindley): can we refactor to share logic with r.env.invokeGo? inv := gocommand.Invocation{ BuildFlags: r.env.BuildFlags, ModFlag: r.env.ModFlag, - ModFile: r.env.ModFile, Env: r.env.env(), Logf: r.env.Logf, WorkingDir: r.env.WorkingDir, @@ -77,9 +116,12 @@ func (r *ModuleResolver) init() error { // Module vendor directories are ignored in workspace mode: // https://go.googlesource.com/proposal/+/master/design/45713-workspace.md if len(r.env.Env["GOWORK"]) == 0 { + // TODO(rfindley): VendorEnabled runs the go command to get GOFLAGS, but + // they should be available from the ProcessEnv. Can we avoid the redundant + // invocation? vendorEnabled, mainModVendor, err = gocommand.VendorEnabled(context.TODO(), inv, r.env.GocmdRunner) if err != nil { - return err + return nil, err } } @@ -100,19 +142,14 @@ func (r *ModuleResolver) init() error { // GO111MODULE=on. Other errors are fatal. if err != nil { if errMsg := err.Error(); !strings.Contains(errMsg, "working directory is not part of a module") && !strings.Contains(errMsg, "go.mod file not found") { - return err + return nil, err } } } - if gmc := r.env.Env["GOMODCACHE"]; gmc != "" { - r.moduleCacheDir = gmc - } else { - gopaths := filepath.SplitList(goenv["GOPATH"]) - if len(gopaths) == 0 { - return fmt.Errorf("empty GOPATH") - } - r.moduleCacheDir = filepath.Join(gopaths[0], "/pkg/mod") + r.moduleCacheDir = gomodcacheForEnv(goenv) + if r.moduleCacheDir == "" { + return nil, fmt.Errorf("cannot resolve GOMODCACHE") } sort.Slice(r.modsByModPath, func(i, j int) bool { @@ -141,7 +178,11 @@ func (r *ModuleResolver) init() error { } else { addDep := func(mod *gocommand.ModuleJSON) { if mod.Replace == nil { - // This is redundant with the cache, but we'll skip it cheaply enough. + // This is redundant with the cache, but we'll skip it cheaply enough + // when we encounter it in the module cache scan. + // + // Including it at a lower index in r.roots than the module cache dir + // helps prioritize matches from within existing dependencies. r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootModuleCache}) } else { r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootOther}) @@ -158,24 +199,40 @@ func (r *ModuleResolver) init() error { addDep(mod) } } + // If provided, share the moduleCacheCache. + // + // TODO(rfindley): The module cache is immutable. However, the loaded + // exports do depend on GOOS and GOARCH. Fortunately, the + // ProcessEnv.buildContext does not adjust these from build.DefaultContext + // (even though it should). So for now, this is OK to share, but we need to + // add logic for handling GOOS/GOARCH. 
+ r.moduleCacheCache = moduleCacheCache r.roots = append(r.roots, gopathwalk.Root{Path: r.moduleCacheDir, Type: gopathwalk.RootModuleCache}) } r.scannedRoots = map[gopathwalk.Root]bool{} if r.moduleCacheCache == nil { - r.moduleCacheCache = &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, - listeners: map[*int]cacheListener{}, - } - } - if r.otherCache == nil { - r.otherCache = &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, - listeners: map[*int]cacheListener{}, - } + r.moduleCacheCache = NewDirInfoCache() } - r.initialized = true - return nil + r.otherCache = NewDirInfoCache() + return r, nil +} + +// gomodcacheForEnv returns the GOMODCACHE value to use based on the given env +// map, which must have GOMODCACHE and GOPATH populated. +// +// TODO(rfindley): this is defensive refactoring. +// 1. Is this even relevant anymore? Can't we just read GOMODCACHE. +// 2. Use this to separate module cache scanning from other scanning. +func gomodcacheForEnv(goenv map[string]string) string { + if gmc := goenv["GOMODCACHE"]; gmc != "" { + return gmc + } + gopaths := filepath.SplitList(goenv["GOPATH"]) + if len(gopaths) == 0 { + return "" + } + return filepath.Join(gopaths[0], "/pkg/mod") } func (r *ModuleResolver) initAllMods() error { @@ -206,30 +263,82 @@ func (r *ModuleResolver) initAllMods() error { return nil } -func (r *ModuleResolver) ClearForNewScan() { - <-r.scanSema - r.scannedRoots = map[gopathwalk.Root]bool{} - r.otherCache = &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, - listeners: map[*int]cacheListener{}, +// ClearForNewScan invalidates the last scan. +// +// It preserves the set of roots, but forgets about the set of directories. +// Though it forgets the set of module cache directories, it remembers their +// contents, since they are assumed to be immutable. +func (r *ModuleResolver) ClearForNewScan() Resolver { + <-r.scanSema // acquire r, to guard scannedRoots + r2 := &ModuleResolver{ + env: r.env, + dummyVendorMod: r.dummyVendorMod, + moduleCacheDir: r.moduleCacheDir, + roots: r.roots, + mains: r.mains, + mainByDir: r.mainByDir, + modsByModPath: r.modsByModPath, + + scanSema: make(chan struct{}, 1), + scannedRoots: make(map[gopathwalk.Root]bool), + otherCache: NewDirInfoCache(), + moduleCacheCache: r.moduleCacheCache, + } + r2.scanSema <- struct{}{} // r2 must start released + // Invalidate root scans. We don't need to invalidate module cache roots, + // because they are immutable. + // (We don't support a use case where GOMODCACHE is cleaned in the middle of + // e.g. a gopls session: the user must restart gopls to get accurate + // imports.) + // + // Scanning for new directories in GOMODCACHE should be handled elsewhere, + // via a call to ScanModuleCache. + for _, root := range r.roots { + if root.Type == gopathwalk.RootModuleCache && r.scannedRoots[root] { + r2.scannedRoots[root] = true + } } - r.scanSema <- struct{}{} + r.scanSema <- struct{}{} // release r + return r2 } -func (r *ModuleResolver) ClearForNewMod() { - <-r.scanSema - *r = ModuleResolver{ - env: r.env, - moduleCacheCache: r.moduleCacheCache, - otherCache: r.otherCache, - scanSema: r.scanSema, +// ClearModuleInfo invalidates resolver state that depends on go.mod file +// contents (essentially, the output of go list -m -json ...). +// +// Notably, it does not forget directory contents, which are reset +// asynchronously via ClearForNewScan. +// +// If the ProcessEnv is a GOPATH environment, ClearModuleInfo is a no op. 
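ClearForNewScan above, like the scan method later in this file, coordinates through scanSema, a channel of capacity 1 used as a mutex: receive to acquire, send to release, and the channel must start released. The payoff over sync.Mutex is that acquisition can be raced against ctx.Done() in a select, which is how scan supports cancellation. A minimal sketch of the idiom (the state type is hypothetical):

```go
package main

import "fmt"

type state struct {
	sem   chan struct{} // capacity 1; receive to acquire, send to release
	roots map[string]bool
}

func newState() *state {
	s := &state{sem: make(chan struct{}, 1), roots: make(map[string]bool)}
	s.sem <- struct{}{} // the semaphore must start released
	return s
}

func (s *state) mark(root string) {
	<-s.sem                                // acquire
	defer func() { s.sem <- struct{}{} }() // release
	s.roots[root] = true
}

func main() {
	s := newState()
	s.mark("GOROOT")
	fmt.Println(s.roots)
}
```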
+// +// TODO(rfindley): move this to a new env.go, consolidating ProcessEnv methods. +func (e *ProcessEnv) ClearModuleInfo() { + if r, ok := e.resolver.(*ModuleResolver); ok { + resolver, resolverErr := newModuleResolver(e, e.ModCache) + if resolverErr == nil { + <-r.scanSema // acquire (guards caches) + resolver.moduleCacheCache = r.moduleCacheCache + resolver.otherCache = r.otherCache + r.scanSema <- struct{}{} // release + } + e.resolver = resolver + e.resolverErr = resolverErr } - r.init() - r.scanSema <- struct{}{} } -// findPackage returns the module and directory that contains the package at -// the given import path, or returns nil, "" if no module is in scope. +// UpdateResolver sets the resolver for the ProcessEnv to use in imports +// operations. Only for use with the result of [Resolver.ClearForNewScan]. +// +// TODO(rfindley): this awkward API is a result of the (arguably) inverted +// relationship between configuration and state described in the doc comment +// for [ProcessEnv]. +func (e *ProcessEnv) UpdateResolver(r Resolver) { + e.resolver = r + e.resolverErr = nil +} + +// findPackage returns the module and directory from within the main modules +// and their dependencies that contains the package at the given import path, +// or returns nil, "" if no module is in scope. func (r *ModuleResolver) findPackage(importPath string) (*gocommand.ModuleJSON, string) { // This can't find packages in the stdlib, but that's harmless for all // the existing code paths. @@ -295,10 +404,6 @@ func (r *ModuleResolver) cacheStore(info directoryPackageInfo) { } } -func (r *ModuleResolver) cacheKeys() []string { - return append(r.moduleCacheCache.Keys(), r.otherCache.Keys()...) -} - // cachePackageName caches the package name for a dir already in the cache. func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (string, error) { if info.rootType == gopathwalk.RootModuleCache { @@ -367,15 +472,15 @@ func (r *ModuleResolver) dirIsNestedModule(dir string, mod *gocommand.ModuleJSON return modDir != mod.Dir } -func (r *ModuleResolver) modInfo(dir string) (modDir string, modName string) { - readModName := func(modFile string) string { - modBytes, err := os.ReadFile(modFile) - if err != nil { - return "" - } - return modulePath(modBytes) +func readModName(modFile string) string { + modBytes, err := os.ReadFile(modFile) + if err != nil { + return "" } + return modulePath(modBytes) +} +func (r *ModuleResolver) modInfo(dir string) (modDir, modName string) { if r.dirInModuleCache(dir) { if matches := modCacheRegexp.FindStringSubmatch(dir); len(matches) == 3 { index := strings.Index(dir, matches[1]+"@"+matches[2]) @@ -409,11 +514,9 @@ func (r *ModuleResolver) dirInModuleCache(dir string) bool { } func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { - if err := r.init(); err != nil { - return nil, err - } names := map[string]string{} for _, path := range importPaths { + // TODO(rfindley): shouldn't this use the dirInfoCache? _, packageDir := r.findPackage(path) if packageDir == "" { continue @@ -431,10 +534,6 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error ctx, done := event.Start(ctx, "imports.ModuleResolver.scan") defer done() - if err := r.init(); err != nil { - return err - } - processDir := func(info directoryPackageInfo) { // Skip this directory if we were not able to get the package information successfully. 
if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { @@ -444,18 +543,18 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error if err != nil { return } - if !callback.dirFound(pkg) { return } + pkg.packageName, err = r.cachePackageName(info) if err != nil { return } - if !callback.packageNameLoaded(pkg) { return } + _, exports, err := r.loadExports(ctx, pkg, false) if err != nil { return @@ -494,7 +593,6 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error return packageScanned } - // Add anything new to the cache, and process it if we're still listening. add := func(root gopathwalk.Root, dir string) { r.cacheStore(r.scanDirForPackage(root, dir)) } @@ -509,9 +607,9 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error select { case <-ctx.Done(): return - case <-r.scanSema: + case <-r.scanSema: // acquire } - defer func() { r.scanSema <- struct{}{} }() + defer func() { r.scanSema <- struct{}{} }() // release // We have the lock on r.scannedRoots, and no other scans can run. for _, root := range roots { if ctx.Err() != nil { @@ -613,9 +711,6 @@ func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) { } func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) { - if err := r.init(); err != nil { - return "", nil, err - } if info, ok := r.cacheLoad(pkg.dir); ok && !includeTest { return r.cacheExports(ctx, r.env, info) } diff --git a/vendor/golang.org/x/tools/internal/imports/mod_cache.go b/vendor/golang.org/x/tools/internal/imports/mod_cache.go index 45690abbb4..cfc5465765 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod_cache.go +++ b/vendor/golang.org/x/tools/internal/imports/mod_cache.go @@ -7,8 +7,12 @@ package imports import ( "context" "fmt" + "path" + "path/filepath" + "strings" "sync" + "golang.org/x/mod/module" "golang.org/x/tools/internal/gopathwalk" ) @@ -39,6 +43,8 @@ const ( exportsLoaded ) +// directoryPackageInfo holds (possibly incomplete) information about packages +// contained in a given directory. type directoryPackageInfo struct { // status indicates the extent to which this struct has been filled in. status directoryPackageStatus @@ -63,7 +69,10 @@ type directoryPackageInfo struct { packageName string // the package name, as declared in the source. // Set when status >= exportsLoaded. - + // TODO(rfindley): it's hard to see this, but exports depend implicitly on + // the default build context GOOS and GOARCH. + // + // We can make this explicit, and key exports by GOOS, GOARCH. exports []string } @@ -79,7 +88,7 @@ func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) ( return true, nil } -// dirInfoCache is a concurrency safe map for storing information about +// DirInfoCache is a concurrency-safe map for storing information about // directories that may contain packages. // // The information in this cache is built incrementally. Entries are initialized in scan. @@ -92,21 +101,26 @@ func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) ( // The information in the cache is not expected to change for the cache's // lifetime, so there is no protection against competing writes. Users should // take care not to hold the cache across changes to the underlying files. 
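One property of this cache worth calling out: Store keeps its own copy of the info, and Load returns a copy rather than the stored pointer, so readers never hold a reference that a later Store could race with. A minimal analogue (the info type is hypothetical):

```go
package main

import (
	"fmt"
	"sync"
)

type info struct{ dir, name string }

// infoCache is a mutex-guarded map whose Load returns a value copy,
// mirroring how DirInfoCache hands out directoryPackageInfo.
type infoCache struct {
	mu   sync.Mutex
	dirs map[string]*info
}

func (c *infoCache) Store(dir string, in info) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.dirs[dir] = &in // the parameter is already a private copy
}

func (c *infoCache) Load(dir string) (info, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	in, ok := c.dirs[dir]
	if !ok {
		return info{}, false
	}
	return *in, true // return a copy, not the shared pointer
}

func main() {
	c := &infoCache{dirs: make(map[string]*info)}
	c.Store("/tmp/pkg", info{dir: "/tmp/pkg", name: "pkg"})
	fmt.Println(c.Load("/tmp/pkg"))
}
```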
-// -// TODO(suzmue): consider other concurrency strategies and data structures (RWLocks, sync.Map, etc) -type dirInfoCache struct { +type DirInfoCache struct { mu sync.Mutex // dirs stores information about packages in directories, keyed by absolute path. dirs map[string]*directoryPackageInfo listeners map[*int]cacheListener } +func NewDirInfoCache() *DirInfoCache { + return &DirInfoCache{ + dirs: make(map[string]*directoryPackageInfo), + listeners: make(map[*int]cacheListener), + } +} + type cacheListener func(directoryPackageInfo) // ScanAndListen calls listener on all the items in the cache, and on anything // newly added. The returned stop function waits for all in-flight callbacks to // finish and blocks new ones. -func (d *dirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() { +func (d *DirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() { ctx, cancel := context.WithCancel(ctx) // Flushing out all the callbacks is tricky without knowing how many there @@ -162,8 +176,10 @@ func (d *dirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener } // Store stores the package info for dir. -func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) { +func (d *DirInfoCache) Store(dir string, info directoryPackageInfo) { d.mu.Lock() + // TODO(rfindley, golang/go#59216): should we overwrite an existing entry? + // That seems incorrect as the cache should be idempotent. _, old := d.dirs[dir] d.dirs[dir] = &info var listeners []cacheListener @@ -180,7 +196,7 @@ func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) { } // Load returns a copy of the directoryPackageInfo for absolute directory dir. -func (d *dirInfoCache) Load(dir string) (directoryPackageInfo, bool) { +func (d *DirInfoCache) Load(dir string) (directoryPackageInfo, bool) { d.mu.Lock() defer d.mu.Unlock() info, ok := d.dirs[dir] @@ -191,7 +207,7 @@ func (d *dirInfoCache) Load(dir string) (directoryPackageInfo, bool) { } // Keys returns the keys currently present in d. -func (d *dirInfoCache) Keys() (keys []string) { +func (d *DirInfoCache) Keys() (keys []string) { d.mu.Lock() defer d.mu.Unlock() for key := range d.dirs { @@ -200,7 +216,7 @@ func (d *dirInfoCache) Keys() (keys []string) { return keys } -func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (string, error) { +func (d *DirInfoCache) CachePackageName(info directoryPackageInfo) (string, error) { if loaded, err := info.reachedStatus(nameLoaded); loaded { return info.packageName, err } @@ -213,7 +229,7 @@ func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (string, erro return info.packageName, info.err } -func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) { +func (d *DirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) { if reached, _ := info.reachedStatus(exportsLoaded); reached { return info.packageName, info.exports, info.err } @@ -234,3 +250,81 @@ func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info d d.Store(info.dir, info) return info.packageName, info.exports, info.err } + +// ScanModuleCache walks the given directory, which must be a GOMODCACHE value, +// for directory package information, storing the results in cache. 
+func ScanModuleCache(dir string, cache *DirInfoCache, logf func(string, ...any)) { + // Note(rfindley): it's hard to see, but this function attempts to implement + // just the side effects on cache of calling PrimeCache with a ProcessEnv + // that has the given dir as its GOMODCACHE. + // + // Teasing out the control flow, we see that we can avoid any handling of + // vendor/ and can infer module info entirely from the path, simplifying the + // logic here. + + root := gopathwalk.Root{ + Path: filepath.Clean(dir), + Type: gopathwalk.RootModuleCache, + } + + directoryInfo := func(root gopathwalk.Root, dir string) directoryPackageInfo { + // This is a copy of ModuleResolver.scanDirForPackage, trimmed down to + // logic that applies to a module cache directory. + + subdir := "" + if dir != root.Path { + subdir = dir[len(root.Path)+len("/"):] + } + + matches := modCacheRegexp.FindStringSubmatch(subdir) + if len(matches) == 0 { + return directoryPackageInfo{ + status: directoryScanned, + err: fmt.Errorf("invalid module cache path: %v", subdir), + } + } + modPath, err := module.UnescapePath(filepath.ToSlash(matches[1])) + if err != nil { + if logf != nil { + logf("decoding module cache path %q: %v", subdir, err) + } + return directoryPackageInfo{ + status: directoryScanned, + err: fmt.Errorf("decoding module cache path %q: %v", subdir, err), + } + } + importPath := path.Join(modPath, filepath.ToSlash(matches[3])) + index := strings.Index(dir, matches[1]+"@"+matches[2]) + modDir := filepath.Join(dir[:index], matches[1]+"@"+matches[2]) + modName := readModName(filepath.Join(modDir, "go.mod")) + return directoryPackageInfo{ + status: directoryScanned, + dir: dir, + rootType: root.Type, + nonCanonicalImportPath: importPath, + moduleDir: modDir, + moduleName: modName, + } + } + + add := func(root gopathwalk.Root, dir string) { + info := directoryInfo(root, dir) + cache.Store(info.dir, info) + } + + skip := func(_ gopathwalk.Root, dir string) bool { + // Skip directories that have already been scanned. + // + // Note that gopathwalk only adds "package" directories, which must contain + // a .go file, and all such package directories in the module cache are + // immutable. So if we can load a dir, it can be skipped. 
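ScanModuleCache can infer module and import paths from directory names alone because module cache entries are laid out as escapedModulePath@version/packageSubdir, with uppercase letters escaped as !lowercase. A sketch of that inference (the regexp here is a stand-in; the real modCacheRegexp may differ in detail):

```go
package main

import (
	"fmt"
	"path"
	"regexp"

	"golang.org/x/mod/module"
)

// modCacheRe captures the escaped module path, the version, and the
// package subdirectory of a module cache entry.
var modCacheRe = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)

// importPathFor recovers the import path of a package directory given
// relative to the module cache root, without reading any Go files.
func importPathFor(subdir string) (string, error) {
	m := modCacheRe.FindStringSubmatch(subdir)
	if m == nil {
		return "", fmt.Errorf("invalid module cache path: %v", subdir)
	}
	modPath, err := module.UnescapePath(m[1]) // e.g. !burnt!sushi -> BurntSushi
	if err != nil {
		return "", err
	}
	return path.Join(modPath, m[3]), nil
}

func main() {
	ip, err := importPathFor("github.com/!burnt!sushi/toml@v1.2.0/internal")
	fmt.Println(ip, err) // github.com/BurntSushi/toml/internal <nil>
}
```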
+ info, ok := cache.Load(dir) + if !ok { + return false + } + packageScanned, _ := info.reachedStatus(directoryScanned) + return packageScanned + } + + gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Logf: logf, ModulesEnabled: true}) +} diff --git a/vendor/golang.org/x/tools/internal/imports/zstdlib.go b/vendor/golang.org/x/tools/internal/imports/zstdlib.go index 9f992c2bec..8db24df2ff 100644 --- a/vendor/golang.org/x/tools/internal/imports/zstdlib.go +++ b/vendor/golang.org/x/tools/internal/imports/zstdlib.go @@ -151,6 +151,7 @@ var stdlib = map[string][]string{ "cmp": { "Compare", "Less", + "Or", "Ordered", }, "compress/bzip2": { @@ -632,6 +633,8 @@ var stdlib = map[string][]string{ "NameMismatch", "NewCertPool", "NotAuthorizedToSign", + "OID", + "OIDFromInts", "PEMCipher", "PEMCipher3DES", "PEMCipherAES128", @@ -706,6 +709,7 @@ var stdlib = map[string][]string{ "LevelWriteCommitted", "Named", "NamedArg", + "Null", "NullBool", "NullByte", "NullFloat64", @@ -1921,6 +1925,7 @@ var stdlib = map[string][]string{ "R_LARCH_32", "R_LARCH_32_PCREL", "R_LARCH_64", + "R_LARCH_64_PCREL", "R_LARCH_ABS64_HI12", "R_LARCH_ABS64_LO20", "R_LARCH_ABS_HI20", @@ -1928,12 +1933,17 @@ var stdlib = map[string][]string{ "R_LARCH_ADD16", "R_LARCH_ADD24", "R_LARCH_ADD32", + "R_LARCH_ADD6", "R_LARCH_ADD64", "R_LARCH_ADD8", + "R_LARCH_ADD_ULEB128", + "R_LARCH_ALIGN", "R_LARCH_B16", "R_LARCH_B21", "R_LARCH_B26", + "R_LARCH_CFA", "R_LARCH_COPY", + "R_LARCH_DELETE", "R_LARCH_GNU_VTENTRY", "R_LARCH_GNU_VTINHERIT", "R_LARCH_GOT64_HI12", @@ -1953,6 +1963,7 @@ var stdlib = map[string][]string{ "R_LARCH_PCALA64_LO20", "R_LARCH_PCALA_HI20", "R_LARCH_PCALA_LO12", + "R_LARCH_PCREL20_S2", "R_LARCH_RELATIVE", "R_LARCH_RELAX", "R_LARCH_SOP_ADD", @@ -1983,8 +1994,10 @@ var stdlib = map[string][]string{ "R_LARCH_SUB16", "R_LARCH_SUB24", "R_LARCH_SUB32", + "R_LARCH_SUB6", "R_LARCH_SUB64", "R_LARCH_SUB8", + "R_LARCH_SUB_ULEB128", "R_LARCH_TLS_DTPMOD32", "R_LARCH_TLS_DTPMOD64", "R_LARCH_TLS_DTPREL32", @@ -2035,6 +2048,7 @@ var stdlib = map[string][]string{ "R_MIPS_LO16", "R_MIPS_NONE", "R_MIPS_PC16", + "R_MIPS_PC32", "R_MIPS_PJUMP", "R_MIPS_REL16", "R_MIPS_REL32", @@ -2952,6 +2966,8 @@ var stdlib = map[string][]string{ "RegisterName", }, "encoding/hex": { + "AppendDecode", + "AppendEncode", "Decode", "DecodeString", "DecodedLen", @@ -3233,6 +3249,7 @@ var stdlib = map[string][]string{ "TypeSpec", "TypeSwitchStmt", "UnaryExpr", + "Unparen", "ValueSpec", "Var", "Visitor", @@ -3492,6 +3509,7 @@ var stdlib = map[string][]string{ "XOR_ASSIGN", }, "go/types": { + "Alias", "ArgumentError", "Array", "AssertableTo", @@ -3559,6 +3577,7 @@ var stdlib = map[string][]string{ "MethodVal", "MissingMethod", "Named", + "NewAlias", "NewArray", "NewChan", "NewChecker", @@ -3627,6 +3646,7 @@ var stdlib = map[string][]string{ "Uint64", "Uint8", "Uintptr", + "Unalias", "Union", "Universe", "Unsafe", @@ -3643,6 +3663,11 @@ var stdlib = map[string][]string{ "WriteSignature", "WriteType", }, + "go/version": { + "Compare", + "IsValid", + "Lang", + }, "hash": { "Hash", "Hash32", @@ -4078,6 +4103,7 @@ var stdlib = map[string][]string{ "NewTextHandler", "Record", "SetDefault", + "SetLogLoggerLevel", "Source", "SourceKey", "String", @@ -4367,6 +4393,35 @@ var stdlib = map[string][]string{ "Uint64", "Zipf", }, + "math/rand/v2": { + "ChaCha8", + "ExpFloat64", + "Float32", + "Float64", + "Int", + "Int32", + "Int32N", + "Int64", + "Int64N", + "IntN", + "N", + "New", + "NewChaCha8", + "NewPCG", + "NewZipf", + "NormFloat64", + "PCG", + 
"Perm", + "Rand", + "Shuffle", + "Source", + "Uint32", + "Uint32N", + "Uint64", + "Uint64N", + "UintN", + "Zipf", + }, "mime": { "AddExtensionType", "BEncoding", @@ -4540,6 +4595,7 @@ var stdlib = map[string][]string{ "FS", "File", "FileServer", + "FileServerFS", "FileSystem", "Flusher", "Get", @@ -4566,6 +4622,7 @@ var stdlib = map[string][]string{ "MethodPut", "MethodTrace", "NewFileTransport", + "NewFileTransportFS", "NewRequest", "NewRequestWithContext", "NewResponseController", @@ -4599,6 +4656,7 @@ var stdlib = map[string][]string{ "Serve", "ServeContent", "ServeFile", + "ServeFileFS", "ServeMux", "ServeTLS", "Server", @@ -5106,6 +5164,7 @@ var stdlib = map[string][]string{ "StructTag", "Swapper", "Type", + "TypeFor", "TypeOf", "Uint", "Uint16", @@ -5342,6 +5401,7 @@ var stdlib = map[string][]string{ "CompactFunc", "Compare", "CompareFunc", + "Concat", "Contains", "ContainsFunc", "Delete", @@ -10824,6 +10884,7 @@ var stdlib = map[string][]string{ "Value", }, "testing/slogtest": { + "Run", "TestHandler", }, "text/scanner": { diff --git a/vendor/gopkg.in/yaml.v3/LICENSE b/vendor/gopkg.in/yaml.v3/LICENSE new file mode 100644 index 0000000000..2683e4bb1f --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/LICENSE @@ -0,0 +1,50 @@ + +This project is covered by two different licenses: MIT and Apache. + +#### MIT License #### + +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original MIT license, with the additional +copyright staring in 2011 when the project was ported over: + + apic.go emitterc.go parserc.go readerc.go scannerc.go + writerc.go yamlh.go yamlprivateh.go + +Copyright (c) 2006-2010 Kirill Simonov +Copyright (c) 2006-2011 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +### Apache License ### + +All the remaining project files are covered by the Apache license: + +Copyright (c) 2011-2019 Canonical Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/vendor/gopkg.in/yaml.v3/NOTICE b/vendor/gopkg.in/yaml.v3/NOTICE
new file mode 100644
index 0000000000..866d74a7ad
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/NOTICE
@@ -0,0 +1,13 @@
+Copyright 2011-2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v3/README.md b/vendor/gopkg.in/yaml.v3/README.md
new file mode 100644
index 0000000000..08eb1babdd
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/README.md
@@ -0,0 +1,150 @@
+# YAML support for the Go language
+
+Introduction
+------------
+
+The yaml package enables Go programs to comfortably encode and decode YAML
+values. It was developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a
+pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
+C library to parse and generate YAML data quickly and reliably.
+
+Compatibility
+-------------
+
+The yaml package supports most of YAML 1.2, but preserves some behavior
+from 1.1 for backwards compatibility.
+
+Specifically, as of v3 of the yaml package:
+
+ - YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being
+   decoded into a typed bool value. Otherwise they behave as a string. Booleans
+   in YAML 1.2 are _true/false_ only.
+ - Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_
+   as specified in YAML 1.2, because most parsers still use the old format.
+   Octals in the _0o777_ format are supported though, so new files work.
+ - Does not support base-60 floats. These are gone from YAML 1.2, and were
+   actually never supported by this package as it's clearly a poor choice.
+
+Installation and usage
+----------------------
+
+The import path for the package is *gopkg.in/yaml.v3*.
+
+To install it, run:
+
+    go get gopkg.in/yaml.v3
+
+API documentation
+-----------------
+
+If opened in a browser, the import path itself leads to the API documentation:
+
+  - [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3)
+
+API stability
+-------------
+
+The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is licensed under the MIT and Apache License 2.0 licenses.
+Please see the LICENSE file for details.
+
+
+Example
+-------
+
+```Go
+package main
+
+import (
+        "fmt"
+        "log"
+
+        "gopkg.in/yaml.v3"
+)
+
+var data = `
+a: Easy!
+b:
+  c: 2
+  d: [3, 4]
+`
+
+// Note: struct fields must be public in order for unmarshal to
+// correctly populate the data.
+type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/gopkg.in/yaml.v3/apic.go b/vendor/gopkg.in/yaml.v3/apic.go new file mode 100644 index 0000000000..ae7d049f18 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/apic.go @@ -0,0 +1,747 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. 
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + best_width: -1, + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. 
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +// Create ALIAS. +func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool { + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + anchor: anchor, + } + return true +} + +// Create SCALAR. 
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go new file mode 100644 index 0000000000..0173b6982e --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/decode.go @@ -0,0 +1,1000 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
+ +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *Node + anchors map[string]*Node + doneInit bool + textless bool +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + if len(b) == 0 { + b = []byte{'\n'} + } + yaml_parser_set_input_string(&p.parser, b) + return &p +} + +func newParserFromReader(r io.Reader) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + yaml_parser_set_input_reader(&p.parser, r) + return &p +} + +func (p *parser) init() { + if p.doneInit { + return + } + p.anchors = make(map[string]*Node) + p.expect(yaml_STREAM_START_EVENT) + p.doneInit = true +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +// expect consumes an event from the event stream and +// checks that it's of the expected type. +func (p *parser) expect(e yaml_event_type_t) { + if p.event.typ == yaml_NO_EVENT { + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + } + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if p.event.typ != e { + p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) + p.fail() + } + yaml_event_delete(&p.event) + p.event.typ = yaml_NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. +func (p *parser) peek() yaml_event_type_t { + if p.event.typ != yaml_NO_EVENT { + return p.event.typ + } + // It's curious choice from the underlying API to generally return a + // positive result on success, but on this case return true in an error + // scenario. This was the source of bugs in the past (issue #666). + if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR { + p.fail() + } + return p.event.typ +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *Node, anchor []byte) { + if anchor != nil { + n.Anchor = string(anchor) + p.anchors[n.Anchor] = n + } +} + +func (p *parser) parse() *Node { + p.init() + switch p.peek() { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. 
+ return nil + case yaml_TAIL_COMMENT_EVENT: + panic("internal error: unexpected tail comment event (please report)") + default: + panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String()) + } +} + +func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node { + var style Style + if tag != "" && tag != "!" { + tag = shortTag(tag) + style = TaggedStyle + } else if defaultTag != "" { + tag = defaultTag + } else if kind == ScalarNode { + tag, _ = resolve("", value) + } + n := &Node{ + Kind: kind, + Tag: tag, + Value: value, + Style: style, + } + if !p.textless { + n.Line = p.event.start_mark.line + 1 + n.Column = p.event.start_mark.column + 1 + n.HeadComment = string(p.event.head_comment) + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + } + return n +} + +func (p *parser) parseChild(parent *Node) *Node { + child := p.parse() + parent.Content = append(parent.Content, child) + return child +} + +func (p *parser) document() *Node { + n := p.node(DocumentNode, "", "", "") + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + p.parseChild(n) + if p.peek() == yaml_DOCUMENT_END_EVENT { + n.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *Node { + n := p.node(AliasNode, "", "", string(p.event.anchor)) + n.Alias = p.anchors[n.Value] + if n.Alias == nil { + failf("unknown anchor '%s' referenced", n.Value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *Node { + var parsedStyle = p.event.scalar_style() + var nodeStyle Style + switch { + case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = DoubleQuotedStyle + case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = SingleQuotedStyle + case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0: + nodeStyle = LiteralStyle + case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0: + nodeStyle = FoldedStyle + } + var nodeValue = string(p.event.value) + var nodeTag = string(p.event.tag) + var defaultTag string + if nodeStyle == 0 { + if nodeValue == "<<" { + defaultTag = mergeTag + } + } else { + defaultTag = strTag + } + n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue) + n.Style |= nodeStyle + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *Node { + n := p.node(SequenceNode, seqTag, string(p.event.tag), "") + if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 { + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + p.parseChild(n) + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *Node { + n := p.node(MappingNode, mapTag, string(p.event.tag), "") + block := true + if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 { + block = false + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + k := p.parseChild(n) + if block && k.FootComment != "" { + // Must be a foot comment for the prior value when being dedented. 
+ if len(n.Content) > 2 { + n.Content[len(n.Content)-3].FootComment = k.FootComment + k.FootComment = "" + } + } + v := p.parseChild(n) + if k.FootComment == "" && v.FootComment != "" { + k.FootComment = v.FootComment + v.FootComment = "" + } + if p.peek() == yaml_TAIL_COMMENT_EVENT { + if k.FootComment == "" { + k.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_TAIL_COMMENT_EVENT) + } + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 { + n.Content[len(n.Content)-2].FootComment = n.FootComment + n.FootComment = "" + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *Node + aliases map[*Node]bool + terrors []string + + stringMapType reflect.Type + generalMapType reflect.Type + + knownFields bool + uniqueKeys bool + decodeCount int + aliasCount int + aliasDepth int + + mergedFields map[interface{}]bool +} + +var ( + nodeType = reflect.TypeOf(Node{}) + durationType = reflect.TypeOf(time.Duration(0)) + stringMapType = reflect.TypeOf(map[string]interface{}{}) + generalMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = generalMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder() *decoder { + d := &decoder{ + stringMapType: stringMapType, + generalMapType: generalMapType, + uniqueKeys: true, + } + d.aliases = make(map[*Node]bool) + return d +} + +func (d *decoder) terror(n *Node, tag string, out reflect.Value) { + if n.Tag != "" { + tag = n.Tag + } + value := n.Value + if tag != seqTag && tag != mapTag { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) { + err := u.UnmarshalYAML(n) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. 
+func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.ShortTag() == nullTag { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + outi := out.Addr().Interface() + if u, ok := outi.(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + if u, ok := outi.(obsoleteUnmarshaler); ok { + good = d.callObsoleteUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) { + if n.ShortTag() == nullTag { + return reflect.Value{} + } + for _, num := range index { + for { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). + return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + if out.Type() == nodeType { + out.Set(reflect.ValueOf(n).Elem()) + return true + } + switch n.Kind { + case DocumentNode: + return d.document(n, out) + case AliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.Kind { + case ScalarNode: + good = d.scalar(n, out) + case MappingNode: + good = d.mapping(n, out) + case SequenceNode: + good = d.sequence(n, out) + case 0: + if n.IsZero() { + return d.null(out) + } + fallthrough + default: + failf("cannot decode node with unknown kind %d", n.Kind) + } + return good +} + +func (d *decoder) document(n *Node, out reflect.Value) (good bool) { + if len(n.Content) == 1 { + d.doc = n + d.unmarshal(n.Content[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *Node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
+ failf("anchor '%s' value contains itself", n.Value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.Alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) null(out reflect.Value) bool { + if out.CanAddr() { + switch out.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + out.Set(reflect.Zero(out.Type())) + return true + } + } + return false +} + +func (d *decoder) scalar(n *Node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.indicatedString() { + tag = strTag + resolved = n.Value + } else { + tag, resolved = resolve(n.Tag, n.Value) + if tag == binaryTag { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + return d.null(out) + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == binaryTag { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.Value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == binaryTag { + out.SetString(resolved.(string)) + return true + } + out.SetString(n.Value) + return true + case reflect.Interface: + out.Set(reflect.ValueOf(resolved)) + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + // This used to work in v2, but it's very unfriendly. 
+ isDuration := out.Type() == durationType + + switch resolved := resolved.(type) { + case int: + if !isDuration && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !isDuration && !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + case string: + // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html). + // It only works if explicitly attempting to unmarshal into a typed bool value. + switch resolved { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": + out.SetBool(true) + return true + case "n", "N", "no", "No", "NO", "off", "Off", "OFF": + out.SetBool(false) + return true + } + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + panic("yaml internal error: please report the issue") + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, seqTag, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.Content[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + if d.uniqueKeys { + nerrs := len(d.terrors) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + for j := i + 2; j < l; j += 2 { + nj := n.Content[j] + if ni.Kind == nj.Kind && ni.Value == nj.Value { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line)) + } + } + } + if len(d.terrors) > nerrs { + return false + } + } + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Map: + // okay + case reflect.Interface: + iface := out + if isStringMap(n) { + out = reflect.MakeMap(d.stringMapType) + } else { + out = reflect.MakeMap(d.generalMapType) + } + iface.Set(out) + default: + d.terror(n, mapTag, out) + return false + } + + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + stringMapType := d.stringMapType + generalMapType := d.generalMapType + if outt.Elem() == ifaceType { + if outt.Key().Kind() == reflect.String { + d.stringMapType = outt + } else if outt.Key() == ifaceType { + d.generalMapType = outt + } + } + + mergedFields := d.mergedFields + d.mergedFields = nil + + var mergeNode *Node + + mapIsNew := false + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + mapIsNew = true + } + for i := 0; i < l; i += 2 { + if isMerge(n.Content[i]) { + mergeNode = n.Content[i+1] + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.Content[i], k) { + if mergedFields != nil { + ki := k.Interface() + if mergedFields[ki] { + continue + } + mergedFields[ki] = true + } + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) { + out.SetMapIndex(k, e) + } + } + } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + + d.stringMapType = stringMapType + d.generalMapType = generalMapType + return true +} + +func isStringMap(n *Node) bool { + if n.Kind != MappingNode { + return false + } + l := len(n.Content) + for i := 0; i < l; i += 2 { + shortTag := n.Content[i].ShortTag() + if shortTag != strTag && shortTag != mergeTag { + return false + } + } + return true +} + +func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + elemType = inlineMap.Type().Elem() + } + + for _, index := range sinfo.InlineUnmarshalers { + field := d.fieldByIndex(n, out, index) + d.prepare(n, field) + } + + mergedFields := d.mergedFields + d.mergedFields = nil + var mergeNode *Node + var doneFields []bool + if d.uniqueKeys { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + name := settableValueOf("") + l := len(n.Content) + for i := 0; i < l; i += 2 { + 
ni := n.Content[i] + if isMerge(ni) { + mergeNode = n.Content[i+1] + continue + } + if !d.unmarshal(ni, name) { + continue + } + sname := name.String() + if mergedFields != nil { + if mergedFields[sname] { + continue + } + mergedFields[sname] = true + } + if info, ok := sinfo.FieldsMap[sname]; ok { + if d.uniqueKeys { + if doneFields[info.Id] { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = d.fieldByIndex(n, out, info.Inline) + } + d.unmarshal(n.Content[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.Content[i+1], value) + inlineMap.SetMapIndex(name, value) + } else if d.knownFields { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) + } + } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { + mergedFields := d.mergedFields + if mergedFields == nil { + d.mergedFields = make(map[interface{}]bool) + for i := 0; i < len(parent.Content); i += 2 { + k := reflect.New(ifaceType).Elem() + if d.unmarshal(parent.Content[i], k) { + d.mergedFields[k.Interface()] = true + } + } + } + + switch merge.Kind { + case MappingNode: + d.unmarshal(merge, out) + case AliasNode: + if merge.Alias != nil && merge.Alias.Kind != MappingNode { + failWantMap() + } + d.unmarshal(merge, out) + case SequenceNode: + for i := 0; i < len(merge.Content); i++ { + ni := merge.Content[i] + if ni.Kind == AliasNode { + if ni.Alias != nil && ni.Alias.Kind != MappingNode { + failWantMap() + } + } else if ni.Kind != MappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } + + d.mergedFields = mergedFields +} + +func isMerge(n *Node) bool { + return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag) +} diff --git a/vendor/gopkg.in/yaml.v3/emitterc.go b/vendor/gopkg.in/yaml.v3/emitterc.go new file mode 100644 index 0000000000..0f47c9ca8a --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/emitterc.go @@ -0,0 +1,2020 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and below and drop from everywhere else (see commented lines). + emitter.indention = true + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and above and drop from everywhere else (see commented lines). + emitter.indention = true + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. 
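+//
+// Events queue up in emitter.events and are only drained once enough
+// lookahead has accumulated for layout decisions (see
+// yaml_emitter_need_more_events below). As an illustrative sketch of what
+// ultimately feeds this loop through the public API (not part of this file):
+//
+//	var buf bytes.Buffer
+//	enc := yaml.NewEncoder(&buf)
+//	_ = enc.Encode(map[string]int{"a": 1}) // produces the mapping/scalar events
+//	_ = enc.Close()                        // emits STREAM-END and flushes
+//	// buf.String() == "a: 1\n"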
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + // [Go] This was changed so that indentations are more regular. + if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE { + // The first indent inside a sequence will just skip the "- " indicator. + emitter.indent += 2 + } else { + // Everything else aligns to the chosen indentation. + emitter.indent = emitter.best_indent*((emitter.indent+emitter.best_indent)/emitter.best_indent) + } + } + return true +} + +// State dispatcher. 
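+//
+// Each state maps onto one of the yaml_emitter_emit_* functions below. As a
+// rough, abbreviated illustration, emitting the document "a: 1" steps through:
+//
+//	STREAM-START -> FIRST-DOCUMENT-START -> DOCUMENT-CONTENT
+//	  -> BLOCK-MAPPING-FIRST-KEY -> BLOCK-MAPPING-SIMPLE-VALUE
+//	  -> BLOCK-MAPPING-KEY -> DOCUMENT-END -> END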
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false) + + case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false) + + case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. 
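+//
+// This is also where emitter-wide defaults are pinned down: best_indent is
+// clamped into [2, 9] (anything else becomes 2), a non-negative best_width
+// that is too small becomes 80, a negative one means no wrapping, and the
+// line break defaults to '\n'. Through the public API that corresponds to,
+// for example (illustrative, not part of this file):
+//
+//	enc := yaml.NewEncoder(&buf)
+//	enc.SetIndent(2) // feeds best_indent; out-of-range values fall back to 2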
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + emitter.space_above = true + emitter.foot_indent = -1 + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. +func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical || true { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if len(emitter.head_comment) > 0 { + if !yaml_emitter_process_head_comment(emitter) { + return false 
+ } + if !put_break(emitter) { + return false + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !yaml_emitter_emit_node(emitter, event, true, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + // [Go] Force document foot separation. + emitter.foot_indent = 0 + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.foot_indent = -1 + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. 
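+//
+// Flow sequences render as "[a, b]" instead of one "- item" line each. With
+// the public API they are requested via the ",flow" struct tag option, for
+// example (illustrative, not part of this file):
+//
+//	type T struct {
+//		Items []int `yaml:"items,flow"`
+//	}
+//	out, _ := yaml.Marshal(T{Items: []int{1, 2, 3}})
+//	// string(out) == "items: [1, 2, 3]\n"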
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + if emitter.canonical && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.column == 0 || emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a flow key node. 
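+//
+// Keys that pass yaml_emitter_check_simple_key below are written inline as
+// "key: value"; anything multiline or longer than 128 bytes falls back to the
+// explicit "? key" form. For example (illustrative, not part of this file):
+//
+//	k := strings.Repeat("x", 129)
+//	out, _ := yaml.Marshal(map[string]int{k: 1})
+//	// the oversized key is emitted in the explicit "? ..." style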
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a block item node. 
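+//
+// Block sequences are the default for slices: one "- " entry per line at the
+// current indent. For example (illustrative, not part of this file):
+//
+//	out, _ := yaml.Marshal([]string{"a", "b"})
+//	// string(out) == "- a\n- b\n"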
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a block key node. +func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if len(emitter.line_comment) > 0 { + // [Go] A line comment was provided for the key. That's unusual as the + // scanner associates line comments with the value. Either way, + // save the line comment and render it appropriately later. + emitter.key_line_comment = emitter.line_comment + emitter.line_comment = nil + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + if len(emitter.key_line_comment) > 0 { + // [Go] Line comments are generally associated with the value, but when there's + // no value on the same line as a mapping key they end up attached to the + // key itself. + if event.typ == yaml_SCALAR_EVENT { + if len(emitter.line_comment) == 0 { + // A scalar is coming and it has no line comments by itself yet, + // so just let it handle the line comment as usual. If it has a + // line comment, we can't have both so the one from the key is lost. 
+ emitter.line_comment = emitter.key_line_comment + emitter.key_line_comment = nil + } + } else if event.sequence_style() != yaml_FLOW_SEQUENCE_STYLE && (event.typ == yaml_MAPPING_START_EVENT || event.typ == yaml_SEQUENCE_START_EVENT) { + // An indented block follows, so write the comment right now. + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + if !yaml_emitter_process_line_comment(emitter) { + return false + } + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0 +} + +// Expect a node. +func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. 
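+//
+// A mapping is emitted in flow style when we are already inside flow context,
+// when the event asks for flow style, or when the next two events show it is
+// empty, which is why an empty map prints compactly (illustrative):
+//
+//	out, _ := yaml.Marshal(map[string]int{})
+//	// string(out) == "{}\n"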
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. 
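+//
+// The requested style is only a hint: it is downgraded until the value can be
+// written safely (plain -> single quoted -> double quoted), and block styles
+// are refused inside flow context or simple keys. For example (illustrative,
+// not part of this file; assumes the default indent of 4):
+//
+//	out, _ := yaml.Marshal("hello\nworld") // multiline, so plain is not allowed
+//	// string(out) == "|-\n    hello\n    world\n"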
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. 
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+	switch emitter.scalar_data.style {
+	case yaml_PLAIN_SCALAR_STYLE:
+		return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_LITERAL_SCALAR_STYLE:
+		return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+	case yaml_FOLDED_SCALAR_STYLE:
+		return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+	}
+	panic("unknown scalar style")
+}
+
+// Write a head comment.
+func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool {
+	if len(emitter.tail_comment) > 0 {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+		if !yaml_emitter_write_comment(emitter, emitter.tail_comment) {
+			return false
+		}
+		emitter.tail_comment = emitter.tail_comment[:0]
+		emitter.foot_indent = emitter.indent
+		if emitter.foot_indent < 0 {
+			emitter.foot_indent = 0
+		}
+	}
+
+	if len(emitter.head_comment) == 0 {
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.head_comment) {
+		return false
+	}
+	emitter.head_comment = emitter.head_comment[:0]
+	return true
+}
+
+// Write a line comment.
+func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool {
+	if len(emitter.line_comment) == 0 {
+		return true
+	}
+	if !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.line_comment) {
+		return false
+	}
+	emitter.line_comment = emitter.line_comment[:0]
+	return true
+}
+
+// Write a foot comment.
+func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool {
+	if len(emitter.foot_comment) == 0 {
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.foot_comment) {
+		return false
+	}
+	emitter.foot_comment = emitter.foot_comment[:0]
+	emitter.foot_indent = emitter.indent
+	if emitter.foot_indent < 0 {
+		emitter.foot_indent = 0
+	}
+	return true
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+	if version_directive.major != 1 || version_directive.minor != 1 {
+		return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+	}
+	return true
+}
+
+// Check if a %TAG directive is valid.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+	handle := tag_directive.handle
+	prefix := tag_directive.prefix
+	if len(handle) == 0 {
+		return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+	}
+	if handle[0] != '!' {
+		return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+	}
+	if handle[len(handle)-1] != '!'
{ + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + tab_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if value[i] == '\t' { + tab_characters = true + } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || tab_characters || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
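+//
+// Analysis also captures any comments riding on the event. With the public
+// API those come from the HeadComment/LineComment/FootComment fields of
+// yaml.Node, which should round-trip (illustrative, not part of this file):
+//
+//	var n yaml.Node
+//	_ = yaml.Unmarshal([]byte("a: 1 # one\n"), &n)
+//	out, _ := yaml.Marshal(&n)
+//	// string(out) == "a: 1 # one\n"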
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + if len(event.head_comment) > 0 { + emitter.head_comment = event.head_comment + } + if len(event.line_comment) > 0 { + emitter.line_comment = event.line_comment + } + if len(event.foot_comment) > 0 { + emitter.foot_comment = event.foot_comment + } + if len(event.tail_comment) > 0 { + emitter.tail_comment = event.tail_comment + } + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + if emitter.foot_indent == indent { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + //emitter.indention = true + emitter.space_above = false + emitter.foot_indent = -1 + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func 
yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if len(value) > 0 && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + if len(value) > 0 { + emitter.whitespace = false + } + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if 
!yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if 
!yaml_emitter_process_line_comment(emitter) { + return false + } + //emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + + //emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} + +func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool { + breaks := false + pound := false + for i := 0; i < len(comment); { + if is_break(comment, i) { + if !write_break(emitter, comment, &i) { + return false + } + //emitter.indention = true + breaks = true + pound = false + } else { + if breaks && !yaml_emitter_write_indent(emitter) { + return false + } + if !pound { + if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) { + return false + } + pound = true + } + if !write(emitter, comment, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + if !breaks && !put_break(emitter) { + return false + } + + emitter.whitespace = true + //emitter.indention = true + return true +} diff --git a/vendor/gopkg.in/yaml.v3/encode.go b/vendor/gopkg.in/yaml.v3/encode.go new file mode 100644 index 0000000000..de9e72a3e6 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/encode.go @@ -0,0 +1,577 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yaml + +import ( + "encoding" + "fmt" + "io" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + indent int + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + if e.indent == 0 { + e.indent = 4 + } + e.emitter.best_indent = e.indent + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + var node *Node + if in.IsValid() { + node, _ = in.Interface().(*Node) + } + if node != nil && node.Kind == DocumentNode { + e.nodev(in) + } else { + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + tag = shortTag(tag) + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch value := iface.(type) { + case *Node: + e.nodev(in) + return + case Node: + if !in.CanAddr() { + var n = reflect.New(in.Type()).Elem() + n.Set(in) + in = n + } + e.nodev(in.Addr()) + return + case time.Time: + e.timev(tag, in) + return + case *time.Time: + e.timev(tag, in.Elem()) + return + case time.Duration: + e.stringv(tag, reflect.ValueOf(value.String())) + return + case Marshaler: + v, err := value.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + e.marshal(tag, reflect.ValueOf(v)) + return + case encoding.TextMarshaler: + text, err := value.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + e.marshal(tag, in.Elem()) + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice, reflect.Array: + e.slicev(tag, in) + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + e.intv(tag, in) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + 
sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) { + for _, num := range index { + for { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return reflect.Value{} + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = e.fieldByIndex(in, info.Inline) + if !value.IsValid() { + continue + } + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) + e.emit() + f() + yaml_mapping_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +// isOldBool returns whether s is bool notation as defined in YAML 1.1. +// +// We continue to force strings that YAML 1.1 would interpret as booleans to be +// rendered as quotes strings so that the marshalled output valid for YAML 1.1 +// parsing. 
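+// For example, the plain string "on" is emitted as "on" (double-quoted), since a YAML 1.1 parser would otherwise read the bare scalar as a boolean.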
+func isOldBool(s string) (result bool) { + switch s { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON", + "n", "N", "no", "No", "NO", "off", "Off", "OFF": + return true + default: + return false + } +} + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == binaryTag { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = binaryTag + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s)) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. + switch { + case strings.Contains(s, "\n"): + if e.flow { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } else { + style = yaml_LITERAL_SCALAR_STYLE + } + case canUsePlain: + style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style, nil, nil, nil, nil) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) { + // TODO Kill this function. Replace all initialize calls by their underlining Go literals. + implicit := tag == "" + if !implicit { + tag = longTag(tag) + } + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.event.head_comment = head + e.event.line_comment = line + e.event.foot_comment = foot + e.event.tail_comment = tail + e.emit() +} + +func (e *encoder) nodev(in reflect.Value) { + e.node(in.Interface().(*Node), "") +} + +func (e *encoder) node(node *Node, tail string) { + // Zero nodes behave as nil. 
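+ // Only a completely zero Node is treated as nil; a Kind of 0 with any other field set still reaches the unknown-kind failure at the bottom of this function.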
+ if node.Kind == 0 && node.IsZero() { + e.nilv() + return + } + + // If the tag was not explicitly requested, and dropping it won't change the + // implicit tag of the value, don't include it in the presentation. + var tag = node.Tag + var stag = shortTag(tag) + var forceQuoting bool + if tag != "" && node.Style&TaggedStyle == 0 { + if node.Kind == ScalarNode { + if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 { + tag = "" + } else { + rtag, _ := resolve("", node.Value) + if rtag == stag { + tag = "" + } else if stag == strTag { + tag = "" + forceQuoting = true + } + } + } else { + var rtag string + switch node.Kind { + case MappingNode: + rtag = mapTag + case SequenceNode: + rtag = seqTag + } + if rtag == stag { + tag = "" + } + } + } + + switch node.Kind { + case DocumentNode: + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + for _, node := range node.Content { + e.node(node, "") + } + yaml_document_end_event_initialize(&e.event, true) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case SequenceNode: + style := yaml_BLOCK_SEQUENCE_STYLE + if node.Style&FlowStyle != 0 { + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + for _, node := range node.Content { + e.node(node, "") + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case MappingNode: + style := yaml_BLOCK_MAPPING_STYLE + if node.Style&FlowStyle != 0 { + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style) + e.event.tail_comment = []byte(tail) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + + // The tail logic below moves the foot comment of prior keys to the following key, + // since the value for each key may be a nested structure and the foot needs to be + // processed only the entirety of the value is streamed. The last tail is processed + // with the mapping end event. + var tail string + for i := 0; i+1 < len(node.Content); i += 2 { + k := node.Content[i] + foot := k.FootComment + if foot != "" { + kopy := *k + kopy.FootComment = "" + k = &kopy + } + e.node(k, tail) + tail = foot + + v := node.Content[i+1] + e.node(v, "") + } + + yaml_mapping_end_event_initialize(&e.event) + e.event.tail_comment = []byte(tail) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case AliasNode: + yaml_alias_event_initialize(&e.event, []byte(node.Value)) + e.event.head_comment = []byte(node.HeadComment) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case ScalarNode: + value := node.Value + if !utf8.ValidString(value) { + if stag == binaryTag { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if stag != "" { + failf("cannot marshal invalid UTF-8 data as %s", stag) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. 
+ tag = binaryTag + value = encodeBase64(value) + } + + style := yaml_PLAIN_SCALAR_STYLE + switch { + case node.Style&DoubleQuotedStyle != 0: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + case node.Style&SingleQuotedStyle != 0: + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + case node.Style&LiteralStyle != 0: + style = yaml_LITERAL_SCALAR_STYLE + case node.Style&FoldedStyle != 0: + style = yaml_FOLDED_SCALAR_STYLE + case strings.Contains(value, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case forceQuoting: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail)) + default: + failf("cannot encode node with unknown kind %d", node.Kind) + } +} diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go new file mode 100644 index 0000000000..ac66fccc05 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/parserc.go @@ -0,0 +1,1249 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + token := &parser.tokens[parser.tokens_head] + yaml_parser_unfold_comments(parser, token) + return token + } + return nil +} + +// yaml_parser_unfold_comments walks through the comments queue and joins all +// comments behind the position of the provided token into the respective +// top-level comment slices in the parser. +func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) { + for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index { + comment := &parser.comments[parser.comments_head] + if len(comment.head) > 0 { + if token.typ == yaml_BLOCK_END_TOKEN { + // No heads on ends, so keep comment.head for a follow up token. + break + } + if len(parser.head_comment) > 0 { + parser.head_comment = append(parser.head_comment, '\n') + } + parser.head_comment = append(parser.head_comment, comment.head...) + } + if len(comment.foot) > 0 { + if len(parser.foot_comment) > 0 { + parser.foot_comment = append(parser.foot_comment, '\n') + } + parser.foot_comment = append(parser.foot_comment, comment.foot...) + } + if len(comment.line) > 0 { + if len(parser.line_comment) > 0 { + parser.line_comment = append(parser.line_comment, '\n') + } + parser.line_comment = append(parser.line_comment, comment.line...) + } + *comment = yaml_comment_t{} + parser.comments_head++ + } +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
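+// Each state function below consumes tokens, fills in exactly one event, and advances parser.state (or pushes a continuation onto parser.states), so this dispatcher runs once per emitted event.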
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END
+//              ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_STREAM_START_TOKEN {
+		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+	}
+	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_STREAM_START_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+		encoding:   token.encoding,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                          *
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                          *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// Parse extra document end indicators.
+	if !implicit {
+		for token.typ == yaml_DOCUMENT_END_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+		token.typ != yaml_DOCUMENT_START_TOKEN &&
+		token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an implicit document.
+		if !yaml_parser_process_directives(parser, nil, nil) {
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+		var head_comment []byte
+		if len(parser.head_comment) > 0 {
+			// [Go] Scan the header comment backwards, and if an empty line is found, break
+			// the header so the part before the last empty line goes into the
+			// document header, while the bottom of it goes into a follow up event.
+			for i := len(parser.head_comment) - 1; i > 0; i-- {
+				if parser.head_comment[i] == '\n' {
+					if i == len(parser.head_comment)-1 {
+						head_comment = parser.head_comment[:i]
+						parser.head_comment = parser.head_comment[i+1:]
+						break
+					} else if parser.head_comment[i-1] == '\n' {
+						head_comment = parser.head_comment[:i-1]
+						parser.head_comment = parser.head_comment[i+1:]
+						break
+					}
+				}
+			}
+		}
+
+		*event = yaml_event_t{
+			typ:        yaml_DOCUMENT_START_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+
+			head_comment: head_comment,
+		}
+
+	} else if token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an explicit document.
+		var version_directive *yaml_version_directive_t
+		var tag_directives []yaml_tag_directive_t
+		start_mark := token.start_mark
+		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+			return false
+		}
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_DOCUMENT_START_TOKEN {
+			yaml_parser_set_parser_error(parser,
+				"did not find expected <document start>", token.start_mark)
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+		end_mark := token.end_mark
+
+		*event = yaml_event_t{
+			typ:               yaml_DOCUMENT_START_EVENT,
+			start_mark:        start_mark,
+			end_mark:          end_mark,
+			version_directive: version_directive,
+			tag_directives:    tag_directives,
+			implicit:          false,
+		}
+		skip_token(parser)
+
+	} else {
+		// Parse the stream end.
+ parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + yaml_parser_set_event_comments(parser, event) + if len(event.head_comment) > 0 && len(event.foot_comment) == 0 { + event.foot_comment = event.head_comment + event.head_comment = nil + } + return true +} + +func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) { + event.head_comment = parser.head_comment + event.line_comment = parser.line_comment + event.foot_comment = parser.foot_comment + parser.head_comment = nil + parser.line_comment = nil + parser.foot_comment = nil + parser.tail_comment = nil + parser.stem_comment = nil +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
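+ // Found the directive for this handle; handles are unique, so stop searching.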
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: 
false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + prior_head_len := len(parser.head_comment) + skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + prior_head_len := len(parser.head_comment) + skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Split stem comment from head comment. 
+// +// When a sequence or map is found under a sequence entry, the former head comment +// is assigned to the underlying sequence or map as a whole, not the individual +// sequence or map entry as would be expected otherwise. To handle this case the +// previous head comment is moved aside as the stem comment. +func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { + if stem_len == 0 { + return + } + + token := peek_token(parser) + if token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { + return + } + + parser.stem_comment = parser.head_comment[:stem_len] + if len(parser.head_comment) == stem_len { + parser.head_comment = nil + } else { + // Copy suffix to prevent very strange bugs if someone ever appends + // further bytes to the prefix in the stem_comment slice above. + parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...) + } +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? +// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + // [Go] A tail comment was left from the prior mapping value processed. Emit an event + // as it needs to be processed with that value and not the following key. + if len(parser.tail_comment) > 0 { + *event = yaml_event_t{ + typ: yaml_TAIL_COMMENT_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + foot_comment: parser.tail_comment, + } + parser.tail_comment = nil + return true + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? 
+// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. +func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. 
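+// Collects any %YAML and %TAG directives preceding a document, then appends the default "!" and "!!" handles so tag resolution always has them available.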
+func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/gopkg.in/yaml.v3/readerc.go b/vendor/gopkg.in/yaml.v3/readerc.go new file mode 100644 index 0000000000..b7de0a89c4 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/readerc.go @@ -0,0 +1,434 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. +const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. 
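+// [Go] In this port the count is in characters, not bytes, and at EOF the buffer is padded with NUL characters so the requested length is always met.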
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. + if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. 
+ if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. 
+ if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/gopkg.in/yaml.v3/resolve.go b/vendor/gopkg.in/yaml.v3/resolve.go new file mode 100644 index 0000000000..64ae888057 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/resolve.go @@ -0,0 +1,326 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, boolTag, []string{"true", "True", "TRUE"}}, + {false, boolTag, []string{"false", "False", "FALSE"}}, + {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", mergeTag, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const ( + nullTag = "!!null" + boolTag = "!!bool" + strTag = "!!str" + intTag = "!!int" + floatTag = "!!float" + timestampTag = "!!timestamp" + seqTag = "!!seq" + mapTag = "!!map" + binaryTag = "!!binary" + mergeTag = "!!merge" +) + +var longTags = make(map[string]string) +var shortTags = make(map[string]string) + +func init() { + for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} { + ltag := longTag(stag) + longTags[stag] = ltag + shortTags[ltag] = stag + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + if strings.HasPrefix(tag, longTagPrefix) { + if stag, ok := shortTags[tag]; ok { + return stag + } + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + if ltag, ok := longTags[tag]; ok { + return ltag + } + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + tag = shortTag(tag) + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, strTag, binaryTag: + return + case floatTag: + if rtag == intTag { + switch v := out.(type) { + case int64: + rtag = floatTag + out = float64(v) + return + case int: + rtag = floatTag + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != strTag && tag != binaryTag { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. 
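+		// [Editor's aside — examples implied by the tables above, not an
+		// upstream comment] A few concrete resolutions of plain scalars:
+		//
+		//	resolve("", "TRUE")  // "!!bool", true     (resolveMap hit, hint 'M')
+		//	resolve("", "y")     // "!!str", "y"       (hint 'M' but no map entry)
+		//	resolve("", "0x1A")  // "!!int", 26        (hint 'D', ParseInt base 0)
+		//	resolve("", "1e3")   // "!!float", 1000.0  (matches yamlStyleFloat)
+		//	resolve("", ".inf")  // "!!float", +Inf    (resolveMap hit, hint '.')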
+ + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return floatTag, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. + if tag == "" || tag == timestampTag { + t, ok := parseTimestamp(in) + if ok { + return timestampTag, t + } + } + + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return intTag, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return floatTag, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return intTag, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-"+plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + } + // Octals as introduced in version 1.2 of the spec. + // Octals from the 1.1 spec, spelled as 0777, are still + // decoded by default in v3 as well for compatibility. + // May be dropped in v4 depending on how usage evolves. + if strings.HasPrefix(plain, "0o") { + intv, err := strconv.ParseInt(plain[2:], 8, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 8, 64) + if err == nil { + return intTag, uintv + } + } else if strings.HasPrefix(plain, "-0o") { + intv, err := strconv.ParseInt("-"+plain[3:], 8, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + } + default: + panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")") + } + } + return strTag, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. 
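+	// [Editor's aside — usage examples, not upstream comments] With these
+	// layouts, parseTimestamp (below) behaves as:
+	//
+	//	parseTimestamp("2001-12-14")             // ok: date only
+	//	parseTimestamp("2001-12-14 21:59:43.10") // ok: space separated, no zone
+	//	parseTimestamp("14-12-2001")             // false: fails the YYYY- prefix check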
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+	// TODO write code to check all the formats supported by
+	// http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+	// Quick check: all date formats start with YYYY-.
+	i := 0
+	for ; i < len(s); i++ {
+		if c := s[i]; c < '0' || c > '9' {
+			break
+		}
+	}
+	if i != 4 || i == len(s) || s[i] != '-' {
+		return time.Time{}, false
+	}
+	for _, format := range allowedTimestampFormats {
+		if t, err := time.Parse(format, s); err == nil {
+			return t, true
+		}
+	}
+	return time.Time{}, false
+}
diff --git a/vendor/gopkg.in/yaml.v3/scannerc.go b/vendor/gopkg.in/yaml.v3/scannerc.go
new file mode 100644
index 0000000000..ca0070108f
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/scannerc.go
@@ -0,0 +1,3038 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive descent parser (or, as it
+// is usually called, an LL(1) parser).
+//
+// There are really only two aspects of scanning that might be called "clever";
+// the rest is quite straightforward. These are "block collection start" and
+// "simple keys", both of which are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner, together with short descriptions.
+//
+// Now, tokens:
+//
+//      STREAM-START(encoding)          # The stream start.
+//      STREAM-END                      # The stream end.
+//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
+//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
+//      DOCUMENT-START                  # '---'
+//      DOCUMENT-END                    # '...'
+//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
+//      BLOCK-MAPPING-START             # sequence or a block mapping.
+//      BLOCK-END                       # Indentation decrease.
+//      FLOW-SEQUENCE-START             # '['
+//      FLOW-SEQUENCE-END               # ']'
+//      FLOW-MAPPING-START              # '{'
+//      FLOW-MAPPING-END                # '}'
+//      BLOCK-ENTRY                     # '-'
+//      FLOW-ENTRY                      # ','
+//      KEY                             # '?' or nothing (simple keys).
+//      VALUE                           # ':'
+//      ALIAS(anchor)                   # '*anchor'
+//      ANCHOR(anchor)                  # '&anchor'
+//      TAG(handle,suffix)              # '!handle!suffix'
+//      SCALAR(value,style)             # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+//      STREAM-START(encoding)
+//      STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for directives:
+//
+//      VERSION-DIRECTIVE(major,minor)
+//      TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+//      %YAML   1.1
+//      %TAG    !       !foo
+//      %TAG    !yaml!  tag:yaml.org,2002:
+//      ---
+//
+// The corresponding sequence of tokens:
+//
+//      STREAM-START(utf-8)
+//      VERSION-DIRECTIVE(1,1)
+//      TAG-DIRECTIVE("!","!foo")
+//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+//      DOCUMENT-START
+//      STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+//      DOCUMENT-START
+//      DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+//      'a scalar'
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      SCALAR("a scalar",single-quoted)
+//      STREAM-END
+//
+// 2. An explicit document:
+//
+//      ---
+//      'a scalar'
+//      ...
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      DOCUMENT-START
+//      SCALAR("a scalar",single-quoted)
+//      DOCUMENT-END
+//      STREAM-END
+//
+// 3. Several documents in a stream:
+//
+//      'a scalar'
+//      ---
+//      'another scalar'
+//      ---
+//      'yet another scalar'
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      SCALAR("a scalar",single-quoted)
+//      DOCUMENT-START
+//      SCALAR("another scalar",single-quoted)
+//      DOCUMENT-START
+//      SCALAR("yet another scalar",single-quoted)
+//      STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+//      ALIAS(anchor)
+//      ANCHOR(anchor)
+//      TAG(handle,suffix)
+//      SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+//      &A [ *A ]
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      ANCHOR("A")
+//      FLOW-SEQUENCE-START
+//      ALIAS("A")
+//      FLOW-SEQUENCE-END
+//      STREAM-END
+//
+// 2. A tagged scalar:
+//
+//      !!float "3.14"  # A good approximation.
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      TAG("!!","float")
+//      SCALAR("3.14",double-quoted)
+//      STREAM-END
+//
+// 3. Various scalar styles:
+//
+//      --- # Implicit empty plain scalars do not produce tokens.
+//      --- a plain scalar
+//      --- 'a single-quoted scalar'
+//      --- "a double-quoted scalar"
+//      --- |-
+//        a literal scalar
+//      --- >-
+//        a folded
+//        scalar
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      DOCUMENT-START
+//      DOCUMENT-START
+//      SCALAR("a plain scalar",plain)
+//      DOCUMENT-START
+//      SCALAR("a single-quoted scalar",single-quoted)
+//      DOCUMENT-START
+//      SCALAR("a double-quoted scalar",double-quoted)
+//      DOCUMENT-START
+//      SCALAR("a literal scalar",literal)
+//      DOCUMENT-START
+//      SCALAR("a folded scalar",folded)
+//      STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+//      FLOW-SEQUENCE-START
+//      FLOW-SEQUENCE-END
+//      FLOW-MAPPING-START
+//      FLOW-MAPPING-END
+//      FLOW-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// respectively. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+//      [item 1, item 2, item 3]
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      FLOW-SEQUENCE-START
+//      SCALAR("item 1",plain)
+//      FLOW-ENTRY
+//      SCALAR("item 2",plain)
+//      FLOW-ENTRY
+//      SCALAR("item 3",plain)
+//      FLOW-SEQUENCE-END
+//      STREAM-END
+//
+// 2. A flow mapping:
+//
+//      {
+//          a simple key: a value,  # Note that the KEY token is produced.
+//          ? a complex key: another value,
+//      }
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      FLOW-MAPPING-START
+//      KEY
+//      SCALAR("a simple key",plain)
+//      VALUE
+//      SCALAR("a value",plain)
+//      FLOW-ENTRY
+//      KEY
+//      SCALAR("a complex key",plain)
+//      VALUE
+//      SCALAR("another value",plain)
+//      FLOW-ENTRY
+//      FLOW-MAPPING-END
+//      STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+//      BLOCK-SEQUENCE-START
+//      BLOCK-MAPPING-START
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote the
+// indentation increase that precedes a block collection (cf. the INDENT token
+// in Python). The token BLOCK-END denotes the indentation decrease that ends a
+// block collection (cf. the DEDENT token in Python). However, YAML has some
+// syntax peculiarities that make detecting these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' respectively.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1.
Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
+// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + parser.newlines++ + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + parser.newlines++ + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.newlines++ + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. 
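+	// [Editor's aside — simplified model, not upstream code; tokenQueue is
+	// an invented name] The token queue is a plain slice plus a head index;
+	// the pop performed below behaves like:
+	//
+	//	type tokenQueue struct {
+	//		tokens []yaml_token_t
+	//		head   int
+	//	}
+	//
+	//	func (q *tokenQueue) pop() yaml_token_t {
+	//		t := q.tokens[q.head]
+	//		q.head++ // the real code reuses/compacts the slice elsewhere
+	//		return t
+	//	}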
+ *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // [Go] The comment parsing logic requires a lookahead of two tokens + // so that foot comments may be parsed in time of associating them + // with the tokens that are parsed before them, and also for line + // comments to be transformed into head comments in some edge cases. + if parser.tokens_head < len(parser.tokens)-2 { + // If a potential simple key is at the head position, we need to fetch + // the next token to disambiguate it. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break + } + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + scan_mark := parser.mark + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // [Go] While unrolling indents, transform the head comments of prior + // indentation levels observed after scan_start into foot comments at + // the respective indexes. + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? 
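+	// [Editor's aside — examples, not an upstream comment] Directives are
+	// only recognised in column 0, e.g.:
+	//
+	//	%YAML 1.2       <- handled by yaml_parser_fetch_directive below
+	//	key: %notadir   <- '%' cannot start a plain scalar; scanner error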
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + comment_mark := parser.mark + if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') { + // Associate any following comments with the prior token. + comment_mark = parser.tokens[len(parser.tokens)-1].start_mark + } + defer func() { + if !ok { + return + } + if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN { + // Sequence indicators alone have no line comments. It becomes + // a head comment for whatever follows. + return + } + if !yaml_parser_scan_line_comment(parser, comment_mark) { + ok = false + return + } + }() + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? 
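+	// [Editor's aside — examples, not an upstream comment] The two flow
+	// scalar styles differ only in their escaping rules, e.g.:
+	//
+	//	'it''s'   -> SCALAR("it's",single-quoted)    (quote doubled to escape)
+	//	"a\tb"    -> SCALAR("a<TAB>b",double-quoted) (backslash escapes)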
+ if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] TODO Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. + if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + return false, true + } + return true, true +} + +// Check if a simple key may start at the current position and add it if +// needed. 
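+// [Editor's aside — examples, not an upstream comment] A "simple" key is an
+// implicit one, written without the '?' indicator; per the 1024-character,
+// single-line rule quoted above:
+//
+//	plain key: value   # simple key; the KEY token is inserted retroactively
+//	? explicit key     # complex key; no lookahead bookkeeping is needed
+//	: value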
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+	// A simple key is required at the current position if the scanner is in
+	// the block context and the current column coincides with the indentation
+	// level.
+
+	required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+	//
+	// If the current position may start a simple key, save it.
+	//
+	if parser.simple_key_allowed {
+		simple_key := yaml_simple_key_t{
+			possible:     true,
+			required:     required,
+			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+			mark:         parser.mark,
+		}
+
+		if !yaml_parser_remove_simple_key(parser) {
+			return false
+		}
+		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+		parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
+	}
+	return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+	i := len(parser.simple_keys) - 1
+	if parser.simple_keys[i].possible {
+		// If the key is required, it is an error.
+		if parser.simple_keys[i].required {
+			return yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", parser.simple_keys[i].mark,
+				"could not find expected ':'")
+		}
+		// Remove the key from the stack.
+		parser.simple_keys[i].possible = false
+		delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
+	}
+	return true
+}
+
+// max_flow_level limits the flow_level
+const max_flow_level = 10000
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+	// Reset the simple key on the next level.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+		possible:     false,
+		required:     false,
+		token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+		mark:         parser.mark,
+	})
+
+	// Increase the flow level.
+	parser.flow_level++
+	if parser.flow_level > max_flow_level {
+		return yaml_parser_set_scanner_error(parser,
+			"while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+			fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+	}
+	return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+	if parser.flow_level > 0 {
+		parser.flow_level--
+		last := len(parser.simple_keys) - 1
+		delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+		parser.simple_keys = parser.simple_keys[:last]
+	}
+	return true
+}
+
+// max_indents limits the indents stack size
+const max_indents = 10000
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	if parser.indent < column {
+		// Push the current indentation level to the stack and set the new
+		// indentation level.
+		parser.indents = append(parser.indents, parser.indent)
+		parser.indent = column
+		if len(parser.indents) > max_indents {
+			return yaml_parser_set_scanner_error(parser,
+				"while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+				fmt.Sprintf("exceeded max depth of %d", max_indents))
+		}
+
+		// Create a token and insert it into the queue.
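+		// [Editor's aside — worked example, not an upstream comment] For
+		//
+		//	a:
+		//	  b: 1
+		//
+		// indent starts at -1; "a" rolls it to 0 (BLOCK-MAPPING-START) and
+		// "b" rolls it to 2, leaving indents == [-1, 0]. Unrolling at the
+		// end of the stream then emits one BLOCK-END per stacked level.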
+ token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + block_mark := scan_mark + block_mark.index-- + + // Loop through the indentation levels in the stack. + for parser.indent > column { + + // [Go] Reposition the end token before potential following + // foot comments of parent blocks. For that, search + // backwards for recent comments that were at the same + // indent as the block that is ending now. + stop_index := block_mark.index + for i := len(parser.comments) - 1; i >= 0; i-- { + comment := &parser.comments[i] + + if comment.end_mark.index < stop_index { + // Don't go back beyond the start of the comment/whitespace scan, unless column < 0. + // If requested indent column is < 0, then the document is over and everything else + // is a foot anyway. + break + } + if comment.start_mark.column == parser.indent+1 { + // This is a good match. But maybe there's a former comment + // at that same indent level, so keep searching. + block_mark = comment.start_mark + } + + // While the end of the former comment matches with + // the start of the following one, we know there's + // nothing in between and scanning is still safe. + stop_index = comment.scan_mark.index + } + + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: block_mark, + end_mark: block_mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + parser.simple_keys_by_tok = make(map[int]int) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. 
+ token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. 
+ start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { + return false + + } else if valid { + + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. 
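+		// [Editor's aside — not an upstream comment] This is the
+		// retroactive half of simple keys: for "a: 1" the scanner has
+		// already queued SCALAR("a") when it reaches ':', so the KEY token
+		// above (and the BLOCK-MAPPING-START below, if needed) is spliced
+		// in *before* that scalar at offset
+		// simple_key.token_number - parser.tokens_parsed.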
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + delete(parser.simple_keys_by_tok, simple_key.token_number) + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. 
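+// [Editor's aside — examples, not an upstream comment] Plain scalars are the
+// unquoted default style; per the start rules in the dispatcher above:
+//
+//	key: some value   -> SCALAR("some value",plain)
+//	- -1.5            -> SCALAR("-1.5",plain)  ('-' allowed: next char not blank)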
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + scan_mark := parser.mark + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if we just had a line comment under a sequence entry that + // looks more like a header to the following content. Similar to this: + // + // - # The comment + // - Some data + // + // If so, transform the line comment to a head comment and reposition. + if len(parser.comments) > 0 && len(parser.tokens) > 1 { + tokenA := parser.tokens[len(parser.tokens)-2] + tokenB := parser.tokens[len(parser.tokens)-1] + comment := &parser.comments[len(parser.comments)-1] + if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) { + // If it was in the prior line, reposition so it becomes a + // header of the follow up token. Otherwise, keep it in place + // so it becomes a header of the former. + comment.head = comment.line + comment.line = nil + if comment.start_mark.line == parser.mark.line-1 { + comment.token_mark = parser.mark + } + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + if !yaml_parser_scan_comments(parser, scan_mark) { + return false + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? 
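+	// [Editor's aside — example, not an upstream comment] Scanning
+	//
+	//	%YAML 1.1
+	//	%TAG !e! tag:example.com,2000:app/
+	//
+	// produces a VERSION-DIRECTIVE(1,1) token and then a TAG-DIRECTIVE
+	// token carrying handle !e! and prefix tag:example.com,2000:app/,
+	// via the two branches below.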
+ if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + // [Go] Discard this inline comment for the time being. + //if !yaml_parser_scan_line_comment(parser, start_mark) { + // return false + //} + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. 
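+	// (The value that follows has the form <major>.<minor>, e.g. the "1.1"
+	// in "%YAML 1.1".)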
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. 
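+	// For a directive such as "%TAG !e! tag:example.com,2000:app/", the
+	// handle and prefix scanned above are "!e!" and
+	// "tag:example.com,2000:app/" respectively.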
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. 
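+	// A tag must be followed by a blank or a line break; anything else is
+	// reported as a scanner error below.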
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] TODO Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. 
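+		// ('%' introduces a %XX-escaped octet, decoded below into raw bytes.)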
+ if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. 
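+		// Both indicator orders are accepted: "|-2" gives the chomping
+		// indicator first, while "|2-" (handled here) gives the indentation
+		// indicator first.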
+ + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + if !yaml_parser_scan_line_comment(parser, start_mark) { + return false + } + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. 
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. + if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. 
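+		// Quoting rules are applied within this loop: a doubled '' inside a
+		// single-quoted scalar yields one quote, and double-quoted scalars
+		// support backslash escapes, including \xXX, \uXXXX and \UXXXXXXXX
+		// code points. For instance, the document `k: "caf\u00e9"` should
+		// decode to the UTF-8 string "café".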
+ leading_blanks := false + for !is_blankz(parser.buffer, parser.buffer_pos) { + if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { + // Is is an escaped single quote. + s = append(s, '\'') + skip(parser) + skip(parser) + + } else if single && parser.buffer[parser.buffer_pos] == '\'' { + // It is a right single quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '"' { + // It is a right double quote. + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { + // It is an escaped line break. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + skip(parser) + skip_line(parser) + leading_blanks = true + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' { + // It is an escape sequence. + code_length := 0 + + // Check the escape character. + switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found unknown escape character") + return false + } + + skip(parser) + skip(parser) + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. + if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { + return false + } + for k := 0; k < code_length; k++ { + if !is_hex(parser.buffer, parser.buffer_pos+k) { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "did not find expected hexdecimal number") + return false + } + value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. + if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. 
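+				// Copy it through unchanged.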
+ s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. 
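+			// (As with quoted scalars, a lone line break inside a plain
+			// scalar folds into a single space when joined below.)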
+ if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
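+	// If the scalar consumed a line break, scanning resumes at the start of
+	// a fresh line, where a simple key may begin again.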
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} + +func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool { + if parser.newlines > 0 { + return true + } + + var start_mark yaml_mark_t + var text []byte + + for peek := 0; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + if parser.buffer[parser.buffer_pos+peek] == '#' { + seen := parser.mark.index+peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + if len(text) == 0 { + start_mark = parser.mark + } + text = read(parser, text) + } else { + skip(parser) + } + } + } + break + } + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + token_mark: token_mark, + start_mark: start_mark, + line: text, + }) + } + return true +} + +func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool { + token := parser.tokens[len(parser.tokens)-1] + + if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 { + token = parser.tokens[len(parser.tokens)-2] + } + + var token_mark = token.start_mark + var start_mark yaml_mark_t + var next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } + + var recent_empty = false + var first_empty = parser.newlines <= 1 + + var line = parser.mark.line + var column = parser.mark.column + + var text []byte + + // The foot line is the place where a comment must start to + // still be considered as a foot of the prior content. + // If there's some content in the currently parsed line, then + // the foot is the line below it. + var foot_line = -1 + if scan_mark.line > 0 { + foot_line = parser.mark.line-parser.newlines+1 + if parser.newlines == 0 && parser.mark.column > 1 { + foot_line++ + } + } + + var peek = 0 + for ; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + column++ + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + c := parser.buffer[parser.buffer_pos+peek] + var close_flow = parser.flow_level > 0 && (c == ']' || c == '}') + if close_flow || is_breakz(parser.buffer, parser.buffer_pos+peek) { + // Got line break or terminator. + if close_flow || !recent_empty { + if close_flow || first_empty && (start_mark.line == foot_line && token.typ != yaml_VALUE_TOKEN || start_mark.column-1 < next_indent) { + // This is the first empty line and there were no empty lines before, + // so this initial part of the comment is a foot of the prior token + // instead of being a head for the following one. Split it up. + // Alternatively, this might also be the last comment inside a flow + // scope, so it must be a footer. + if len(text) > 0 { + if start_mark.column-1 < next_indent { + // If dedented it's unrelated to the prior token. 
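+							// Anchor it at its own start instead.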
+ token_mark = start_mark + } + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + } else { + if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 { + text = append(text, '\n') + } + } + } + if !is_break(parser.buffer, parser.buffer_pos+peek) { + break + } + first_empty = false + recent_empty = true + column = 0 + line++ + continue + } + + if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.column) { + // The comment at the different indentation is a foot of the + // preceding data rather than a head of the upcoming one. + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + + if parser.buffer[parser.buffer_pos+peek] != '#' { + break + } + + if len(text) == 0 { + start_mark = yaml_mark_t{parser.mark.index + peek, line, column} + } else { + text = append(text, '\n') + } + + recent_empty = false + + // Consume until after the consumed comment line. + seen := parser.mark.index+peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + text = read(parser, text) + } else { + skip(parser) + } + } + + peek = 0 + column = 0 + line = parser.mark.line + next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } + } + + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: start_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column}, + head: text, + }) + } + return true +} diff --git a/vendor/gopkg.in/yaml.v3/sorter.go b/vendor/gopkg.in/yaml.v3/sorter.go new file mode 100644 index 0000000000..9210ece7e9 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/sorter.go @@ -0,0 +1,134 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
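+
+// sorter.go supplies the ordering used for mapping keys when encoding Go
+// maps: numeric and boolean keys compare by value, and string keys compare
+// with embedded integers taken numerically. A minimal sketch of the
+// observable behavior, assuming this package is imported as yaml:
+//
+//     out, _ := yaml.Marshal(map[string]int{"a10": 1, "a2": 2})
+//     // out lists "a2" before "a10", since 2 < 10.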
+ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + digits := false + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + digits = unicode.IsDigit(ar[i]) + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + if digits { + return al + } else { + return bl + } + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. 
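+// Less guarantees this by calling numLess only after keyFloat has reported
+// both values as numeric and their kinds have been found equal.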
+func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/gopkg.in/yaml.v3/writerc.go b/vendor/gopkg.in/yaml.v3/writerc.go new file mode 100644 index 0000000000..b8a116bf9a --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/writerc.go @@ -0,0 +1,48 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/vendor/gopkg.in/yaml.v3/yaml.go b/vendor/gopkg.in/yaml.v3/yaml.go new file mode 100644 index 0000000000..8cec6da48d --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/yaml.go @@ -0,0 +1,698 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package yaml implements YAML support for the Go language. 
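+// It broadly supports YAML 1.2, while preserving some YAML 1.1 behavior
+// for backward compatibility.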
+//
+// Source code and other details for the project are available at GitHub:
+//
+//   https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+	"sync"
+	"unicode/utf8"
+)
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document.
+type Unmarshaler interface {
+	UnmarshalYAML(value *Node) error
+}
+
+type obsoleteUnmarshaler interface {
+	UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     var t T
+//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, false)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+	parser      *parser
+	knownFields bool
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{
+		parser: newParserFromReader(r),
+	}
+}
+
+// KnownFields ensures that the keys in decoded mappings
+// exist as fields in the struct being decoded into.
+func (dec *Decoder) KnownFields(enable bool) {
+	dec.knownFields = enable
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
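+//
+// A minimal streaming sketch, assuming this package is imported as yaml:
+//
+//     dec := yaml.NewDecoder(strings.NewReader("a: 1\n---\na: 2\n"))
+//     for {
+//         var doc map[string]int
+//         if err := dec.Decode(&doc); err != nil {
+//             break // err is io.EOF once the stream is exhausted
+//         }
+//     }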
+func (dec *Decoder) Decode(v interface{}) (err error) {
+	d := newDecoder()
+	d.knownFields = dec.knownFields
+	defer handleErr(&err)
+	node := dec.parser.parse()
+	if node == nil {
+		return io.EOF
+	}
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(node, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Decode decodes the node and stores its data into the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (n *Node) Decode(v interface{}) (err error) {
+	d := newDecoder()
+	defer handleErr(&err)
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(n, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+	defer handleErr(&err)
+	d := newDecoder()
+	p := newParser(in)
+	defer p.destroy()
+	node := p.parse()
+	if node != nil {
+		v := reflect.ValueOf(out)
+		if v.Kind() == reflect.Ptr && !v.IsNil() {
+			v = v.Elem()
+		}
+		d.unmarshal(node, v)
+	}
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//     omitempty    Only include the field if it's not set to the zero
+//                  value for the type or to empty slices or maps.
+//                  Zero valued structs will be omitted if all their public
+//                  fields are zero, unless they implement an IsZero
+//                  method (see the IsZeroer interface type), in which
+//                  case the field will be excluded if IsZero returns true.
+//
+//     flow         Marshal using a flow style (useful for structs,
+//                  sequences and maps).
+//
+//     inline       Inline the field, which must be a struct or a map,
+//                  causing all of its fields or keys to be processed as if
+//                  they were part of the outer struct. For maps, keys must
+//                  not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+	encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
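+//
+// A minimal sketch, assuming this package is imported as yaml:
+//
+//     var buf bytes.Buffer
+//     enc := yaml.NewEncoder(&buf)
+//     _ = enc.Encode(map[string]int{"a": 1})
+//     _ = enc.Close()
+//     // buf.String() == "a: 1\n"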
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		encoder: newEncoderWithWriter(w),
+	}
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent documents will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e.encoder.marshalDoc("", reflect.ValueOf(v))
+	return nil
+}
+
+// Encode encodes value v and stores its representation in n.
+//
+// See the documentation for Marshal for details about the
+// conversion of Go values into YAML.
+func (n *Node) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(v))
+	e.finish()
+	p := newParser(e.out)
+	p.textless = true
+	defer p.destroy()
+	doc := p.parse()
+	*n = *doc.Content[0]
+	return nil
+}
+
+// SetIndent changes the indentation used when encoding.
+func (e *Encoder) SetIndent(spaces int) {
+	if spaces < 0 {
+		panic("yaml: cannot indent to a negative number of spaces")
+	}
+	e.encoder.indent = spaces
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+	defer handleErr(&err)
+	e.encoder.finish()
+	return nil
+}
+
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
+
+type Kind uint32
+
+const (
+	DocumentNode Kind = 1 << iota
+	SequenceNode
+	MappingNode
+	ScalarNode
+	AliasNode
+)
+
+type Style uint32
+
+const (
+	TaggedStyle Style = 1 << iota
+	DoubleQuotedStyle
+	SingleQuotedStyle
+	LiteralStyle
+	FoldedStyle
+	FlowStyle
+)
+
+// Node represents an element in the YAML document hierarchy. While documents
+// are typically encoded and decoded into higher level types, such as structs
+// and maps, Node is an intermediate representation that allows detailed
+// control over the content being decoded or encoded.
+//
+// It's worth noting that although Node offers access into details such as
+// line numbers, columns, and comments, the content when re-encoded will not
+// have its original textual representation preserved. An effort is made to
+// render the data pleasantly, and to preserve comments near the data they
+// describe, though.
+//
+// Values that make use of the Node type interact with the yaml package in the
+// same way any other type would do, by encoding and decoding yaml data
+// directly or indirectly into them.
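+// Decoding into a Node (or into a struct field of type Node) captures the
+// raw document structure, including comments and positions, which can then
+// be inspected, modified, and re-encoded.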
+//
+// For example:
+//
+//     var person struct {
+//         Name    string
+//         Address yaml.Node
+//     }
+//     err := yaml.Unmarshal(data, &person)
+//
+// Or by itself:
+//
+//     var person Node
+//     err := yaml.Unmarshal(data, &person)
+//
+type Node struct {
+	// Kind defines whether the node is a document, a mapping, a sequence,
+	// a scalar value, or an alias to another node. The specific data type of
+	// scalar nodes may be obtained via the ShortTag and LongTag methods.
+	Kind Kind
+
+	// Style allows customizing the appearance of the node in the tree.
+	Style Style
+
+	// Tag holds the YAML tag defining the data type for the value.
+	// When decoding, this field will always be set to the resolved tag,
+	// even when it wasn't explicitly provided in the YAML content.
+	// When encoding, if this field is unset the value type will be
+	// implied from the node properties, and if it is set, it will only
+	// be serialized into the representation if TaggedStyle is used or
+	// the implicit tag diverges from the provided one.
+	Tag string
+
+	// Value holds the unescaped and unquoted representation of the value.
+	Value string
+
+	// Anchor holds the anchor name for this node, which allows aliases to point to it.
+	Anchor string
+
+	// Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
+	Alias *Node
+
+	// Content holds contained nodes for documents, mappings, and sequences.
+	Content []*Node
+
+	// HeadComment holds any comments in the lines preceding the node and
+	// not separated by an empty line.
+	HeadComment string
+
+	// LineComment holds any comments at the end of the line where the node is.
+	LineComment string
+
+	// FootComment holds any comments following the node and before empty lines.
+	FootComment string
+
+	// Line and Column hold the node position in the decoded YAML text.
+	// These fields are not respected when encoding the node.
+	Line   int
+	Column int
+}
+
+// IsZero returns whether the node has all of its fields unset.
+func (n *Node) IsZero() bool {
+	return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil &&
+		n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0
+}
+
+
+// LongTag returns the long form of the tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) LongTag() string {
+	return longTag(n.ShortTag())
+}
+
+// ShortTag returns the short form of the YAML tag that indicates data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) ShortTag() string {
+	if n.indicatedString() {
+		return strTag
+	}
+	if n.Tag == "" || n.Tag == "!" {
+		switch n.Kind {
+		case MappingNode:
+			return mapTag
+		case SequenceNode:
+			return seqTag
+		case AliasNode:
+			if n.Alias != nil {
+				return n.Alias.ShortTag()
+			}
+		case ScalarNode:
+			tag, _ := resolve("", n.Value)
+			return tag
+		case 0:
+			// Special case to make the zero value convenient.
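+			// An all-zero Node therefore resolves to the null tag.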
+ if n.IsZero() { + return nullTag + } + } + return "" + } + return shortTag(n.Tag) +} + +func (n *Node) indicatedString() bool { + return n.Kind == ScalarNode && + (shortTag(n.Tag) == strTag || + (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0) +} + +// SetString is a convenience function that sets the node to a string value +// and defines its style in a pleasant way depending on its content. +func (n *Node) SetString(s string) { + n.Kind = ScalarNode + if utf8.ValidString(s) { + n.Value = s + n.Tag = strTag + } else { + n.Value = encodeBase64(s) + n.Tag = binaryTag + } + if strings.Contains(n.Value, "\n") { + n.Style = LiteralStyle + } +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int + + // InlineUnmarshalers holds indexes to inlined fields that + // contain unmarshaler values. + InlineUnmarshalers [][]int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. + Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex +var unmarshalerType reflect.Type + +func init() { + var v Unmarshaler + unmarshalerType = reflect.ValueOf(&v).Elem().Type() +} + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + inlineUnmarshalers := [][]int(nil) + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct, reflect.Ptr: + ftype := field.Type + for ftype.Kind() == reflect.Ptr { + ftype = ftype.Elem() + } + if ftype.Kind() != reflect.Struct { + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + if reflect.PtrTo(ftype).Implements(unmarshalerType) { + 
inlineUnmarshalers = append(inlineUnmarshalers, []int{i}) + } else { + sinfo, err := getStructInfo(ftype) + if err != nil { + return nil, err + } + for _, index := range sinfo.InlineUnmarshalers { + inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...)) + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + } + default: + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + InlineUnmarshalers: inlineUnmarshalers, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. +type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/gopkg.in/yaml.v3/yamlh.go b/vendor/gopkg.in/yaml.v3/yamlh.go new file mode 100644 index 0000000000..7c6d007706 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/yamlh.go @@ -0,0 +1,807 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. +} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0 + + yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. 
+	yaml_STREAM_END_TOKEN // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
+	yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
+
+	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+	yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
+	yaml_KEY_TOKEN // A KEY token.
+	yaml_VALUE_TOKEN // A VALUE token.
+
+	yaml_ALIAS_TOKEN // An ALIAS token.
+	yaml_ANCHOR_TOKEN // An ANCHOR token.
+	yaml_TAG_TOKEN // A TAG token.
+	yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+	switch tt {
+	case yaml_NO_TOKEN:
+		return "yaml_NO_TOKEN"
+	case yaml_STREAM_START_TOKEN:
+		return "yaml_STREAM_START_TOKEN"
+	case yaml_STREAM_END_TOKEN:
+		return "yaml_STREAM_END_TOKEN"
+	case yaml_VERSION_DIRECTIVE_TOKEN:
+		return "yaml_VERSION_DIRECTIVE_TOKEN"
+	case yaml_TAG_DIRECTIVE_TOKEN:
+		return "yaml_TAG_DIRECTIVE_TOKEN"
+	case yaml_DOCUMENT_START_TOKEN:
+		return "yaml_DOCUMENT_START_TOKEN"
+	case yaml_DOCUMENT_END_TOKEN:
+		return "yaml_DOCUMENT_END_TOKEN"
+	case yaml_BLOCK_SEQUENCE_START_TOKEN:
+		return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+	case yaml_BLOCK_MAPPING_START_TOKEN:
+		return "yaml_BLOCK_MAPPING_START_TOKEN"
+	case yaml_BLOCK_END_TOKEN:
+		return "yaml_BLOCK_END_TOKEN"
+	case yaml_FLOW_SEQUENCE_START_TOKEN:
+		return "yaml_FLOW_SEQUENCE_START_TOKEN"
+	case yaml_FLOW_SEQUENCE_END_TOKEN:
+		return "yaml_FLOW_SEQUENCE_END_TOKEN"
+	case yaml_FLOW_MAPPING_START_TOKEN:
+		return "yaml_FLOW_MAPPING_START_TOKEN"
+	case yaml_FLOW_MAPPING_END_TOKEN:
+		return "yaml_FLOW_MAPPING_END_TOKEN"
+	case yaml_BLOCK_ENTRY_TOKEN:
+		return "yaml_BLOCK_ENTRY_TOKEN"
+	case yaml_FLOW_ENTRY_TOKEN:
+		return "yaml_FLOW_ENTRY_TOKEN"
+	case yaml_KEY_TOKEN:
+		return "yaml_KEY_TOKEN"
+	case yaml_VALUE_TOKEN:
+		return "yaml_VALUE_TOKEN"
+	case yaml_ALIAS_TOKEN:
+		return "yaml_ALIAS_TOKEN"
+	case yaml_ANCHOR_TOKEN:
+		return "yaml_ANCHOR_TOKEN"
+	case yaml_TAG_TOKEN:
+		return "yaml_TAG_TOKEN"
+	case yaml_SCALAR_TOKEN:
+		return "yaml_SCALAR_TOKEN"
+	}
+	return ""
+}
+
+// The token structure.
+type yaml_token_t struct {
+	// The token type.
+	typ yaml_token_type_t
+
+	// The start/end of the token.
+	start_mark, end_mark yaml_mark_t
+
+	// The stream encoding (for yaml_STREAM_START_TOKEN).
+	encoding yaml_encoding_t
+
+	// The alias/anchor/scalar value or tag/tag directive handle
+	// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+	value []byte
+
+	// The tag suffix (for yaml_TAG_TOKEN).
+	suffix []byte
+
+	// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+	prefix []byte
+
+	// The scalar style (for yaml_SCALAR_TOKEN).
+	style yaml_scalar_style_t
+
+	// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+	major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+	// An empty event.
+	yaml_NO_EVENT yaml_event_type_t = iota
+
+	yaml_STREAM_START_EVENT // A STREAM-START event.
+	yaml_STREAM_END_EVENT // A STREAM-END event.
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. + yaml_TAIL_COMMENT_EVENT +) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", + yaml_TAIL_COMMENT_EVENT: "tail comment", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The comments + head_comment []byte + line_comment []byte + foot_comment []byte + tail_comment []byte + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. 
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. 
+type yaml_parser_state_t int
+
+const (
+	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
+	yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
+	yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+	yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+	yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
+	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+	yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+	yaml_PARSE_END_STATE // Expect nothing.
+) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + newlines int // The number of line breaks since last non-break/non-blank character + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. 
+ + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Comments + + head_comment []byte // The current head comments + line_comment []byte // The current line comments + foot_comment []byte // The current foot comments + tail_comment []byte // Foot comment that happens at the end of a block. + stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc) + + comments []yaml_comment_t // The folded comments for all parsed tokens + comments_head int + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +type yaml_comment_t struct { + + scan_mark yaml_mark_t // Position where scanning for comments started + token_mark yaml_mark_t // Position after which tokens will be associated with this comment + start_mark yaml_mark_t // Position of '#' comment mark + end_mark yaml_mark_t // Position where comment terminated + + head []byte + line []byte + foot []byte +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. 
+ yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. +type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_writer io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + space_above bool // Is there's an empty line above? + foot_indent int // The indent used to write the foot comment above, or -1 if none. + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. 
+	scalar_data struct {
+		value []byte // The scalar value.
+		multiline bool // Does the scalar contain line breaks?
+		flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
+		block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+		single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+		block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+		style yaml_scalar_style_t // The output style.
+	}
+
+	// Comments
+	head_comment []byte
+	line_comment []byte
+	foot_comment []byte
+	tail_comment []byte
+
+	key_line_comment []byte
+
+	// Dumper stuff
+
+	opened bool // If the stream was already opened?
+	closed bool // If the stream was already closed?
+
+	// The information associated with the document nodes.
+	anchors *struct {
+		references int // The number of references.
+		anchor int // The anchor id.
+		serialized bool // If the node has been emitted?
+	}
+
+	last_anchor_id int // The last assigned anchor id.
+
+	document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/gopkg.in/yaml.v3/yamlprivateh.go b/vendor/gopkg.in/yaml.v3/yamlprivateh.go
new file mode 100644
index 0000000000..e88f9c54ae
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/yamlprivateh.go
@@ -0,0 +1,198 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+const (
+	// The size of the input raw buffer.
+	input_raw_buffer_size = 512
+
+	// The size of the input buffer.
+	// It should be possible to decode the whole raw buffer.
+	input_buffer_size = input_raw_buffer_size * 3
+
+	// The size of the output buffer.
+	output_buffer_size = 128
+
+	// The size of the output raw buffer.
+	// It should be possible to encode the whole output buffer.
+	output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+	// The size of other stacks and queues.
+	initial_stack_size = 16
+	initial_queue_size = 16
+	initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( + // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( + // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. 
+func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( + // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/modules.txt b/vendor/modules.txt index b60cfcda0a..0097899f6c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -115,9 +115,6 @@ github.com/google/go-tpm/tpm2/transport github.com/google/go-tpm/tpmutil github.com/google/go-tpm/tpmutil/mssim github.com/google/go-tpm/tpmutil/tbs -# github.com/google/goterm v0.0.0-20200907032337-555d40f16ae2 -## explicit -github.com/google/goterm/term # github.com/google/uuid v1.3.0 ## explicit github.com/google/uuid @@ -140,6 +137,8 @@ github.com/hugelgupf/vmtest/scriptvm github.com/hugelgupf/vmtest/testtmp github.com/hugelgupf/vmtest/vminit/gouinit github.com/hugelgupf/vmtest/vminit/shelluinit +github.com/hugelgupf/vmtest/vminit/shutdownafter +github.com/hugelgupf/vmtest/vminit/vmmount # github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2 ## explicit; go 1.20 github.com/insomniacslk/dhcp/dhcpv4 @@ -266,7 +265,7 @@ github.com/sahilm/fuzzy # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag -# github.com/u-root/gobusybox/src v0.0.0-20240212035024-44ff0bf359ad +# github.com/u-root/gobusybox/src v0.0.0-20240225013946-a274a8d5d83a ## explicit; go 1.20 github.com/u-root/gobusybox/src/pkg/bb github.com/u-root/gobusybox/src/pkg/bb/bbinternal @@ -276,7 +275,7 @@ github.com/u-root/gobusybox/src/pkg/uflag # github.com/u-root/iscsinl v0.1.1-0.20210528121423-84c32645822a ## explicit; go 1.13 github.com/u-root/iscsinl -# github.com/u-root/mkuimage v0.0.0-20240216050315-5f527d1fae2e +# github.com/u-root/mkuimage v0.0.0-20240225063926-11a3bcc79c2a ## explicit; go 1.21 github.com/u-root/mkuimage/cpio github.com/u-root/mkuimage/cpio/internal/upath @@ -285,6 +284,8 @@ github.com/u-root/mkuimage/ldd github.com/u-root/mkuimage/uimage github.com/u-root/mkuimage/uimage/builder github.com/u-root/mkuimage/uimage/initramfs +github.com/u-root/mkuimage/uimage/mkuimage +github.com/u-root/mkuimage/uimage/templates # github.com/u-root/uio v0.0.0-20240209044354-b3d14b93376a ## explicit; go 1.21 github.com/u-root/uio/cp @@ -312,7 +313,7 @@ github.com/vtolstov/go-ioctl # golang.org/x/arch v0.2.0 ## explicit; go 1.17 golang.org/x/arch/x86/x86asm -# golang.org/x/crypto v0.18.0 +# golang.org/x/crypto v0.19.0 ## explicit; go 1.18 golang.org/x/crypto/blowfish golang.org/x/crypto/cast5 @@ -326,16 +327,16 @@ golang.org/x/crypto/internal/poly1305 golang.org/x/crypto/ssh golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/ssh/knownhosts -# golang.org/x/exp v0.0.0-20231219180239-dc181d75b848 +# golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 ## explicit; go 1.20 golang.org/x/exp/constraints -# golang.org/x/mod v0.14.0 +golang.org/x/exp/maps +# golang.org/x/mod v0.15.0 ## explicit; go 1.18 golang.org/x/mod/internal/lazyregexp -golang.org/x/mod/modfile golang.org/x/mod/module 
golang.org/x/mod/semver -# golang.org/x/net v0.20.0 +# golang.org/x/net v0.21.0 ## explicit; go 1.18 golang.org/x/net/bpf golang.org/x/net/icmp @@ -346,13 +347,13 @@ golang.org/x/net/ipv6 # golang.org/x/sync v0.6.0 ## explicit; go 1.18 golang.org/x/sync/errgroup -# golang.org/x/sys v0.16.0 +# golang.org/x/sys v0.17.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/term v0.16.0 +# golang.org/x/term v0.17.0 ## explicit; go 1.18 golang.org/x/term # golang.org/x/text v0.14.0 @@ -371,7 +372,7 @@ golang.org/x/text/message golang.org/x/text/message/catalog golang.org/x/text/transform golang.org/x/text/unicode/norm -# golang.org/x/tools v0.17.0 +# golang.org/x/tools v0.18.0 ## explicit; go 1.18 golang.org/x/tools/go/ast/astutil golang.org/x/tools/go/gcexportdata @@ -397,12 +398,16 @@ golang.org/x/tools/internal/versions # gopkg.in/yaml.v2 v2.2.8 ## explicit gopkg.in/yaml.v2 +# gopkg.in/yaml.v3 v3.0.0 +## explicit +gopkg.in/yaml.v3 # mvdan.cc/sh/v3 v3.7.0 ## explicit; go 1.19 mvdan.cc/sh/v3/expand mvdan.cc/sh/v3/fileutil mvdan.cc/sh/v3/interp mvdan.cc/sh/v3/pattern +mvdan.cc/sh/v3/shell mvdan.cc/sh/v3/syntax # pack.ag/tftp v1.0.1-0.20181129014014-07909dfbde3c ## explicit diff --git a/vendor/mvdan.cc/sh/v3/shell/doc.go b/vendor/mvdan.cc/sh/v3/shell/doc.go new file mode 100644 index 0000000000..81e0c2f768 --- /dev/null +++ b/vendor/mvdan.cc/sh/v3/shell/doc.go @@ -0,0 +1,14 @@ +// Copyright (c) 2017, Daniel Martí +// See LICENSE for licensing information + +// Package shell contains high-level features that use the syntax, expand, and +// interp packages under the hood. +// +// Please note that this package uses POSIX Shell syntax. As such, path names on +// Windows need to use double backslashes or be within single quotes when given +// to functions like Fields. For example: +// +// shell.Fields("echo /foo/bar") // on Unix-like +// shell.Fields("echo C:\\foo\\bar") // on Windows +// shell.Fields("echo 'C:\foo\bar'") // on Windows, with quotes +package shell diff --git a/vendor/mvdan.cc/sh/v3/shell/expand.go b/vendor/mvdan.cc/sh/v3/shell/expand.go new file mode 100644 index 0000000000..e286c0a95a --- /dev/null +++ b/vendor/mvdan.cc/sh/v3/shell/expand.go @@ -0,0 +1,63 @@ +// Copyright (c) 2018, Daniel Martí +// See LICENSE for licensing information + +package shell + +import ( + "os" + "strings" + + "mvdan.cc/sh/v3/expand" + "mvdan.cc/sh/v3/syntax" +) + +// Expand performs shell expansion on s as if it were within double quotes, +// using env to resolve variables. This includes parameter expansion, arithmetic +// expansion, and quote removal. +// +// If env is nil, the current environment variables are used. Empty variables +// are treated as unset; to support variables which are set but empty, use the +// expand package directly. +// +// Command substitutions like $(echo foo) aren't supported to avoid running +// arbitrary code. To support those, use an interpreter with the expand package. +// +// An error will be reported if the input string had invalid syntax. +func Expand(s string, env func(string) string) (string, error) { + p := syntax.NewParser() + word, err := p.Document(strings.NewReader(s)) + if err != nil { + return "", err + } + if env == nil { + env = os.Getenv + } + cfg := &expand.Config{Env: expand.FuncEnviron(env)} + return expand.Document(cfg, word) +} + +// Fields performs shell expansion on s as if it were a command's arguments, +// using env to resolve variables. 
It is similar to Expand, but includes brace +// expansion, tilde expansion, and globbing. +// +// If env is nil, the current environment variables are used. Empty variables +// are treated as unset; to support variables which are set but empty, use the +// expand package directly. +// +// An error will be reported if the input string had invalid syntax. +func Fields(s string, env func(string) string) ([]string, error) { + p := syntax.NewParser() + var words []*syntax.Word + err := p.Words(strings.NewReader(s), func(w *syntax.Word) bool { + words = append(words, w) + return true + }) + if err != nil { + return nil, err + } + if env == nil { + env = os.Getenv + } + cfg := &expand.Config{Env: expand.FuncEnviron(env)} + return expand.Fields(cfg, words...) +}
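
Two short usage sketches for the newly vendored packages follow. They are illustrative notes, not part of the patch: the package paths and function signatures are taken from the vendored sources above, but the surrounding main programs and the sample input are hypothetical.

Decoding into a yaml.Node (gopkg.in/yaml.v3), as described in the Node documentation above, preserves kind, tag, and position details that decoding into a plain map would lose:

package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v3"
)

func main() {
	data := []byte("commands:\n  core:\n    - github.com/u-root/u-root/cmds/core/*\n")
	var doc yaml.Node
	if err := yaml.Unmarshal(data, &doc); err != nil {
		log.Fatal(err)
	}
	// doc is a DocumentNode; Content[0] is the top-level mapping, whose
	// own Content alternates key and value nodes.
	root := doc.Content[0]
	fmt.Println(root.Kind == yaml.MappingNode) // true
	fmt.Println(root.Content[0].Value)         // "commands"
	fmt.Println(root.Content[0].Line)          // 1
}

And mvdan.cc/sh/v3/shell, whose Expand and Fields functions are defined in the hunks above; per their documentation, a nil env falls back to the current environment:

package main

import (
	"fmt"
	"log"

	"mvdan.cc/sh/v3/shell"
)

func main() {
	// Expand treats the string as if it were inside double quotes:
	// parameter and arithmetic expansion, but no command substitution.
	s, err := shell.Expand("$HOME/go/bin", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(s)

	// Fields splits the string into command arguments, additionally
	// performing brace expansion, tilde expansion, and globbing.
	args, err := shell.Fields("ls -l ~/go", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(args)
}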