diff --git a/cli/clistat/cgroup.go b/cli/clistat/cgroup.go
new file mode 100644
index 0000000000000..e22f0c4309b7a
--- /dev/null
+++ b/cli/clistat/cgroup.go
@@ -0,0 +1,311 @@
+package clistat
+
+import (
+ "bufio"
+ "bytes"
+ "strconv"
+ "strings"
+
+ "github.com/spf13/afero"
+ "golang.org/x/xerrors"
+ "tailscale.com/types/ptr"
+)
+
+// Paths for CGroupV1.
+// Ref: https://www.kernel.org/doc/Documentation/cgroup-v1/cpuacct.txt
+const (
+ // CPU usage of all tasks in cgroup in nanoseconds.
+ cgroupV1CPUAcctUsage = "/sys/fs/cgroup/cpu/cpuacct.usage"
+ // Alternate path
+ cgroupV1CPUAcctUsageAlt = "/sys/fs/cgroup/cpu,cpuacct/cpuacct.usage"
+ // CFS quota and period for cgroup in MICROseconds
+ cgroupV1CFSQuotaUs = "/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_quota_us"
+ cgroupV1CFSPeriodUs = "/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_period_us"
+ // Maximum memory usable by cgroup in bytes
+ cgroupV1MemoryMaxUsageBytes = "/sys/fs/cgroup/memory/memory.max_usage_in_bytes"
+ // Current memory usage of cgroup in bytes
+ cgroupV1MemoryUsageBytes = "/sys/fs/cgroup/memory/memory.usage_in_bytes"
+ // Other memory stats - we are interested in total_inactive_file
+ cgroupV1MemoryStat = "/sys/fs/cgroup/memory/memory.stat"
+)
+
+// Paths for CGroupV2.
+// Ref: https://docs.kernel.org/admin-guide/cgroup-v2.html
+const (
+ // Contains quota and period in microseconds separated by a space.
+ cgroupV2CPUMax = "/sys/fs/cgroup/cpu.max"
+ // Contains current CPU usage under usage_usec
+ cgroupV2CPUStat = "/sys/fs/cgroup/cpu.stat"
+ // Contains current cgroup memory usage in bytes.
+ cgroupV2MemoryUsageBytes = "/sys/fs/cgroup/memory.current"
+ // Contains max cgroup memory usage in bytes.
+ cgroupV2MemoryMaxBytes = "/sys/fs/cgroup/memory.max"
+ // Other memory stats - we are interested in total_inactive_file
+ cgroupV2MemoryStat = "/sys/fs/cgroup/memory.stat"
+)
+
+// ContainerCPU returns the CPU usage of the container cgroup.
+// This is calculated as difference of two samples of the
+// CPU usage of the container cgroup.
+// The total is read from the relevant path in /sys/fs/cgroup.
+// If there is no limit set, the total is assumed to be the
+// number of host cores multiplied by the CFS period.
+// If the system is not containerized, this always returns nil.
+func (s *Statter) ContainerCPU() (*Result, error) {
+ // Firstly, check if we are containerized.
+ if ok, err := IsContainerized(s.fs); err != nil || !ok {
+ return nil, nil //nolint: nilnil
+ }
+
+ total, err := s.cGroupCPUTotal()
+ if err != nil {
+ return nil, xerrors.Errorf("get total cpu: %w", err)
+ }
+
+ used1, err := s.cGroupCPUUsed()
+ if err != nil {
+ return nil, xerrors.Errorf("get cgroup CPU usage: %w", err)
+ }
+
+ // The measurements in /sys/fs/cgroup are counters.
+ // We need to wait for a bit to get a difference.
+ // Note that someone could reset the counter in the meantime.
+ // We can't do anything about that.
+ s.wait(s.sampleInterval)
+
+ used2, err := s.cGroupCPUUsed()
+ if err != nil {
+ return nil, xerrors.Errorf("get cgroup CPU usage: %w", err)
+ }
+
+ if used2 < used1 {
+ // Someone reset the counter. Best we can do is count from zero.
+ used1 = 0
+ }
+
+ r := &Result{
+ Unit: "cores",
+ Used: used2 - used1,
+ Total: ptr.To(total),
+ }
+ return r, nil
+}
+
+func (s *Statter) cGroupCPUTotal() (used float64, err error) {
+ if s.isCGroupV2() {
+ return s.cGroupV2CPUTotal()
+ }
+
+ // Fall back to CGroupv1
+ return s.cGroupV1CPUTotal()
+}
+
+func (s *Statter) cGroupCPUUsed() (used float64, err error) {
+ if s.isCGroupV2() {
+ return s.cGroupV2CPUUsed()
+ }
+
+ return s.cGroupV1CPUUsed()
+}
+
+func (s *Statter) isCGroupV2() bool {
+ // Check for the presence of /sys/fs/cgroup/cpu.max
+ _, err := s.fs.Stat(cgroupV2CPUMax)
+ return err == nil
+}
+
+func (s *Statter) cGroupV2CPUUsed() (used float64, err error) {
+ usageUs, err := readInt64Prefix(s.fs, cgroupV2CPUStat, "usage_usec")
+ if err != nil {
+ return 0, xerrors.Errorf("get cgroupv2 cpu used: %w", err)
+ }
+ periodUs, err := readInt64SepIdx(s.fs, cgroupV2CPUMax, " ", 1)
+ if err != nil {
+ return 0, xerrors.Errorf("get cpu period: %w", err)
+ }
+
+ return float64(usageUs) / float64(periodUs), nil
+}
+
+func (s *Statter) cGroupV2CPUTotal() (total float64, err error) {
+ var quotaUs, periodUs int64
+ periodUs, err = readInt64SepIdx(s.fs, cgroupV2CPUMax, " ", 1)
+ if err != nil {
+ return 0, xerrors.Errorf("get cpu period: %w", err)
+ }
+
+ quotaUs, err = readInt64SepIdx(s.fs, cgroupV2CPUMax, " ", 0)
+ if err != nil {
+ // Fall back to number of cores
+ quotaUs = int64(s.nproc) * periodUs
+ }
+
+ return float64(quotaUs) / float64(periodUs), nil
+}
+
+func (s *Statter) cGroupV1CPUTotal() (float64, error) {
+ periodUs, err := readInt64(s.fs, cgroupV1CFSPeriodUs)
+ if err != nil {
+ return 0, xerrors.Errorf("read cpu period: %w", err)
+ }
+
+ quotaUs, err := readInt64(s.fs, cgroupV1CFSQuotaUs)
+ if err != nil {
+ return 0, xerrors.Errorf("read cpu quota: %w", err)
+ }
+
+ if quotaUs < 0 {
+ // Fall back to the number of cores
+ quotaUs = int64(s.nproc) * periodUs
+ }
+
+ return float64(quotaUs) / float64(periodUs), nil
+}
+
+func (s *Statter) cGroupV1CPUUsed() (float64, error) {
+ usageNs, err := readInt64(s.fs, cgroupV1CPUAcctUsage)
+ if err != nil {
+ // try alternate path
+ usageNs, err = readInt64(s.fs, cgroupV1CPUAcctUsageAlt)
+ if err != nil {
+ return 0, xerrors.Errorf("read cpu used: %w", err)
+ }
+ }
+
+ // usage is in ns, convert to us
+ usageNs /= 1000
+ periodUs, err := readInt64(s.fs, cgroupV1CFSPeriodUs)
+ if err != nil {
+ return 0, xerrors.Errorf("get cpu period: %w", err)
+ }
+
+ return float64(usageNs) / float64(periodUs), nil
+}
+
+// ContainerMemory returns the memory usage of the container cgroup.
+// If the system is not containerized, this always returns nil.
+func (s *Statter) ContainerMemory() (*Result, error) {
+ if ok, err := IsContainerized(s.fs); err != nil || !ok {
+ return nil, nil //nolint:nilnil
+ }
+
+ if s.isCGroupV2() {
+ return s.cGroupV2Memory()
+ }
+
+ // Fall back to CGroupv1
+ return s.cGroupV1Memory()
+}
+
+func (s *Statter) cGroupV2Memory() (*Result, error) {
+ maxUsageBytes, err := readInt64(s.fs, cgroupV2MemoryMaxBytes)
+ if err != nil {
+ return nil, xerrors.Errorf("read memory total: %w", err)
+ }
+
+ currUsageBytes, err := readInt64(s.fs, cgroupV2MemoryUsageBytes)
+ if err != nil {
+ return nil, xerrors.Errorf("read memory usage: %w", err)
+ }
+
+ inactiveFileBytes, err := readInt64Prefix(s.fs, cgroupV2MemoryStat, "inactive_file")
+ if err != nil {
+ return nil, xerrors.Errorf("read memory stats: %w", err)
+ }
+
+ return &Result{
+ Total: ptr.To(float64(maxUsageBytes)),
+ Used: float64(currUsageBytes - inactiveFileBytes),
+ Unit: "B",
+ }, nil
+}
+
+func (s *Statter) cGroupV1Memory() (*Result, error) {
+ maxUsageBytes, err := readInt64(s.fs, cgroupV1MemoryMaxUsageBytes)
+ if err != nil {
+ return nil, xerrors.Errorf("read memory total: %w", err)
+ }
+
+ // Read the current memory usage of the cgroup, in bytes.
+ usageBytes, err := readInt64(s.fs, cgroupV1MemoryUsageBytes)
+ if err != nil {
+ return nil, xerrors.Errorf("read memory usage: %w", err)
+ }
+
+ totalInactiveFileBytes, err := readInt64Prefix(s.fs, cgroupV1MemoryStat, "total_inactive_file")
+ if err != nil {
+ return nil, xerrors.Errorf("read memory stats: %w", err)
+ }
+
+ // Total memory used is usage - total_inactive_file
+ return &Result{
+ Total: ptr.To(float64(maxUsageBytes)),
+ Used: float64(usageBytes - totalInactiveFileBytes),
+ Unit: "B",
+ }, nil
+}
+
+// read an int64 value from path
+func readInt64(fs afero.Fs, path string) (int64, error) {
+ data, err := afero.ReadFile(fs, path)
+ if err != nil {
+ return 0, xerrors.Errorf("read %s: %w", path, err)
+ }
+
+ val, err := strconv.ParseInt(string(bytes.TrimSpace(data)), 10, 64)
+ if err != nil {
+ return 0, xerrors.Errorf("parse %s: %w", path, err)
+ }
+
+ return val, nil
+}
+
+// read an int64 value from path at field idx separated by sep
+func readInt64SepIdx(fs afero.Fs, path, sep string, idx int) (int64, error) {
+ data, err := afero.ReadFile(fs, path)
+ if err != nil {
+ return 0, xerrors.Errorf("read %s: %w", path, err)
+ }
+
+ parts := strings.Split(string(data), sep)
+ if len(parts) <= idx {
+ return 0, xerrors.Errorf("expected line %q to have at least %d parts", string(data), idx+1)
+ }
+
+ val, err := strconv.ParseInt(strings.TrimSpace(parts[idx]), 10, 64)
+ if err != nil {
+ return 0, xerrors.Errorf("parse %s: %w", path, err)
+ }
+
+ return val, nil
+}
+
+// read the first int64 value from path prefixed with prefix
+func readInt64Prefix(fs afero.Fs, path, prefix string) (int64, error) {
+ data, err := afero.ReadFile(fs, path)
+ if err != nil {
+ return 0, xerrors.Errorf("read %s: %w", path, err)
+ }
+
+ scn := bufio.NewScanner(bytes.NewReader(data))
+ for scn.Scan() {
+ line := scn.Text()
+ if !strings.HasPrefix(line, prefix) {
+ continue
+ }
+
+ parts := strings.Fields(line)
+ if len(parts) != 2 {
+ return 0, xerrors.Errorf("parse %s: expected two fields but got %s", path, line)
+ }
+
+ val, err := strconv.ParseInt(strings.TrimSpace(parts[1]), 10, 64)
+ if err != nil {
+ return 0, xerrors.Errorf("parse %s: %w", path, err)
+ }
+
+ return val, nil
+ }
+
+ return 0, xerrors.Errorf("parse %s: did not find line with prefix %s", path, prefix)
+}
diff --git a/cli/clistat/container.go b/cli/clistat/container.go
new file mode 100644
index 0000000000000..079bffe5e3c43
--- /dev/null
+++ b/cli/clistat/container.go
@@ -0,0 +1,61 @@
+package clistat
+
+import (
+ "bufio"
+ "bytes"
+ "os"
+
+ "github.com/spf13/afero"
+ "golang.org/x/xerrors"
+)
+
+const (
+ procMounts = "/proc/mounts"
+ procOneCgroup = "/proc/1/cgroup"
+)
+
+// IsContainerized returns whether the host is containerized.
+// This is adapted from https://github.com/elastic/go-sysinfo/tree/main/providers/linux/container.go#L31
+// with modifications to support Sysbox containers.
+// On non-Linux platforms, it always returns false.
+func IsContainerized(fs afero.Fs) (ok bool, err error) {
+ cgData, err := afero.ReadFile(fs, procOneCgroup)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, xerrors.Errorf("read file %s: %w", procOneCgroup, err)
+ }
+
+ scn := bufio.NewScanner(bytes.NewReader(cgData))
+ for scn.Scan() {
+ line := scn.Bytes()
+ if bytes.Contains(line, []byte("docker")) ||
+ bytes.Contains(line, []byte(".slice")) ||
+ bytes.Contains(line, []byte("lxc")) ||
+ bytes.Contains(line, []byte("kubepods")) {
+ return true, nil
+ }
+ }
+
+ // Last-ditch effort to detect Sysbox containers.
+ // Check if we have anything mounted as type sysboxfs in /proc/mounts
+ mountsData, err := afero.ReadFile(fs, procMounts)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, xerrors.Errorf("read file %s: %w", procMounts, err)
+ }
+
+ scn = bufio.NewScanner(bytes.NewReader(mountsData))
+ for scn.Scan() {
+ line := scn.Bytes()
+ if bytes.Contains(line, []byte("sysboxfs")) {
+ return true, nil
+ }
+ }
+
+ // If we get here, we are _probably_ not running in a container.
+ return false, nil
+}
diff --git a/cli/clistat/disk.go b/cli/clistat/disk.go
new file mode 100644
index 0000000000000..54731dfd9737f
--- /dev/null
+++ b/cli/clistat/disk.go
@@ -0,0 +1,26 @@
+//go:build !windows
+
+package clistat
+
+import (
+ "syscall"
+
+ "tailscale.com/types/ptr"
+)
+
+// Disk returns the disk usage of the given path.
+// If path is empty, it returns the usage of the root directory.
+func (*Statter) Disk(path string) (*Result, error) {
+ if path == "" {
+ path = "/"
+ }
+ var stat syscall.Statfs_t
+ if err := syscall.Statfs(path, &stat); err != nil {
+ return nil, err
+ }
+ var r Result
+ r.Total = ptr.To(float64(stat.Blocks * uint64(stat.Bsize)))
+ r.Used = float64(stat.Blocks-stat.Bfree) * float64(stat.Bsize)
+ r.Unit = "B"
+ return &r, nil
+}
diff --git a/cli/clistat/disk_windows.go b/cli/clistat/disk_windows.go
new file mode 100644
index 0000000000000..d11995e2c2980
--- /dev/null
+++ b/cli/clistat/disk_windows.go
@@ -0,0 +1,35 @@
+package clistat
+
+import (
+ "golang.org/x/sys/windows"
+ "tailscale.com/types/ptr"
+)
+
+// Disk returns the disk usage of the given path.
+// If path is empty, it defaults to C:\
+func (*Statter) Disk(path string) (*Result, error) {
+ if path == "" {
+ path = `C:\`
+ }
+
+ pathPtr, err := windows.UTF16PtrFromString(path)
+ if err != nil {
+ return nil, err
+ }
+
+ var freeBytes, totalBytes, availBytes uint64
+ if err := windows.GetDiskFreeSpaceEx(
+ pathPtr,
+ &freeBytes,
+ &totalBytes,
+ &availBytes,
+ ); err != nil {
+ return nil, err
+ }
+
+ var r Result
+ r.Total = ptr.To(float64(totalBytes))
+ r.Used = float64(totalBytes - freeBytes)
+ r.Unit = "B"
+ return &r, nil
+}
diff --git a/cli/clistat/stat.go b/cli/clistat/stat.go
new file mode 100644
index 0000000000000..1c19e33ef5f2d
--- /dev/null
+++ b/cli/clistat/stat.go
@@ -0,0 +1,191 @@
+package clistat
+
+import (
+ "fmt"
+ "math"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/dustin/go-humanize"
+ "github.com/elastic/go-sysinfo"
+ "github.com/spf13/afero"
+ "golang.org/x/xerrors"
+ "tailscale.com/types/ptr"
+
+ sysinfotypes "github.com/elastic/go-sysinfo/types"
+)
+
+// Result is a generic result type for a statistic.
+// Total is the total amount of the resource available.
+// It is nil if the resource is not a finite quantity.
+// Unit is the unit of the resource.
+// Used is the amount of the resource used.
+type Result struct {
+ Total *float64 `json:"total"`
+ Unit string `json:"unit"`
+ Used float64 `json:"used"`
+}
+
+// String returns a human-readable representation of the result.
+func (r *Result) String() string {
+ if r == nil {
+ return "-"
+ }
+
+ var usedDisplay, totalDisplay string
+ var usedScaled, totalScaled float64
+ var usedPrefix, totalPrefix string
+ usedScaled, usedPrefix = humanize.ComputeSI(r.Used)
+ usedDisplay = humanizeFloat(usedScaled)
+ if r.Total != nil {
+ totalScaled, totalPrefix = humanize.ComputeSI(*r.Total)
+ totalDisplay = humanizeFloat(totalScaled)
+ }
+
+ var sb strings.Builder
+ _, _ = sb.WriteString(usedDisplay)
+
+ // If the unit prefixes of the used and total values are different,
+ // display the used value's prefix to avoid confusion.
+ if usedPrefix != totalPrefix || totalDisplay == "" {
+ _, _ = sb.WriteString(" ")
+ _, _ = sb.WriteString(usedPrefix)
+ _, _ = sb.WriteString(r.Unit)
+ }
+
+ if totalDisplay != "" {
+ _, _ = sb.WriteString("/")
+ _, _ = sb.WriteString(totalDisplay)
+ _, _ = sb.WriteString(" ")
+ _, _ = sb.WriteString(totalPrefix)
+ _, _ = sb.WriteString(r.Unit)
+ }
+
+ if r.Total != nil && *r.Total != 0.0 {
+ _, _ = sb.WriteString(" (")
+ _, _ = sb.WriteString(fmt.Sprintf("%.0f", r.Used/(*r.Total)*100.0))
+ _, _ = sb.WriteString("%)")
+ }
+
+ return strings.TrimSpace(sb.String())
+}
+
+func humanizeFloat(f float64) string {
+ // humanize.FtoaWithDigits does not round correctly.
+ prec := precision(f)
+ rat := math.Pow(10, float64(prec))
+ rounded := math.Round(f*rat) / rat
+ return strconv.FormatFloat(rounded, 'f', -1, 64)
+}
+
+// limit precision to 3 digits at most to preserve space
+func precision(f float64) int {
+ fabs := math.Abs(f)
+ if fabs == 0.0 {
+ return 0
+ }
+ if fabs < 1.0 {
+ return 3
+ }
+ if fabs < 10.0 {
+ return 2
+ }
+ if fabs < 100.0 {
+ return 1
+ }
+ return 0
+}
+
+// Statter is a system statistics collector.
+// It is a thin wrapper around the elastic/go-sysinfo library.
+type Statter struct {
+ hi sysinfotypes.Host
+ fs afero.Fs
+ sampleInterval time.Duration
+ nproc int
+ wait func(time.Duration)
+}
+
+type Option func(*Statter)
+
+// WithSampleInterval sets the sample interval for the statter.
+func WithSampleInterval(d time.Duration) Option {
+ return func(s *Statter) {
+ s.sampleInterval = d
+ }
+}
+
+// WithFS sets the fs for the statter.
+func WithFS(fs afero.Fs) Option {
+ return func(s *Statter) {
+ s.fs = fs
+ }
+}
+
+func New(opts ...Option) (*Statter, error) {
+ hi, err := sysinfo.Host()
+ if err != nil {
+ return nil, xerrors.Errorf("get host info: %w", err)
+ }
+ s := &Statter{
+ hi: hi,
+ fs: afero.NewReadOnlyFs(afero.NewOsFs()),
+ sampleInterval: 100 * time.Millisecond,
+ nproc: runtime.NumCPU(),
+ wait: func(d time.Duration) {
+ <-time.After(d)
+ },
+ }
+ for _, opt := range opts {
+ opt(s)
+ }
+ return s, nil
+}
+
+// HostCPU returns the CPU usage of the host. This is calculated by
+// taking two samples of CPU usage and calculating the difference.
+// Total will always be equal to the number of cores.
+// Used will be an estimate of the number of cores used during the sample interval.
+// This is calculated by taking the difference between the total and idle HostCPU time
+// and scaling it by the number of cores.
+// Units are in "cores".
+func (s *Statter) HostCPU() (*Result, error) {
+ r := &Result{
+ Unit: "cores",
+ Total: ptr.To(float64(s.nproc)),
+ }
+ c1, err := s.hi.CPUTime()
+ if err != nil {
+ return nil, xerrors.Errorf("get first cpu sample: %w", err)
+ }
+ s.wait(s.sampleInterval)
+ c2, err := s.hi.CPUTime()
+ if err != nil {
+ return nil, xerrors.Errorf("get second cpu sample: %w", err)
+ }
+ total := c2.Total() - c1.Total()
+ if total == 0 {
+ return r, nil // no change
+ }
+ idle := c2.Idle - c1.Idle
+ used := total - idle
+ scaleFactor := float64(s.nproc) / total.Seconds()
+ r.Used = used.Seconds() * scaleFactor
+ return r, nil
+}
+
+// HostMemory returns the memory usage of the host, in bytes.
+func (s *Statter) HostMemory() (*Result, error) {
+ r := &Result{
+ Unit: "B",
+ }
+ hm, err := s.hi.Memory()
+ if err != nil {
+ return nil, xerrors.Errorf("get memory info: %w", err)
+ }
+ r.Total = ptr.To(float64(hm.Total))
+ r.Used = float64(hm.Used)
+ return r, nil
+}
diff --git a/cli/clistat/stat_internal_test.go b/cli/clistat/stat_internal_test.go
new file mode 100644
index 0000000000000..0cfdc26f7afb4
--- /dev/null
+++ b/cli/clistat/stat_internal_test.go
@@ -0,0 +1,345 @@
+package clistat
+
+import (
+ "testing"
+ "time"
+
+ "github.com/spf13/afero"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "tailscale.com/types/ptr"
+)
+
+func TestResultString(t *testing.T) {
+ t.Parallel()
+ for _, tt := range []struct {
+ Expected string
+ Result Result
+ }{
+ {
+ Expected: "1.23/5.68 quatloos (22%)",
+ Result: Result{Used: 1.234, Total: ptr.To(5.678), Unit: "quatloos"},
+ },
+ {
+ Expected: "0/0 HP",
+ Result: Result{Used: 0.0, Total: ptr.To(0.0), Unit: "HP"},
+ },
+ {
+ Expected: "123 seconds",
+ Result: Result{Used: 123.01, Total: nil, Unit: "seconds"},
+ },
+ {
+ Expected: "12.3",
+ Result: Result{Used: 12.34, Total: nil, Unit: ""},
+ },
+ {
+ Expected: "1.54 kB",
+ Result: Result{Used: 1536, Total: nil, Unit: "B"},
+ },
+ {
+ Expected: "1.23 things",
+ Result: Result{Used: 1.234, Total: nil, Unit: "things"},
+ },
+ {
+ Expected: "1 B/100 TB (0%)",
+ Result: Result{Used: 1, Total: ptr.To(1000 * 1000 * 1000 * 1000 * 100.0), Unit: "B"},
+ },
+ {
+ Expected: "500 mcores/8 cores (6%)",
+ Result: Result{Used: 0.5, Total: ptr.To(8.0), Unit: "cores"},
+ },
+ } {
+ assert.Equal(t, tt.Expected, tt.Result.String())
+ }
+}
+
+func TestStatter(t *testing.T) {
+ t.Parallel()
+
+ // We cannot make many assertions about the data we get back
+ // for host-specific measurements because these tests could
+ // and should run successfully on any OS.
+ // The best we can do is assert that it is non-zero.
+ t.Run("HostOnly", func(t *testing.T) {
+ t.Parallel()
+ fs := initFS(t, fsHostOnly)
+ s, err := New(WithFS(fs))
+ require.NoError(t, err)
+ t.Run("HostCPU", func(t *testing.T) {
+ t.Parallel()
+ cpu, err := s.HostCPU()
+ require.NoError(t, err)
+ assert.NotZero(t, cpu.Used)
+ assert.NotZero(t, cpu.Total)
+ assert.Equal(t, "cores", cpu.Unit)
+ })
+
+ t.Run("HostMemory", func(t *testing.T) {
+ t.Parallel()
+ mem, err := s.HostMemory()
+ require.NoError(t, err)
+ assert.NotZero(t, mem.Used)
+ assert.NotZero(t, mem.Total)
+ assert.Equal(t, "B", mem.Unit)
+ })
+
+ t.Run("HostDisk", func(t *testing.T) {
+ t.Parallel()
+ disk, err := s.Disk("") // empty path defaults to root ("/"), not home dir
+ require.NoError(t, err)
+ assert.NotZero(t, disk.Used)
+ assert.NotZero(t, disk.Total)
+ assert.Equal(t, "B", disk.Unit)
+ })
+ })
+
+ // Sometimes we do need to "fake" some stuff
+ // that happens while we wait.
+ withWait := func(waitF func(time.Duration)) Option {
+ return func(s *Statter) {
+ s.wait = waitF
+ }
+ }
+
+ // Other times we just want things to run fast.
+ withNoWait := func(s *Statter) {
+ s.wait = func(time.Duration) {}
+ }
+
+ // We don't want to use the actual host CPU here.
+ withNproc := func(n int) Option {
+ return func(s *Statter) {
+ s.nproc = n
+ }
+ }
+
+ // For container-specific measurements, everything we need
+ // can be read from the filesystem. We control the FS, so
+ // we control the data.
+ t.Run("CGroupV1", func(t *testing.T) {
+ t.Parallel()
+ t.Run("ContainerCPU/Limit", func(t *testing.T) {
+ t.Parallel()
+ fs := initFS(t, fsContainerCgroupV1)
+ fakeWait := func(time.Duration) {
+ // Fake 0.1s (100000000 ns) of CPU usage; one full 100000us period.
+ mungeFS(t, fs, cgroupV1CPUAcctUsage, "100000000")
+ }
+ s, err := New(WithFS(fs), withWait(fakeWait))
+ require.NoError(t, err)
+ cpu, err := s.ContainerCPU()
+ require.NoError(t, err)
+ require.NotNil(t, cpu)
+ assert.Equal(t, 1.0, cpu.Used)
+ require.NotNil(t, cpu.Total)
+ assert.Equal(t, 2.5, *cpu.Total)
+ assert.Equal(t, "cores", cpu.Unit)
+ })
+
+ t.Run("ContainerCPU/NoLimit", func(t *testing.T) {
+ t.Parallel()
+ fs := initFS(t, fsContainerCgroupV1NoLimit)
+ fakeWait := func(time.Duration) {
+ // Fake 0.1s (100000000 ns) of CPU usage; one full 100000us period.
+ mungeFS(t, fs, cgroupV1CPUAcctUsage, "100000000")
+ }
+ s, err := New(WithFS(fs), withNproc(2), withWait(fakeWait))
+ require.NoError(t, err)
+ cpu, err := s.ContainerCPU()
+ require.NoError(t, err)
+ require.NotNil(t, cpu)
+ assert.Equal(t, 1.0, cpu.Used)
+ require.NotNil(t, cpu.Total)
+ assert.Equal(t, 2.0, *cpu.Total)
+ assert.Equal(t, "cores", cpu.Unit)
+ })
+
+ t.Run("ContainerMemory", func(t *testing.T) {
+ t.Parallel()
+ fs := initFS(t, fsContainerCgroupV1)
+ s, err := New(WithFS(fs), withNoWait)
+ require.NoError(t, err)
+ mem, err := s.ContainerMemory()
+ require.NoError(t, err)
+ require.NotNil(t, mem)
+ assert.Equal(t, 268435456.0, mem.Used)
+ assert.NotNil(t, mem.Total)
+ assert.Equal(t, 1073741824.0, *mem.Total)
+ assert.Equal(t, "B", mem.Unit)
+ })
+ })
+
+ t.Run("CGroupV2", func(t *testing.T) {
+ t.Parallel()
+
+ t.Run("ContainerCPU/Limit", func(t *testing.T) {
+ t.Parallel()
+ fs := initFS(t, fsContainerCgroupV2)
+ fakeWait := func(time.Duration) {
+ mungeFS(t, fs, cgroupV2CPUStat, "usage_usec 100000")
+ }
+ s, err := New(WithFS(fs), withWait(fakeWait))
+ require.NoError(t, err)
+ cpu, err := s.ContainerCPU()
+ require.NoError(t, err)
+ require.NotNil(t, cpu)
+ assert.Equal(t, 1.0, cpu.Used)
+ require.NotNil(t, cpu.Total)
+ assert.Equal(t, 2.5, *cpu.Total)
+ assert.Equal(t, "cores", cpu.Unit)
+ })
+
+ t.Run("ContainerCPU/NoLimit", func(t *testing.T) {
+ t.Parallel()
+ fs := initFS(t, fsContainerCgroupV2NoLimit)
+ fakeWait := func(time.Duration) {
+ mungeFS(t, fs, cgroupV2CPUStat, "usage_usec 100000")
+ }
+ s, err := New(WithFS(fs), withNproc(2), withWait(fakeWait))
+ require.NoError(t, err)
+ cpu, err := s.ContainerCPU()
+ require.NoError(t, err)
+ require.NotNil(t, cpu)
+ assert.Equal(t, 1.0, cpu.Used)
+ require.NotNil(t, cpu.Total)
+ assert.Equal(t, 2.0, *cpu.Total)
+ assert.Equal(t, "cores", cpu.Unit)
+ })
+
+ t.Run("ContainerMemory", func(t *testing.T) {
+ t.Parallel()
+ fs := initFS(t, fsContainerCgroupV2)
+ s, err := New(WithFS(fs), withNoWait)
+ require.NoError(t, err)
+ mem, err := s.ContainerMemory()
+ require.NoError(t, err)
+ require.NotNil(t, mem)
+ assert.Equal(t, 268435456.0, mem.Used)
+ assert.NotNil(t, mem.Total)
+ assert.Equal(t, 1073741824.0, *mem.Total)
+ assert.Equal(t, "B", mem.Unit)
+ })
+ })
+}
+
+func TestIsContainerized(t *testing.T) {
+ t.Parallel()
+
+ for _, tt := range []struct {
+ Name string
+ FS map[string]string
+ Expected bool
+ Error string
+ }{
+ {
+ Name: "Empty",
+ FS: map[string]string{},
+ Expected: false,
+ Error: "",
+ },
+ {
+ Name: "BareMetal",
+ FS: fsHostOnly,
+ Expected: false,
+ Error: "",
+ },
+ {
+ Name: "Docker",
+ FS: fsContainerCgroupV1,
+ Expected: true,
+ Error: "",
+ },
+ {
+ Name: "Sysbox",
+ FS: fsContainerSysbox,
+ Expected: true,
+ Error: "",
+ },
+ } {
+ tt := tt
+ t.Run(tt.Name, func(t *testing.T) {
+ t.Parallel()
+ fs := initFS(t, tt.FS)
+ actual, err := IsContainerized(fs)
+ if tt.Error == "" {
+ assert.NoError(t, err)
+ assert.Equal(t, tt.Expected, actual)
+ } else {
+ assert.ErrorContains(t, err, tt.Error)
+ assert.False(t, actual)
+ }
+ })
+ }
+}
+
+// helper function for initializing a fs
+func initFS(t testing.TB, m map[string]string) afero.Fs {
+ t.Helper()
+ fs := afero.NewMemMapFs()
+ for k, v := range m {
+ mungeFS(t, fs, k, v)
+ }
+ return fs
+}
+
+// helper function for writing v to fs under path k
+func mungeFS(t testing.TB, fs afero.Fs, k, v string) {
+ t.Helper()
+ require.NoError(t, afero.WriteFile(fs, k, []byte(v+"\n"), 0o600))
+}
+
+var (
+ fsHostOnly = map[string]string{
+ procOneCgroup: "0::/",
+ procMounts: "/dev/sda1 / ext4 rw,relatime 0 0",
+ }
+ fsContainerSysbox = map[string]string{
+ procOneCgroup: "0::/docker/aa86ac98959eeedeae0ecb6e0c9ddd8ae8b97a9d0fdccccf7ea7a474f4e0bb1f",
+ procMounts: `overlay / overlay rw,relatime,lowerdir=/some/path:/some/path,upperdir=/some/path:/some/path,workdir=/some/path:/some/path 0 0
+sysboxfs /proc/sys proc ro,nosuid,nodev,noexec,relatime 0 0`,
+ cgroupV2CPUMax: "250000 100000",
+ cgroupV2CPUStat: "usage_usec 0",
+ }
+ fsContainerCgroupV2 = map[string]string{
+ procOneCgroup: "0::/docker/aa86ac98959eeedeae0ecb6e0c9ddd8ae8b97a9d0fdccccf7ea7a474f4e0bb1f",
+ procMounts: `overlay / overlay rw,relatime,lowerdir=/some/path:/some/path,upperdir=/some/path:/some/path,workdir=/some/path:/some/path 0 0
+proc /proc/sys proc ro,nosuid,nodev,noexec,relatime 0 0`,
+ cgroupV2CPUMax: "250000 100000",
+ cgroupV2CPUStat: "usage_usec 0",
+ cgroupV2MemoryMaxBytes: "1073741824",
+ cgroupV2MemoryUsageBytes: "536870912",
+ cgroupV2MemoryStat: "inactive_file 268435456",
+ }
+ fsContainerCgroupV2NoLimit = map[string]string{
+ procOneCgroup: "0::/docker/aa86ac98959eeedeae0ecb6e0c9ddd8ae8b97a9d0fdccccf7ea7a474f4e0bb1f",
+ procMounts: `overlay / overlay rw,relatime,lowerdir=/some/path:/some/path,upperdir=/some/path:/some/path,workdir=/some/path:/some/path 0 0
+proc /proc/sys proc ro,nosuid,nodev,noexec,relatime 0 0`,
+ cgroupV2CPUMax: "max 100000",
+ cgroupV2CPUStat: "usage_usec 0",
+ cgroupV2MemoryMaxBytes: "1073741824",
+ cgroupV2MemoryUsageBytes: "536870912",
+ cgroupV2MemoryStat: "inactive_file 268435456",
+ }
+ fsContainerCgroupV1 = map[string]string{
+ procOneCgroup: "0::/docker/aa86ac98959eeedeae0ecb6e0c9ddd8ae8b97a9d0fdccccf7ea7a474f4e0bb1f",
+ procMounts: `overlay / overlay rw,relatime,lowerdir=/some/path:/some/path,upperdir=/some/path:/some/path,workdir=/some/path:/some/path 0 0
+proc /proc/sys proc ro,nosuid,nodev,noexec,relatime 0 0`,
+ cgroupV1CPUAcctUsage: "0",
+ cgroupV1CFSQuotaUs: "250000",
+ cgroupV1CFSPeriodUs: "100000",
+ cgroupV1MemoryMaxUsageBytes: "1073741824",
+ cgroupV1MemoryUsageBytes: "536870912",
+ cgroupV1MemoryStat: "total_inactive_file 268435456",
+ }
+ fsContainerCgroupV1NoLimit = map[string]string{
+ procOneCgroup: "0::/docker/aa86ac98959eeedeae0ecb6e0c9ddd8ae8b97a9d0fdccccf7ea7a474f4e0bb1f",
+ procMounts: `overlay / overlay rw,relatime,lowerdir=/some/path:/some/path,upperdir=/some/path:/some/path,workdir=/some/path:/some/path 0 0
+proc /proc/sys proc ro,nosuid,nodev,noexec,relatime 0 0`,
+ cgroupV1CPUAcctUsage: "0",
+ cgroupV1CFSQuotaUs: "-1",
+ cgroupV1CFSPeriodUs: "100000",
+ cgroupV1MemoryMaxUsageBytes: "1073741824",
+ cgroupV1MemoryUsageBytes: "536870912",
+ cgroupV1MemoryStat: "total_inactive_file 268435456",
+ }
+)
diff --git a/cli/root.go b/cli/root.go
index e8ee13b2476d6..b2c33d0f1b547 100644
--- a/cli/root.go
+++ b/cli/root.go
@@ -103,6 +103,7 @@ func (r *RootCmd) Core() []*clibase.Cmd {
r.stop(),
r.update(),
r.restart(),
+ r.stat(),
// Hidden
r.gitssh(),
diff --git a/cli/stat.go b/cli/stat.go
new file mode 100644
index 0000000000000..67232caee53c7
--- /dev/null
+++ b/cli/stat.go
@@ -0,0 +1,241 @@
+package cli
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/spf13/afero"
+ "golang.org/x/xerrors"
+
+ "github.com/coder/coder/cli/clibase"
+ "github.com/coder/coder/cli/clistat"
+ "github.com/coder/coder/cli/cliui"
+)
+
+func (r *RootCmd) stat() *clibase.Cmd {
+ fs := afero.NewReadOnlyFs(afero.NewOsFs())
+ defaultCols := []string{
+ "host_cpu",
+ "host_memory",
+ "home_disk",
+ "container_cpu",
+ "container_memory",
+ }
+ formatter := cliui.NewOutputFormatter(
+ cliui.TableFormat([]statsRow{}, defaultCols),
+ cliui.JSONFormat(),
+ )
+ st, err := clistat.New(clistat.WithFS(fs))
+ if err != nil {
+ panic(xerrors.Errorf("initialize workspace stats collector: %w", err))
+ }
+
+ cmd := &clibase.Cmd{
+ Use: "stat",
+ Short: "Show resource usage for the current workspace.",
+ Children: []*clibase.Cmd{
+ r.statCPU(st, fs),
+ r.statMem(st, fs),
+ r.statDisk(st),
+ },
+ Handler: func(inv *clibase.Invocation) error {
+ var sr statsRow
+
+ // Get CPU measurements first.
+ hostErr := make(chan error, 1)
+ containerErr := make(chan error, 1)
+ go func() {
+ defer close(hostErr)
+ cs, err := st.HostCPU()
+ if err != nil {
+ hostErr <- err
+ return
+ }
+ sr.HostCPU = cs
+ }()
+ go func() {
+ defer close(containerErr)
+ if ok, _ := clistat.IsContainerized(fs); !ok {
+ // don't error if we're not in a container
+ return
+ }
+ cs, err := st.ContainerCPU()
+ if err != nil {
+ containerErr <- err
+ return
+ }
+ sr.ContainerCPU = cs
+ }()
+
+ if err := <-hostErr; err != nil {
+ return err
+ }
+ if err := <-containerErr; err != nil {
+ return err
+ }
+
+ // Host-level stats
+ ms, err := st.HostMemory()
+ if err != nil {
+ return err
+ }
+ sr.HostMemory = ms
+
+ home, err := os.UserHomeDir()
+ if err != nil {
+ return err
+ }
+ ds, err := st.Disk(home)
+ if err != nil {
+ return err
+ }
+ sr.Disk = ds
+
+ // Container-only stats.
+ if ok, err := clistat.IsContainerized(fs); err == nil && ok {
+ cs, err := st.ContainerCPU()
+ if err != nil {
+ return err
+ }
+ sr.ContainerCPU = cs
+
+ ms, err := st.ContainerMemory()
+ if err != nil {
+ return err
+ }
+ sr.ContainerMemory = ms
+ }
+
+ out, err := formatter.Format(inv.Context(), []statsRow{sr})
+ if err != nil {
+ return err
+ }
+ _, err = fmt.Fprintln(inv.Stdout, out)
+ return err
+ },
+ }
+ formatter.AttachOptions(&cmd.Options)
+ return cmd
+}
+
+func (*RootCmd) statCPU(s *clistat.Statter, fs afero.Fs) *clibase.Cmd {
+ var hostArg bool
+ var prefixArg string
+ formatter := cliui.NewOutputFormatter(cliui.TextFormat(), cliui.JSONFormat())
+ cmd := &clibase.Cmd{
+ Use: "cpu",
+ Short: "Show CPU usage, in cores.",
+ Options: clibase.OptionSet{
+ {
+ Flag: "host",
+ Value: clibase.BoolOf(&hostArg),
+ Description: "Force host CPU measurement.",
+ },
+ {
+ Flag: "prefix",
+ Value: clibase.StringOf(&prefixArg),
+ Description: "Unit prefix.",
+ Default: "",
+ },
+ },
+ Handler: func(inv *clibase.Invocation) error {
+ var cs *clistat.Result
+ var err error
+ if ok, _ := clistat.IsContainerized(fs); ok && !hostArg {
+ cs, err = s.ContainerCPU()
+ } else {
+ cs, err = s.HostCPU()
+ }
+ if err != nil {
+ return err
+ }
+ out, err := formatter.Format(inv.Context(), cs)
+ if err != nil {
+ return err
+ }
+ _, err = fmt.Fprintln(inv.Stdout, out)
+ return err
+ },
+ }
+ formatter.AttachOptions(&cmd.Options)
+
+ return cmd
+}
+
+func (*RootCmd) statMem(s *clistat.Statter, fs afero.Fs) *clibase.Cmd {
+ var hostArg bool
+ formatter := cliui.NewOutputFormatter(cliui.TextFormat(), cliui.JSONFormat())
+ cmd := &clibase.Cmd{
+ Use: "mem",
+ Short: "Show memory usage, in bytes.",
+ Options: clibase.OptionSet{
+ {
+ Flag: "host",
+ Value: clibase.BoolOf(&hostArg),
+ Description: "Force host memory measurement.",
+ },
+ },
+ Handler: func(inv *clibase.Invocation) error {
+ var ms *clistat.Result
+ var err error
+ if ok, _ := clistat.IsContainerized(fs); ok && !hostArg {
+ ms, err = s.ContainerMemory()
+ } else {
+ ms, err = s.HostMemory()
+ }
+ if err != nil {
+ return err
+ }
+ out, err := formatter.Format(inv.Context(), ms)
+ if err != nil {
+ return err
+ }
+ _, err = fmt.Fprintln(inv.Stdout, out)
+ return err
+ },
+ }
+
+ formatter.AttachOptions(&cmd.Options)
+ return cmd
+}
+
+func (*RootCmd) statDisk(s *clistat.Statter) *clibase.Cmd {
+ var pathArg string
+ formatter := cliui.NewOutputFormatter(cliui.TextFormat(), cliui.JSONFormat())
+ cmd := &clibase.Cmd{
+ Use: "disk",
+ Short: "Show disk usage, in bytes.",
+ Options: clibase.OptionSet{
+ {
+ Flag: "path",
+ Value: clibase.StringOf(&pathArg),
+ Description: "Path for which to check disk usage.",
+ Default: "/",
+ },
+ },
+ Handler: func(inv *clibase.Invocation) error {
+ ds, err := s.Disk(pathArg)
+ if err != nil {
+ return err
+ }
+
+ out, err := formatter.Format(inv.Context(), ds)
+ if err != nil {
+ return err
+ }
+ _, err = fmt.Fprintln(inv.Stdout, out)
+ return err
+ },
+ }
+
+ formatter.AttachOptions(&cmd.Options)
+ return cmd
+}
+
+type statsRow struct {
+ HostCPU *clistat.Result `json:"host_cpu" table:"host_cpu,default_sort"`
+ HostMemory *clistat.Result `json:"host_memory" table:"host_memory"`
+ Disk *clistat.Result `json:"home_disk" table:"home_disk"`
+ ContainerCPU *clistat.Result `json:"container_cpu" table:"container_cpu"`
+ ContainerMemory *clistat.Result `json:"container_memory" table:"container_memory"`
+}
diff --git a/cli/stat_test.go b/cli/stat_test.go
new file mode 100644
index 0000000000000..39934133b107c
--- /dev/null
+++ b/cli/stat_test.go
@@ -0,0 +1,173 @@
+package cli_test
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/coder/coder/cli/clistat"
+ "github.com/coder/coder/cli/clitest"
+ "github.com/coder/coder/testutil"
+)
+
+// This just tests that the stat command is recognized and does not output
+// an empty string. Asserting exact values here would make the test depend on
+// the host/container environment it runs in. More detailed verification of
+// the stats output is performed in the tests in the clistat package.
+func TestStatCmd(t *testing.T) {
+ t.Parallel()
+ t.Run("JSON", func(t *testing.T) {
+ t.Parallel()
+ ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
+ t.Cleanup(cancel)
+ inv, _ := clitest.New(t, "stat", "all", "--output=json")
+ buf := new(bytes.Buffer)
+ inv.Stdout = buf
+ err := inv.WithContext(ctx).Run()
+ require.NoError(t, err)
+ s := buf.String()
+ require.NotEmpty(t, s)
+ // Must be valid JSON
+ tmp := make([]clistat.Result, 0)
+ require.NoError(t, json.NewDecoder(strings.NewReader(s)).Decode(&tmp))
+ })
+ t.Run("Table", func(t *testing.T) {
+ t.Parallel()
+ ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
+ t.Cleanup(cancel)
+ inv, _ := clitest.New(t, "stat", "all", "--output=table")
+ buf := new(bytes.Buffer)
+ inv.Stdout = buf
+ err := inv.WithContext(ctx).Run()
+ require.NoError(t, err)
+ s := buf.String()
+ require.NotEmpty(t, s)
+ require.Contains(t, s, "HOST CPU")
+ require.Contains(t, s, "HOST MEMORY")
+ require.Contains(t, s, "HOME DISK")
+ })
+ t.Run("Default", func(t *testing.T) {
+ t.Parallel()
+ ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
+ t.Cleanup(cancel)
+ inv, _ := clitest.New(t, "stat", "all")
+ buf := new(bytes.Buffer)
+ inv.Stdout = buf
+ err := inv.WithContext(ctx).Run()
+ require.NoError(t, err)
+ s := buf.String()
+ require.NotEmpty(t, s)
+ require.Contains(t, s, "HOST CPU")
+ require.Contains(t, s, "HOST MEMORY")
+ require.Contains(t, s, "HOME DISK")
+ })
+}
+
+func TestStatCPUCmd(t *testing.T) {
+ t.Parallel()
+
+ t.Run("Text", func(t *testing.T) {
+ t.Parallel()
+ ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
+ t.Cleanup(cancel)
+ inv, _ := clitest.New(t, "stat", "cpu", "--output=text")
+ buf := new(bytes.Buffer)
+ inv.Stdout = buf
+ err := inv.WithContext(ctx).Run()
+ require.NoError(t, err)
+ s := buf.String()
+ require.NotEmpty(t, s)
+ })
+
+ t.Run("JSON", func(t *testing.T) {
+ t.Parallel()
+ ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
+ t.Cleanup(cancel)
+ inv, _ := clitest.New(t, "stat", "cpu", "--output=json")
+ buf := new(bytes.Buffer)
+ inv.Stdout = buf
+ err := inv.WithContext(ctx).Run()
+ require.NoError(t, err)
+ s := buf.String()
+ tmp := clistat.Result{}
+ require.NoError(t, json.NewDecoder(strings.NewReader(s)).Decode(&tmp))
+ require.NotZero(t, tmp.Used)
+ require.NotNil(t, tmp.Total)
+ require.NotZero(t, *tmp.Total)
+ require.Equal(t, "cores", tmp.Unit)
+ })
+}
+
+func TestStatMemCmd(t *testing.T) {
+ t.Parallel()
+
+ t.Run("Text", func(t *testing.T) {
+ t.Parallel()
+ ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
+ t.Cleanup(cancel)
+ inv, _ := clitest.New(t, "stat", "mem", "--output=text")
+ buf := new(bytes.Buffer)
+ inv.Stdout = buf
+ err := inv.WithContext(ctx).Run()
+ require.NoError(t, err)
+ s := buf.String()
+ require.NotEmpty(t, s)
+ })
+
+ t.Run("JSON", func(t *testing.T) {
+ t.Parallel()
+ ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
+ t.Cleanup(cancel)
+ inv, _ := clitest.New(t, "stat", "mem", "--output=json")
+ buf := new(bytes.Buffer)
+ inv.Stdout = buf
+ err := inv.WithContext(ctx).Run()
+ require.NoError(t, err)
+ s := buf.String()
+ tmp := clistat.Result{}
+ require.NoError(t, json.NewDecoder(strings.NewReader(s)).Decode(&tmp))
+ require.NotZero(t, tmp.Used)
+ require.NotNil(t, tmp.Total)
+ require.NotZero(t, *tmp.Total)
+ require.Equal(t, "B", tmp.Unit)
+ })
+}
+
+func TestStatDiskCmd(t *testing.T) {
+ t.Parallel()
+
+ t.Run("Text", func(t *testing.T) {
+ t.Parallel()
+ ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
+ t.Cleanup(cancel)
+ inv, _ := clitest.New(t, "stat", "disk", "--output=text")
+ buf := new(bytes.Buffer)
+ inv.Stdout = buf
+ err := inv.WithContext(ctx).Run()
+ require.NoError(t, err)
+ s := buf.String()
+ require.NotEmpty(t, s)
+ })
+
+ t.Run("JSON", func(t *testing.T) {
+ t.Parallel()
+ ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
+ t.Cleanup(cancel)
+ inv, _ := clitest.New(t, "stat", "disk", "--output=json")
+ buf := new(bytes.Buffer)
+ inv.Stdout = buf
+ err := inv.WithContext(ctx).Run()
+ require.NoError(t, err)
+ s := buf.String()
+ tmp := clistat.Result{}
+ require.NoError(t, json.NewDecoder(strings.NewReader(s)).Decode(&tmp))
+ require.NotZero(t, tmp.Used)
+ require.NotNil(t, tmp.Total)
+ require.NotZero(t, *tmp.Total)
+ require.Equal(t, "B", tmp.Unit)
+ })
+}
diff --git a/cli/testdata/coder_--help.golden b/cli/testdata/coder_--help.golden
index 7b2fcd494a5db..381bc30ece5b9 100644
--- a/cli/testdata/coder_--help.golden
+++ b/cli/testdata/coder_--help.golden
@@ -34,6 +34,7 @@ Coder v0.0.0-devel — A tool for provisioning self-hosted development environme
workspace
ssh Start a shell into a workspace
start Start a workspace
+ stat Show resource usage for the current workspace.
state Manually manage Terraform state to fix broken workspaces
stop Stop a workspace
templates Manage templates
diff --git a/cli/testdata/coder_stat_--help.golden b/cli/testdata/coder_stat_--help.golden
new file mode 100644
index 0000000000000..e2eddcb30fcf2
--- /dev/null
+++ b/cli/testdata/coder_stat_--help.golden
@@ -0,0 +1,19 @@
+Usage: coder stat [flags]
+
+Show resource usage for the current workspace.
+
+[1mSubcommands[0m
+ cpu Show CPU usage, in cores.
+ disk Show disk usage, in gigabytes.
+ mem Show memory usage, in gigabytes.
+
+[1mOptions[0m
+ -c, --column string-array (default: host_cpu,host_memory,home_disk,container_cpu,container_memory)
+ Columns to display in table output. Available columns: host cpu, host
+ memory, home disk, container cpu, container memory.
+
+ -o, --output string (default: table)
+ Output format. Available formats: table, json.
+
+---
+Run `coder --help` for a list of global options.
diff --git a/cli/testdata/coder_stat_cpu_--help.golden b/cli/testdata/coder_stat_cpu_--help.golden
new file mode 100644
index 0000000000000..dba620751ba6d
--- /dev/null
+++ b/cli/testdata/coder_stat_cpu_--help.golden
@@ -0,0 +1,16 @@
+Usage: coder stat cpu [flags]
+
+Show CPU usage, in cores.
+
+[1mOptions[0m
+ --host bool
+ Force host CPU measurement.
+
+ -o, --output string (default: text)
+ Output format. Available formats: text, json.
+
+ --prefix string
+ Unit prefix.
+
+---
+Run `coder --help` for a list of global options.
diff --git a/cli/testdata/coder_stat_disk_--help.golden b/cli/testdata/coder_stat_disk_--help.golden
new file mode 100644
index 0000000000000..cb33481f726b0
--- /dev/null
+++ b/cli/testdata/coder_stat_disk_--help.golden
@@ -0,0 +1,13 @@
+Usage: coder stat disk [flags]
+
+Show disk usage, in gigabytes.
+
+[1mOptions[0m
+ -o, --output string (default: text)
+ Output format. Available formats: text, json.
+
+ --path string (default: /)
+ Path for which to check disk usage.
+
+---
+Run `coder --help` for a list of global options.
diff --git a/cli/testdata/coder_stat_mem_--help.golden b/cli/testdata/coder_stat_mem_--help.golden
new file mode 100644
index 0000000000000..0905c38a9639d
--- /dev/null
+++ b/cli/testdata/coder_stat_mem_--help.golden
@@ -0,0 +1,13 @@
+Usage: coder stat mem [flags]
+
+Show memory usage, in gigabytes.
+
+[1mOptions[0m
+ --host bool
+ Force host memory measurement.
+
+ -o, --output string (default: text)
+ Output format. Available formats: text, json.
+
+---
+Run `coder --help` for a list of global options.
diff --git a/docs/cli.md b/docs/cli.md
index c92caa6feb037..9ff6f4596a8e0 100644
--- a/docs/cli.md
+++ b/docs/cli.md
@@ -49,6 +49,7 @@ Coder — A tool for provisioning self-hosted development environments with Terr
 | [<code>speedtest</code>](./cli/speedtest.md) | Run upload and download tests from your machine to a workspace |
 | [<code>ssh</code>](./cli/ssh.md) | Start a shell into a workspace |
 | [<code>start</code>](./cli/start.md) | Start a workspace |
+| [<code>stat</code>](./cli/stat.md) | Show resource usage for the current workspace. |
 | [<code>state</code>](./cli/state.md) | Manually manage Terraform state to fix broken workspaces |
 | [<code>stop</code>](./cli/stop.md) | Stop a workspace |
 | [<code>templates</code>](./cli/templates.md) | Manage templates |
diff --git a/docs/cli/stat.md b/docs/cli/stat.md
new file mode 100644
index 0000000000000..ef66830f9348b
--- /dev/null
+++ b/docs/cli/stat.md
@@ -0,0 +1,39 @@
+
+
+# stat
+
+Show resource usage for the current workspace.
+
+## Usage
+
+```console
+coder stat [flags]
+```
+
+## Subcommands
+
+| Name | Purpose |
+| ----------------------------------- | -------------------------------- |
+| [<code>cpu</code>](./stat_cpu.md)   | Show CPU usage, in cores.        |
+| [<code>disk</code>](./stat_disk.md) | Show disk usage, in gigabytes.   |
+| [<code>mem</code>](./stat_mem.md)   | Show memory usage, in gigabytes. |
+
+## Options
+
+### -c, --column
+
+| | |
+| ------- | -------------------------------------------------------------------------- |
+| Type    | <code>string-array</code>                                                   |
+| Default | <code>host_cpu,host_memory,home_disk,container_cpu,container_memory</code> |
+
+Columns to display in table output. Available columns: host cpu, host memory, home disk, container cpu, container memory.
+
+### -o, --output
+
+| | |
+| ------- | ------------------- |
+| Type    | <code>string</code> |
+| Default | <code>table</code>  |
+
+Output format. Available formats: table, json.
diff --git a/docs/cli/stat_cpu.md b/docs/cli/stat_cpu.md
new file mode 100644
index 0000000000000..7edc442a1cb33
--- /dev/null
+++ b/docs/cli/stat_cpu.md
@@ -0,0 +1,38 @@
+
+
+# stat cpu
+
+Show CPU usage, in cores.
+
+## Usage
+
+```console
+coder stat cpu [flags]
+```
+
+## Options
+
+### --host
+
+| | |
+| ---- | ----------------- |
+| Type | <code>bool</code> |
+
+Force host CPU measurement.
+
+### -o, --output
+
+| | |
+| ------- | ------------------- |
+| Type    | <code>string</code> |
+| Default | <code>text</code>   |
+
+Output format. Available formats: text, json.
+
+### --prefix
+
+| | |
+| ---- | ------------------- |
+| Type | <code>string</code> |
+
+Unit prefix.
diff --git a/docs/cli/stat_disk.md b/docs/cli/stat_disk.md
new file mode 100644
index 0000000000000..6b6ddc34882c8
--- /dev/null
+++ b/docs/cli/stat_disk.md
@@ -0,0 +1,31 @@
+
+
+# stat disk
+
+Show disk usage, in gigabytes.
+
+## Usage
+
+```console
+coder stat disk [flags]
+```
+
+## Options
+
+### -o, --output
+
+| | |
+| ------- | ------------------- |
+| Type    | <code>string</code> |
+| Default | <code>text</code>   |
+
+Output format. Available formats: text, json.
+
+### --path
+
+| | |
+| ------- | ------------------- |
+| Type    | <code>string</code> |
+| Default | <code>/</code>      |
+
+Path for which to check disk usage.
diff --git a/docs/cli/stat_mem.md b/docs/cli/stat_mem.md
new file mode 100644
index 0000000000000..387e7d9ad18cb
--- /dev/null
+++ b/docs/cli/stat_mem.md
@@ -0,0 +1,30 @@
+
+
+# stat mem
+
+Show memory usage, in gigabytes.
+
+## Usage
+
+```console
+coder stat mem [flags]
+```
+
+## Options
+
+### --host
+
+| | |
+| ---- | ----------------- |
+| Type | <code>bool</code> |
+
+Force host memory measurement.
+
+### -o, --output
+
+| | |
+| ------- | ------------------- |
+| Type    | <code>string</code> |
+| Default | <code>text</code>   |
+
+Output format. Available formats: text, json.
diff --git a/docs/manifest.json b/docs/manifest.json
index 47ddd9d7f9f48..f01144605eb50 100644
--- a/docs/manifest.json
+++ b/docs/manifest.json
@@ -703,6 +703,26 @@
"description": "Start a workspace",
"path": "cli/start.md"
},
+ {
+ "title": "stat",
+ "description": "Show resource usage for the current workspace.",
+ "path": "cli/stat.md"
+ },
+ {
+ "title": "stat cpu",
+ "description": "Show CPU usage, in cores.",
+ "path": "cli/stat_cpu.md"
+ },
+ {
+ "title": "stat disk",
+ "description": "Show disk usage, in gigabytes.",
+ "path": "cli/stat_disk.md"
+ },
+ {
+ "title": "stat mem",
+ "description": "Show memory usage, in gigabytes.",
+ "path": "cli/stat_mem.md"
+ },
{
"title": "state",
"description": "Manually manage Terraform state to fix broken workspaces",
diff --git a/docs/templates/agent-metadata.md b/docs/templates/agent-metadata.md
index a7504b2e5ecb8..2cd21e898a8f4 100644
--- a/docs/templates/agent-metadata.md
+++ b/docs/templates/agent-metadata.md
@@ -16,6 +16,10 @@ See the [Terraform reference](https://registry.terraform.io/providers/coder/code
All of these examples use [heredoc strings](https://developer.hashicorp.com/terraform/language/expressions/strings#heredoc-strings) for the script declaration. With heredoc strings, you
can script without messy escape codes, just as if you were working in your terminal.
+Some of the below examples use the [`coder stat`](../cli/stat.md) command.
+This is useful for determining CPU/memory usage inside a container, which
+can be tricky otherwise.
+
Here's a standard set of metadata snippets for Linux agents:
```hcl
@@ -25,26 +29,36 @@ resource "coder_agent" "main" {
metadata {
display_name = "CPU Usage"
key = "cpu"
- # calculates CPU usage by summing the "us", "sy" and "id" columns of
- # vmstat.
- script = < /tmp/cusage
- echo "Unknown"
- exit 0
- fi
-
- # interval in microseconds should be metadata.interval * 1000000
- interval=10000000
- ncores=$(nproc)
- echo "$cusage $cusage_p $interval $ncores" | awk '{ printf "%2.0f%%\n", (($1 - $2)/$3/$4)*100 }'
-
- EOT
}
metadata {
display_name = "RAM Usage"
+ key = "1_ram_usage"
+ script = "coder stat mem"
interval = 10
timeout = 1
- key = "1_ram_usage"
- script = <&1 | awk ' $0 ~ "Word of the Day: [A-z]+" { print $5; exit }'
EOT
diff --git a/examples/templates/docker/main.tf b/examples/templates/docker/main.tf
index ed7b51d2d8519..d30aa8c1f8afa 100644
--- a/examples/templates/docker/main.tf
+++ b/examples/templates/docker/main.tf
@@ -46,6 +46,72 @@ resource "coder_agent" "main" {
GIT_AUTHOR_EMAIL = "${data.coder_workspace.me.owner_email}"
GIT_COMMITTER_EMAIL = "${data.coder_workspace.me.owner_email}"
}
+
+ # The following metadata blocks are optional. They are used to display
+ # information about your workspace in the dashboard. You can remove them
+ # if you don't want to display any information.
+ # For basic resources, you can use the `coder stat` command.
+ # If you need more control, you can write your own script.
+ metadata {
+ display_name = "CPU Usage"
+ key = "0_cpu_usage"
+ script = "coder stat cpu"
+ interval = 10
+ timeout = 1
+ }
+
+ metadata {
+ display_name = "RAM Usage"
+ key = "1_ram_usage"
+ script = "coder stat mem"
+ interval = 10
+ timeout = 1
+ }
+
+ metadata {
+ display_name = "Home Disk"
+ key = "3_home_disk"
+ script = "coder stat disk --path $${HOME}"
+ interval = 60
+ timeout = 1
+ }
+
+ metadata {
+ display_name = "CPU Usage (Host)"
+ key = "4_cpu_usage_host"
+ script = "coder stat cpu --host"
+ interval = 10
+ timeout = 1
+ }
+
+ metadata {
+ display_name = "Memory Usage (Host)"
+ key = "5_mem_usage_host"
+ script = "coder stat mem --host"
+ interval = 10
+ timeout = 1
+ }
+
+ metadata {
+ display_name = "Load Average (Host)"
+ key = "6_load_host"
+ # get load avg scaled by number of cores
+ script = </tmp/code-server.log 2>&1 &
EOT
+
+ # The following metadata blocks are optional. They are used to display
+ # information about your workspace in the dashboard. You can remove them
+ # if you don't want to display any information.
+ # For basic resources, you can use the `coder stat` command.
+ # If you need more control, you can write your own script.
+ metadata {
+ display_name = "CPU Usage"
+ key = "0_cpu_usage"
+ script = "coder stat cpu"
+ interval = 10
+ timeout = 1
+ }
+
+ metadata {
+ display_name = "RAM Usage"
+ key = "1_ram_usage"
+ script = "coder stat mem"
+ interval = 10
+ timeout = 1
+ }
+
+ metadata {
+ display_name = "Home Disk"
+ key = "3_home_disk"
+ script = "coder stat disk --path $${HOME}"
+ interval = 60
+ timeout = 1
+ }
+
+ metadata {
+ display_name = "CPU Usage (Host)"
+ key = "4_cpu_usage_host"
+ script = "coder stat cpu --host"
+ interval = 10
+ timeout = 1
+ }
+
+ metadata {
+ display_name = "Memory Usage (Host)"
+ key = "5_mem_usage_host"
+ script = "coder stat mem --host"
+ interval = 10
+ timeout = 1
+ }
+
+ metadata {
+ display_name = "Load Average (Host)"
+ key = "6_load_host"
+ # get load avg scaled by number of cores
+ script = <