From 172e52317cd053dcdffc2b7d445a1d390ebbe53b Mon Sep 17 00:00:00 2001
From: Cian Johnston
Date: Wed, 26 Feb 2025 09:03:27 +0000
Subject: [PATCH 01/44] feat(agent): wire up agentssh server to allow exec into
container (#16638)
Builds on top of https://github.com/coder/coder/pull/16623/ and wires up
the ReconnectingPTY server. This does nothing to wire up the web
terminal yet but the added test demonstrates the functionality working.
Other changes:
* Refactors and moves the `SystemEnvInfo` interface to the
`agent/usershell` package to address follow-up from
https://github.com/coder/coder/pull/16623#discussion_r1967580249
* Marks `usershell.Get` as deprecated. Consumers should use the
`EnvInfoer` interface instead.
---------
Co-authored-by: Mathias Fredriksson
Co-authored-by: Danny Kopping
---
agent/agent.go | 9 +++
agent/agent_test.go | 78 ++++++++++++++++++-
agent/agentcontainers/containers_dockercli.go | 20 +----
.../containers_internal_test.go | 6 +-
agent/agentssh/agentssh.go | 66 +++++-----------
agent/agentssh/agentssh_test.go | 10 ++-
agent/reconnectingpty/server.go | 25 +++++-
agent/usershell/usershell.go | 66 ++++++++++++++++
agent/usershell/usershell_darwin.go | 1 +
agent/usershell/usershell_other.go | 1 +
agent/usershell/usershell_windows.go | 1 +
cli/agent.go | 2 +
coderd/workspaceapps/proxy.go | 7 +-
codersdk/workspacesdk/agentconn.go | 28 ++++++-
codersdk/workspacesdk/workspacesdk.go | 22 +++++-
15 files changed, 260 insertions(+), 82 deletions(-)
create mode 100644 agent/usershell/usershell.go
diff --git a/agent/agent.go b/agent/agent.go
index 0b3a6b3ecd2cf..285636cd31344 100644
--- a/agent/agent.go
+++ b/agent/agent.go
@@ -88,6 +88,8 @@ type Options struct {
BlockFileTransfer bool
Execer agentexec.Execer
ContainerLister agentcontainers.Lister
+
+ ExperimentalContainersEnabled bool
}
type Client interface {
@@ -188,6 +190,8 @@ func New(options Options) Agent {
metrics: newAgentMetrics(prometheusRegistry),
execer: options.Execer,
lister: options.ContainerLister,
+
+ experimentalDevcontainersEnabled: options.ExperimentalContainersEnabled,
}
// Initially, we have a closed channel, reflecting the fact that we are not initially connected.
// Each time we connect we replace the channel (while holding the closeMutex) with a new one
@@ -258,6 +262,8 @@ type agent struct {
metrics *agentMetrics
execer agentexec.Execer
lister agentcontainers.Lister
+
+ experimentalDevcontainersEnabled bool
}
func (a *agent) TailnetConn() *tailnet.Conn {
@@ -297,6 +303,9 @@ func (a *agent) init() {
a.sshServer,
a.metrics.connectionsTotal, a.metrics.reconnectingPTYErrors,
a.reconnectingPTYTimeout,
+ func(s *reconnectingpty.Server) {
+ s.ExperimentalContainersEnabled = a.experimentalDevcontainersEnabled
+ },
)
go a.runLoop()
}
diff --git a/agent/agent_test.go b/agent/agent_test.go
index 834e0a3e68151..935309e98d873 100644
--- a/agent/agent_test.go
+++ b/agent/agent_test.go
@@ -25,8 +25,14 @@ import (
"testing"
"time"
+ "go.uber.org/goleak"
+ "tailscale.com/net/speedtest"
+ "tailscale.com/tailcfg"
+
"github.com/bramvdbogaerde/go-scp"
"github.com/google/uuid"
+ "github.com/ory/dockertest/v3"
+ "github.com/ory/dockertest/v3/docker"
"github.com/pion/udp"
"github.com/pkg/sftp"
"github.com/prometheus/client_golang/prometheus"
@@ -34,15 +40,13 @@ import (
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "go.uber.org/goleak"
"golang.org/x/crypto/ssh"
"golang.org/x/exp/slices"
"golang.org/x/xerrors"
- "tailscale.com/net/speedtest"
- "tailscale.com/tailcfg"
"cdr.dev/slog"
"cdr.dev/slog/sloggers/slogtest"
+
"github.com/coder/coder/v2/agent"
"github.com/coder/coder/v2/agent/agentssh"
"github.com/coder/coder/v2/agent/agenttest"
@@ -1761,6 +1765,74 @@ func TestAgent_ReconnectingPTY(t *testing.T) {
}
}
+// This tests end-to-end functionality of connecting to a running container
+// and executing a command. It creates a real Docker container and runs a
+// command. As such, it does not run by default in CI.
+// You can run it manually as follows:
+//
+// CODER_TEST_USE_DOCKER=1 go test -count=1 ./agent -run TestAgent_ReconnectingPTYContainer
+func TestAgent_ReconnectingPTYContainer(t *testing.T) {
+ t.Parallel()
+ if os.Getenv("CODER_TEST_USE_DOCKER") != "1" {
+ t.Skip("Set CODER_TEST_USE_DOCKER=1 to run this test")
+ }
+
+ ctx := testutil.Context(t, testutil.WaitLong)
+
+ pool, err := dockertest.NewPool("")
+ require.NoError(t, err, "Could not connect to docker")
+ ct, err := pool.RunWithOptions(&dockertest.RunOptions{
+ Repository: "busybox",
+ Tag: "latest",
+ Cmd: []string{"sleep", "infinity"},
+ }, func(config *docker.HostConfig) {
+ config.AutoRemove = true
+ config.RestartPolicy = docker.RestartPolicy{Name: "no"}
+ })
+ require.NoError(t, err, "Could not start container")
+ t.Cleanup(func() {
+ err := pool.Purge(ct)
+ require.NoError(t, err, "Could not stop container")
+ })
+ // Wait for container to start
+ require.Eventually(t, func() bool {
+ ct, ok := pool.ContainerByName(ct.Container.Name)
+ return ok && ct.Container.State.Running
+ }, testutil.WaitShort, testutil.IntervalSlow, "Container did not start in time")
+
+ // nolint: dogsled
+ conn, _, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0, func(_ *agenttest.Client, o *agent.Options) {
+ o.ExperimentalContainersEnabled = true
+ })
+ ac, err := conn.ReconnectingPTY(ctx, uuid.New(), 80, 80, "/bin/sh", func(arp *workspacesdk.AgentReconnectingPTYInit) {
+ arp.Container = ct.Container.ID
+ })
+ require.NoError(t, err, "failed to create ReconnectingPTY")
+ defer ac.Close()
+ tr := testutil.NewTerminalReader(t, ac)
+
+ require.NoError(t, tr.ReadUntil(ctx, func(line string) bool {
+ return strings.Contains(line, "#") || strings.Contains(line, "$")
+ }), "find prompt")
+
+ require.NoError(t, json.NewEncoder(ac).Encode(workspacesdk.ReconnectingPTYRequest{
+ Data: "hostname\r",
+ }), "write hostname")
+ require.NoError(t, tr.ReadUntil(ctx, func(line string) bool {
+ return strings.Contains(line, "hostname")
+ }), "find hostname command")
+
+ require.NoError(t, tr.ReadUntil(ctx, func(line string) bool {
+ return strings.Contains(line, ct.Container.Config.Hostname)
+ }), "find hostname output")
+ require.NoError(t, json.NewEncoder(ac).Encode(workspacesdk.ReconnectingPTYRequest{
+ Data: "exit\r",
+ }), "write exit command")
+
+ // Wait for the connection to close.
+ require.ErrorIs(t, tr.ReadUntil(ctx, nil), io.EOF)
+}
+
func TestAgent_Dial(t *testing.T) {
t.Parallel()
diff --git a/agent/agentcontainers/containers_dockercli.go b/agent/agentcontainers/containers_dockercli.go
index 64f264c1ba730..27e5f835d5adb 100644
--- a/agent/agentcontainers/containers_dockercli.go
+++ b/agent/agentcontainers/containers_dockercli.go
@@ -6,7 +6,6 @@ import (
"context"
"encoding/json"
"fmt"
- "os"
"os/user"
"slices"
"sort"
@@ -15,6 +14,7 @@ import (
"time"
"github.com/coder/coder/v2/agent/agentexec"
+ "github.com/coder/coder/v2/agent/usershell"
"github.com/coder/coder/v2/codersdk"
"golang.org/x/exp/maps"
@@ -37,6 +37,7 @@ func NewDocker(execer agentexec.Execer) Lister {
// DockerEnvInfoer is an implementation of agentssh.EnvInfoer that returns
// information about a container.
type DockerEnvInfoer struct {
+ usershell.SystemEnvInfo
container string
user *user.User
userShell string
@@ -122,26 +123,13 @@ func EnvInfo(ctx context.Context, execer agentexec.Execer, container, containerU
return &dei, nil
}
-func (dei *DockerEnvInfoer) CurrentUser() (*user.User, error) {
+func (dei *DockerEnvInfoer) User() (*user.User, error) {
// Clone the user so that the caller can't modify it
u := *dei.user
return &u, nil
}
-func (*DockerEnvInfoer) Environ() []string {
- // Return a clone of the environment so that the caller can't modify it
- return os.Environ()
-}
-
-func (*DockerEnvInfoer) UserHomeDir() (string, error) {
- // We default the working directory of the command to the user's home
- // directory. Since this came from inside the container, we cannot guarantee
- // that this exists on the host. Return the "real" home directory of the user
- // instead.
- return os.UserHomeDir()
-}
-
-func (dei *DockerEnvInfoer) UserShell(string) (string, error) {
+func (dei *DockerEnvInfoer) Shell(string) (string, error) {
return dei.userShell, nil
}
diff --git a/agent/agentcontainers/containers_internal_test.go b/agent/agentcontainers/containers_internal_test.go
index cdda03f9c8200..d48b95ebd74a6 100644
--- a/agent/agentcontainers/containers_internal_test.go
+++ b/agent/agentcontainers/containers_internal_test.go
@@ -502,15 +502,15 @@ func TestDockerEnvInfoer(t *testing.T) {
dei, err := EnvInfo(ctx, agentexec.DefaultExecer, ct.Container.ID, tt.containerUser)
require.NoError(t, err, "Expected no error from DockerEnvInfo()")
- u, err := dei.CurrentUser()
+ u, err := dei.User()
require.NoError(t, err, "Expected no error from CurrentUser()")
require.Equal(t, tt.expectedUsername, u.Username, "Expected username to match")
- hd, err := dei.UserHomeDir()
+ hd, err := dei.HomeDir()
require.NoError(t, err, "Expected no error from UserHomeDir()")
require.NotEmpty(t, hd, "Expected user homedir to be non-empty")
- sh, err := dei.UserShell(tt.containerUser)
+ sh, err := dei.Shell(tt.containerUser)
require.NoError(t, err, "Expected no error from UserShell()")
require.Equal(t, tt.expectedUserShell, sh, "Expected user shell to match")
diff --git a/agent/agentssh/agentssh.go b/agent/agentssh/agentssh.go
index a7e028541aa6e..d5fe945c49939 100644
--- a/agent/agentssh/agentssh.go
+++ b/agent/agentssh/agentssh.go
@@ -698,45 +698,6 @@ func (s *Server) sftpHandler(logger slog.Logger, session ssh.Session) {
_ = session.Exit(1)
}
-// EnvInfoer encapsulates external information required by CreateCommand.
-type EnvInfoer interface {
- // CurrentUser returns the current user.
- CurrentUser() (*user.User, error)
- // Environ returns the environment variables of the current process.
- Environ() []string
- // UserHomeDir returns the home directory of the current user.
- UserHomeDir() (string, error)
- // UserShell returns the shell of the given user.
- UserShell(username string) (string, error)
-}
-
-type systemEnvInfoer struct{}
-
-var defaultEnvInfoer EnvInfoer = &systemEnvInfoer{}
-
-// DefaultEnvInfoer returns a default implementation of
-// EnvInfoer. This reads information using the default Go
-// implementations.
-func DefaultEnvInfoer() EnvInfoer {
- return defaultEnvInfoer
-}
-
-func (systemEnvInfoer) CurrentUser() (*user.User, error) {
- return user.Current()
-}
-
-func (systemEnvInfoer) Environ() []string {
- return os.Environ()
-}
-
-func (systemEnvInfoer) UserHomeDir() (string, error) {
- return userHomeDir()
-}
-
-func (systemEnvInfoer) UserShell(username string) (string, error) {
- return usershell.Get(username)
-}
-
// CreateCommand processes raw command input with OpenSSH-like behavior.
// If the script provided is empty, it will default to the users shell.
// This injects environment variables specified by the user at launch too.
@@ -744,17 +705,17 @@ func (systemEnvInfoer) UserShell(username string) (string, error) {
// alternative implementations for the dependencies of CreateCommand.
// This is useful when creating a command to be run in a separate environment
// (for example, a Docker container). Pass in nil to use the default.
-func (s *Server) CreateCommand(ctx context.Context, script string, env []string, deps EnvInfoer) (*pty.Cmd, error) {
- if deps == nil {
- deps = DefaultEnvInfoer()
+func (s *Server) CreateCommand(ctx context.Context, script string, env []string, ei usershell.EnvInfoer) (*pty.Cmd, error) {
+ if ei == nil {
+ ei = &usershell.SystemEnvInfo{}
}
- currentUser, err := deps.CurrentUser()
+ currentUser, err := ei.User()
if err != nil {
return nil, xerrors.Errorf("get current user: %w", err)
}
username := currentUser.Username
- shell, err := deps.UserShell(username)
+ shell, err := ei.Shell(username)
if err != nil {
return nil, xerrors.Errorf("get user shell: %w", err)
}
@@ -802,7 +763,18 @@ func (s *Server) CreateCommand(ctx context.Context, script string, env []string,
}
}
- cmd := s.Execer.PTYCommandContext(ctx, name, args...)
+ // Modify command prior to execution. This will usually be a no-op, but not
+ // always. For example, to run a command in a Docker container, we need to
+ // modify the command to be `docker exec -it `.
+ modifiedName, modifiedArgs := ei.ModifyCommand(name, args...)
+ // Log if the command was modified.
+ if modifiedName != name && slices.Compare(modifiedArgs, args) != 0 {
+ s.logger.Debug(ctx, "modified command",
+ slog.F("before", append([]string{name}, args...)),
+ slog.F("after", append([]string{modifiedName}, modifiedArgs...)),
+ )
+ }
+ cmd := s.Execer.PTYCommandContext(ctx, modifiedName, modifiedArgs...)
cmd.Dir = s.config.WorkingDirectory()
// If the metadata directory doesn't exist, we run the command
@@ -810,13 +782,13 @@ func (s *Server) CreateCommand(ctx context.Context, script string, env []string,
_, err = os.Stat(cmd.Dir)
if cmd.Dir == "" || err != nil {
// Default to user home if a directory is not set.
- homedir, err := deps.UserHomeDir()
+ homedir, err := ei.HomeDir()
if err != nil {
return nil, xerrors.Errorf("get home dir: %w", err)
}
cmd.Dir = homedir
}
- cmd.Env = append(deps.Environ(), env...)
+ cmd.Env = append(ei.Environ(), env...)
cmd.Env = append(cmd.Env, fmt.Sprintf("USER=%s", username))
// Set SSH connection environment variables (these are also set by OpenSSH
diff --git a/agent/agentssh/agentssh_test.go b/agent/agentssh/agentssh_test.go
index 378657ebee5ad..6b0706e95db44 100644
--- a/agent/agentssh/agentssh_test.go
+++ b/agent/agentssh/agentssh_test.go
@@ -124,7 +124,7 @@ type fakeEnvInfoer struct {
UserShellFn func(string) (string, error)
}
-func (f *fakeEnvInfoer) CurrentUser() (u *user.User, err error) {
+func (f *fakeEnvInfoer) User() (u *user.User, err error) {
return f.CurrentUserFn()
}
@@ -132,14 +132,18 @@ func (f *fakeEnvInfoer) Environ() []string {
return f.EnvironFn()
}
-func (f *fakeEnvInfoer) UserHomeDir() (string, error) {
+func (f *fakeEnvInfoer) HomeDir() (string, error) {
return f.UserHomeDirFn()
}
-func (f *fakeEnvInfoer) UserShell(u string) (string, error) {
+func (f *fakeEnvInfoer) Shell(u string) (string, error) {
return f.UserShellFn(u)
}
+func (*fakeEnvInfoer) ModifyCommand(cmd string, args ...string) (string, []string) {
+ return cmd, args
+}
+
func TestNewServer_CloseActiveConnections(t *testing.T) {
t.Parallel()
diff --git a/agent/reconnectingpty/server.go b/agent/reconnectingpty/server.go
index 465667c616180..ab4ce854c789c 100644
--- a/agent/reconnectingpty/server.go
+++ b/agent/reconnectingpty/server.go
@@ -14,7 +14,9 @@ import (
"golang.org/x/xerrors"
"cdr.dev/slog"
+ "github.com/coder/coder/v2/agent/agentcontainers"
"github.com/coder/coder/v2/agent/agentssh"
+ "github.com/coder/coder/v2/agent/usershell"
"github.com/coder/coder/v2/codersdk/workspacesdk"
)
@@ -26,20 +28,26 @@ type Server struct {
connCount atomic.Int64
reconnectingPTYs sync.Map
timeout time.Duration
+
+ ExperimentalContainersEnabled bool
}
// NewServer returns a new ReconnectingPTY server
func NewServer(logger slog.Logger, commandCreator *agentssh.Server,
connectionsTotal prometheus.Counter, errorsTotal *prometheus.CounterVec,
- timeout time.Duration,
+ timeout time.Duration, opts ...func(*Server),
) *Server {
- return &Server{
+ s := &Server{
logger: logger,
commandCreator: commandCreator,
connectionsTotal: connectionsTotal,
errorsTotal: errorsTotal,
timeout: timeout,
}
+ for _, o := range opts {
+ o(s)
+ }
+ return s
}
func (s *Server) Serve(ctx, hardCtx context.Context, l net.Listener) (retErr error) {
@@ -116,7 +124,7 @@ func (s *Server) handleConn(ctx context.Context, logger slog.Logger, conn net.Co
}
connectionID := uuid.NewString()
- connLogger := logger.With(slog.F("message_id", msg.ID), slog.F("connection_id", connectionID))
+ connLogger := logger.With(slog.F("message_id", msg.ID), slog.F("connection_id", connectionID), slog.F("container", msg.Container), slog.F("container_user", msg.ContainerUser))
connLogger.Debug(ctx, "starting handler")
defer func() {
@@ -158,8 +166,17 @@ func (s *Server) handleConn(ctx context.Context, logger slog.Logger, conn net.Co
}
}()
+ var ei usershell.EnvInfoer
+ if s.ExperimentalContainersEnabled && msg.Container != "" {
+ dei, err := agentcontainers.EnvInfo(ctx, s.commandCreator.Execer, msg.Container, msg.ContainerUser)
+ if err != nil {
+ return xerrors.Errorf("get container env info: %w", err)
+ }
+ ei = dei
+ s.logger.Info(ctx, "got container env info", slog.F("container", msg.Container))
+ }
// Empty command will default to the users shell!
- cmd, err := s.commandCreator.CreateCommand(ctx, msg.Command, nil, nil)
+ cmd, err := s.commandCreator.CreateCommand(ctx, msg.Command, nil, ei)
if err != nil {
s.errorsTotal.WithLabelValues("create_command").Add(1)
return xerrors.Errorf("create command: %w", err)
diff --git a/agent/usershell/usershell.go b/agent/usershell/usershell.go
new file mode 100644
index 0000000000000..9400dc91679da
--- /dev/null
+++ b/agent/usershell/usershell.go
@@ -0,0 +1,66 @@
+package usershell
+
+import (
+ "os"
+ "os/user"
+
+ "golang.org/x/xerrors"
+)
+
+// HomeDir returns the home directory of the current user, giving
+// priority to the $HOME environment variable.
+// Deprecated: use EnvInfoer.HomeDir() instead.
+func HomeDir() (string, error) {
+ // First we check the environment.
+ homedir, err := os.UserHomeDir()
+ if err == nil {
+ return homedir, nil
+ }
+
+ // As a fallback, we try the user information.
+ u, err := user.Current()
+ if err != nil {
+ return "", xerrors.Errorf("current user: %w", err)
+ }
+ return u.HomeDir, nil
+}
+
+// EnvInfoer encapsulates external information about the environment.
+type EnvInfoer interface {
+ // User returns the current user.
+ User() (*user.User, error)
+ // Environ returns the environment variables of the current process.
+ Environ() []string
+ // HomeDir returns the home directory of the current user.
+ HomeDir() (string, error)
+ // Shell returns the shell of the given user.
+ Shell(username string) (string, error)
+ // ModifyCommand modifies the command and arguments before execution based on
+ // the environment. This is useful for executing a command inside a container.
+ // In the default case, the command and arguments are returned unchanged.
+ ModifyCommand(name string, args ...string) (string, []string)
+}
+
+// SystemEnvInfo encapsulates the information about the environment
+// just using the default Go implementations.
+type SystemEnvInfo struct{}
+
+func (SystemEnvInfo) User() (*user.User, error) {
+ return user.Current()
+}
+
+func (SystemEnvInfo) Environ() []string {
+ return os.Environ()
+}
+
+func (SystemEnvInfo) HomeDir() (string, error) {
+ return HomeDir()
+}
+
+func (SystemEnvInfo) Shell(username string) (string, error) {
+ return Get(username)
+}
+
+func (SystemEnvInfo) ModifyCommand(name string, args ...string) (string, []string) {
+ return name, args
+}
diff --git a/agent/usershell/usershell_darwin.go b/agent/usershell/usershell_darwin.go
index 0f5be08f82631..5f221bc43ed39 100644
--- a/agent/usershell/usershell_darwin.go
+++ b/agent/usershell/usershell_darwin.go
@@ -10,6 +10,7 @@ import (
)
// Get returns the $SHELL environment variable.
+// Deprecated: use SystemEnvInfo.Shell instead.
func Get(username string) (string, error) {
// This command will output "UserShell: /bin/zsh" if successful, we
// can ignore the error since we have fallback behavior.
diff --git a/agent/usershell/usershell_other.go b/agent/usershell/usershell_other.go
index d015b7ebf4111..6ee3ad2368faf 100644
--- a/agent/usershell/usershell_other.go
+++ b/agent/usershell/usershell_other.go
@@ -11,6 +11,7 @@ import (
)
// Get returns the /etc/passwd entry for the username provided.
+// Deprecated: use SystemEnvInfo.Shell instead.
func Get(username string) (string, error) {
contents, err := os.ReadFile("/etc/passwd")
if err != nil {
diff --git a/agent/usershell/usershell_windows.go b/agent/usershell/usershell_windows.go
index e12537bf3a99f..52823d900de99 100644
--- a/agent/usershell/usershell_windows.go
+++ b/agent/usershell/usershell_windows.go
@@ -3,6 +3,7 @@ package usershell
import "os/exec"
// Get returns the command prompt binary name.
+// Deprecated: use SystemEnvInfo.Shell instead.
func Get(username string) (string, error) {
_, err := exec.LookPath("pwsh.exe")
if err == nil {
diff --git a/cli/agent.go b/cli/agent.go
index e8a46a84e071c..01d6c36f7a045 100644
--- a/cli/agent.go
+++ b/cli/agent.go
@@ -351,6 +351,8 @@ func (r *RootCmd) workspaceAgent() *serpent.Command {
BlockFileTransfer: blockFileTransfer,
Execer: execer,
ContainerLister: containerLister,
+
+ ExperimentalContainersEnabled: devcontainersEnabled,
})
promHandler := agent.PrometheusMetricsHandler(prometheusRegistry, logger)
diff --git a/coderd/workspaceapps/proxy.go b/coderd/workspaceapps/proxy.go
index 04c3dec0c6c0d..ab67e6c260349 100644
--- a/coderd/workspaceapps/proxy.go
+++ b/coderd/workspaceapps/proxy.go
@@ -653,6 +653,8 @@ func (s *Server) workspaceAgentPTY(rw http.ResponseWriter, r *http.Request) {
reconnect := parser.RequiredNotEmpty("reconnect").UUID(values, uuid.New(), "reconnect")
height := parser.UInt(values, 80, "height")
width := parser.UInt(values, 80, "width")
+ container := parser.String(values, "", "container")
+ containerUser := parser.String(values, "", "container_user")
if len(parser.Errors) > 0 {
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
Message: "Invalid query parameters.",
@@ -690,7 +692,10 @@ func (s *Server) workspaceAgentPTY(rw http.ResponseWriter, r *http.Request) {
}
defer release()
log.Debug(ctx, "dialed workspace agent")
- ptNetConn, err := agentConn.ReconnectingPTY(ctx, reconnect, uint16(height), uint16(width), r.URL.Query().Get("command"))
+ ptNetConn, err := agentConn.ReconnectingPTY(ctx, reconnect, uint16(height), uint16(width), r.URL.Query().Get("command"), func(arp *workspacesdk.AgentReconnectingPTYInit) {
+ arp.Container = container
+ arp.ContainerUser = containerUser
+ })
if err != nil {
log.Debug(ctx, "dial reconnecting pty server in workspace agent", slog.Error(err))
_ = conn.Close(websocket.StatusInternalError, httpapi.WebsocketCloseSprintf("dial: %s", err))
diff --git a/codersdk/workspacesdk/agentconn.go b/codersdk/workspacesdk/agentconn.go
index f803f8736a6fa..6fa06c0ab5bd6 100644
--- a/codersdk/workspacesdk/agentconn.go
+++ b/codersdk/workspacesdk/agentconn.go
@@ -93,6 +93,24 @@ type AgentReconnectingPTYInit struct {
Height uint16
Width uint16
Command string
+ // Container, if set, will attempt to exec into a running container visible to the agent.
+ // This should be a unique container ID (implementation-dependent).
+ Container string
+ // ContainerUser, if set, will set the target user when execing into a container.
+ // This can be a username or UID, depending on the underlying implementation.
+ // This is ignored if Container is not set.
+ ContainerUser string
+}
+
+// AgentReconnectingPTYInitOption is a functional option for AgentReconnectingPTYInit.
+type AgentReconnectingPTYInitOption func(*AgentReconnectingPTYInit)
+
+// AgentReconnectingPTYInitWithContainer sets the container and container user for the reconnecting PTY session.
+func AgentReconnectingPTYInitWithContainer(container, containerUser string) AgentReconnectingPTYInitOption {
+ return func(init *AgentReconnectingPTYInit) {
+ init.Container = container
+ init.ContainerUser = containerUser
+ }
}
// ReconnectingPTYRequest is sent from the client to the server
@@ -107,7 +125,7 @@ type ReconnectingPTYRequest struct {
// ReconnectingPTY spawns a new reconnecting terminal session.
// `ReconnectingPTYRequest` should be JSON marshaled and written to the returned net.Conn.
// Raw terminal output will be read from the returned net.Conn.
-func (c *AgentConn) ReconnectingPTY(ctx context.Context, id uuid.UUID, height, width uint16, command string) (net.Conn, error) {
+func (c *AgentConn) ReconnectingPTY(ctx context.Context, id uuid.UUID, height, width uint16, command string, initOpts ...AgentReconnectingPTYInitOption) (net.Conn, error) {
ctx, span := tracing.StartSpan(ctx)
defer span.End()
@@ -119,12 +137,16 @@ func (c *AgentConn) ReconnectingPTY(ctx context.Context, id uuid.UUID, height, w
if err != nil {
return nil, err
}
- data, err := json.Marshal(AgentReconnectingPTYInit{
+ rptyInit := AgentReconnectingPTYInit{
ID: id,
Height: height,
Width: width,
Command: command,
- })
+ }
+ for _, o := range initOpts {
+ o(&rptyInit)
+ }
+ data, err := json.Marshal(rptyInit)
if err != nil {
_ = conn.Close()
return nil, err
diff --git a/codersdk/workspacesdk/workspacesdk.go b/codersdk/workspacesdk/workspacesdk.go
index 17b22a363d6a0..9f50622635568 100644
--- a/codersdk/workspacesdk/workspacesdk.go
+++ b/codersdk/workspacesdk/workspacesdk.go
@@ -12,12 +12,14 @@ import (
"strconv"
"strings"
- "github.com/google/uuid"
- "golang.org/x/xerrors"
"tailscale.com/tailcfg"
"tailscale.com/wgengine/capture"
+ "github.com/google/uuid"
+ "golang.org/x/xerrors"
+
"cdr.dev/slog"
+
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/tailnet"
"github.com/coder/coder/v2/tailnet/proto"
@@ -305,6 +307,16 @@ type WorkspaceAgentReconnectingPTYOpts struct {
// issue-reconnecting-pty-signed-token endpoint. If set, the session token
// on the client will not be sent.
SignedToken string
+
+ // Experimental: Container, if set, will attempt to exec into a running container
+ // visible to the agent. This should be a unique container ID
+ // (implementation-dependent).
+ // ContainerUser is the user to exec as inside the container.
+ // NOTE: This feature is currently experimental and is currently "opt-in".
+ // In order to use this feature, the agent must have the environment variable
+ // CODER_AGENT_DEVCONTAINERS_ENABLE set to "true".
+ Container string
+ ContainerUser string
}
// AgentReconnectingPTY spawns a PTY that reconnects using the token provided.
@@ -320,6 +332,12 @@ func (c *Client) AgentReconnectingPTY(ctx context.Context, opts WorkspaceAgentRe
q.Set("width", strconv.Itoa(int(opts.Width)))
q.Set("height", strconv.Itoa(int(opts.Height)))
q.Set("command", opts.Command)
+ if opts.Container != "" {
+ q.Set("container", opts.Container)
+ }
+ if opts.ContainerUser != "" {
+ q.Set("container_user", opts.ContainerUser)
+ }
// If we're using a signed token, set the query parameter.
if opts.SignedToken != "" {
q.Set(codersdk.SignedAppTokenQueryParameter, opts.SignedToken)
From 38c0e8a086bdd977d5cad908b446f79c99cdcc68 Mon Sep 17 00:00:00 2001
From: Thomas Kosiewski
Date: Wed, 26 Feb 2025 11:45:35 +0100
Subject: [PATCH 02/44] fix(agent/agentssh): ensure RSA key generation always
produces valid keys (#16694)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Modify the RSA key generation algorithm to check that GCD(e, p-1) = 1 and
GCD(e, q-1) = 1 when selecting prime numbers, ensuring that e and φ(n)
are coprime. This prevents ModInverse from returning nil, which would
cause private key generation to fail and result in a panic when `Precompute` is called.
Change-Id: I0a453e1e1f8c638e40e7a4b87a6d0d7299e1cb5d
Signed-off-by: Thomas Kosiewski
---
agent/agentrsa/key.go | 87 ++++++++++++++++++++++++++++++++++++++
agent/agentrsa/key_test.go | 50 ++++++++++++++++++++++
agent/agentssh/agentssh.go | 74 +-------------------------------
3 files changed, 139 insertions(+), 72 deletions(-)
create mode 100644 agent/agentrsa/key.go
create mode 100644 agent/agentrsa/key_test.go
diff --git a/agent/agentrsa/key.go b/agent/agentrsa/key.go
new file mode 100644
index 0000000000000..fd70d0b7bfa9e
--- /dev/null
+++ b/agent/agentrsa/key.go
@@ -0,0 +1,87 @@
+package agentrsa
+
+import (
+ "crypto/rsa"
+ "math/big"
+ "math/rand"
+)
+
+// GenerateDeterministicKey generates an RSA private key deterministically based on the provided seed.
+// This function uses a deterministic random source to generate the primes p and q, ensuring that the
+// same seed will always produce the same private key. The generated key is 2048 bits in size.
+//
+// Reference: https://pkg.go.dev/crypto/rsa#GenerateKey
+func GenerateDeterministicKey(seed int64) *rsa.PrivateKey {
+ // Since the standard lib purposefully does not generate
+ // deterministic rsa keys, we need to do it ourselves.
+
+ // Create deterministic random source
+ // nolint: gosec
+ deterministicRand := rand.New(rand.NewSource(seed))
+
+ // Use fixed values for p and q based on the seed
+ p := big.NewInt(0)
+ q := big.NewInt(0)
+ e := big.NewInt(65537) // Standard RSA public exponent
+
+ for {
+ // Generate deterministic primes using the seeded random
+ // Each prime should be ~1024 bits to get a 2048-bit key
+ for {
+ p.SetBit(p, 1024, 1) // Ensure it's large enough
+ for i := range 1024 {
+ if deterministicRand.Int63()%2 == 1 {
+ p.SetBit(p, i, 1)
+ } else {
+ p.SetBit(p, i, 0)
+ }
+ }
+ p1 := new(big.Int).Sub(p, big.NewInt(1))
+ if p.ProbablyPrime(20) && new(big.Int).GCD(nil, nil, e, p1).Cmp(big.NewInt(1)) == 0 {
+ break
+ }
+ }
+
+ for {
+ q.SetBit(q, 1024, 1) // Ensure it's large enough
+ for i := range 1024 {
+ if deterministicRand.Int63()%2 == 1 {
+ q.SetBit(q, i, 1)
+ } else {
+ q.SetBit(q, i, 0)
+ }
+ }
+ q1 := new(big.Int).Sub(q, big.NewInt(1))
+ if q.ProbablyPrime(20) && p.Cmp(q) != 0 && new(big.Int).GCD(nil, nil, e, q1).Cmp(big.NewInt(1)) == 0 {
+ break
+ }
+ }
+
+ // Calculate phi = (p-1) * (q-1)
+ p1 := new(big.Int).Sub(p, big.NewInt(1))
+ q1 := new(big.Int).Sub(q, big.NewInt(1))
+ phi := new(big.Int).Mul(p1, q1)
+
+ // Calculate private exponent d
+ d := new(big.Int).ModInverse(e, phi)
+ if d != nil {
+ // Calculate n = p * q
+ n := new(big.Int).Mul(p, q)
+
+ // Create the private key
+ privateKey := &rsa.PrivateKey{
+ PublicKey: rsa.PublicKey{
+ N: n,
+ E: int(e.Int64()),
+ },
+ D: d,
+ Primes: []*big.Int{p, q},
+ }
+
+ // Compute precomputed values
+ privateKey.Precompute()
+
+ return privateKey
+ }
+ }
+}
diff --git a/agent/agentrsa/key_test.go b/agent/agentrsa/key_test.go
new file mode 100644
index 0000000000000..dc561d09d4e07
--- /dev/null
+++ b/agent/agentrsa/key_test.go
@@ -0,0 +1,50 @@
+package agentrsa_test
+
+import (
+ "crypto/rsa"
+ "math/rand/v2"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/coder/coder/v2/agent/agentrsa"
+)
+
+func TestGenerateDeterministicKey(t *testing.T) {
+ t.Parallel()
+
+ key1 := agentrsa.GenerateDeterministicKey(1234)
+ key2 := agentrsa.GenerateDeterministicKey(1234)
+
+ assert.Equal(t, key1, key2)
+ assert.EqualExportedValues(t, key1, key2)
+}
+
+var result *rsa.PrivateKey
+
+func BenchmarkGenerateDeterministicKey(b *testing.B) {
+ var r *rsa.PrivateKey
+
+ for range b.N {
+ // always record the result of DeterministicPrivateKey to prevent
+ // the compiler eliminating the function call.
+ r = agentrsa.GenerateDeterministicKey(rand.Int64())
+ }
+
+ // always store the result to a package level variable
+ // so the compiler cannot eliminate the Benchmark itself.
+ result = r
+}
+
+func FuzzGenerateDeterministicKey(f *testing.F) {
+ testcases := []int64{0, 1234, 1010101010}
+ for _, tc := range testcases {
+ f.Add(tc) // Use f.Add to provide a seed corpus
+ }
+ f.Fuzz(func(t *testing.T, seed int64) {
+ key1 := agentrsa.GenerateDeterministicKey(seed)
+ key2 := agentrsa.GenerateDeterministicKey(seed)
+ assert.Equal(t, key1, key2)
+ assert.EqualExportedValues(t, key1, key2)
+ })
+}
diff --git a/agent/agentssh/agentssh.go b/agent/agentssh/agentssh.go
index d5fe945c49939..3b09df0e388dd 100644
--- a/agent/agentssh/agentssh.go
+++ b/agent/agentssh/agentssh.go
@@ -3,12 +3,9 @@ package agentssh
import (
"bufio"
"context"
- "crypto/rsa"
"errors"
"fmt"
"io"
- "math/big"
- "math/rand"
"net"
"os"
"os/exec"
@@ -33,6 +30,7 @@ import (
"cdr.dev/slog"
"github.com/coder/coder/v2/agent/agentexec"
+ "github.com/coder/coder/v2/agent/agentrsa"
"github.com/coder/coder/v2/agent/usershell"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/pty"
@@ -1092,75 +1090,7 @@ func CoderSigner(seed int64) (gossh.Signer, error) {
// Clients should ignore the host key when connecting.
// The agent needs to authenticate with coderd to SSH,
// so SSH authentication doesn't improve security.
-
- // Since the standard lib purposefully does not generate
- // deterministic rsa keys, we need to do it ourselves.
- coderHostKey := func() *rsa.PrivateKey {
- // Create deterministic random source
- // nolint: gosec
- deterministicRand := rand.New(rand.NewSource(seed))
-
- // Use fixed values for p and q based on the seed
- p := big.NewInt(0)
- q := big.NewInt(0)
- e := big.NewInt(65537) // Standard RSA public exponent
-
- // Generate deterministic primes using the seeded random
- // Each prime should be ~1024 bits to get a 2048-bit key
- for {
- p.SetBit(p, 1024, 1) // Ensure it's large enough
- for i := 0; i < 1024; i++ {
- if deterministicRand.Int63()%2 == 1 {
- p.SetBit(p, i, 1)
- } else {
- p.SetBit(p, i, 0)
- }
- }
- if p.ProbablyPrime(20) {
- break
- }
- }
-
- for {
- q.SetBit(q, 1024, 1) // Ensure it's large enough
- for i := 0; i < 1024; i++ {
- if deterministicRand.Int63()%2 == 1 {
- q.SetBit(q, i, 1)
- } else {
- q.SetBit(q, i, 0)
- }
- }
- if q.ProbablyPrime(20) && p.Cmp(q) != 0 {
- break
- }
- }
-
- // Calculate n = p * q
- n := new(big.Int).Mul(p, q)
-
- // Calculate phi = (p-1) * (q-1)
- p1 := new(big.Int).Sub(p, big.NewInt(1))
- q1 := new(big.Int).Sub(q, big.NewInt(1))
- phi := new(big.Int).Mul(p1, q1)
-
- // Calculate private exponent d
- d := new(big.Int).ModInverse(e, phi)
-
- // Create the private key
- privateKey := &rsa.PrivateKey{
- PublicKey: rsa.PublicKey{
- N: n,
- E: int(e.Int64()),
- },
- D: d,
- Primes: []*big.Int{p, q},
- }
-
- // Compute precomputed values
- privateKey.Precompute()
-
- return privateKey
- }()
+ coderHostKey := agentrsa.GenerateDeterministicKey(seed)
coderSigner, err := gossh.NewSignerFromKey(coderHostKey)
return coderSigner, err
From c5a265fbc3316b56d3b179067dd55692222aba25 Mon Sep 17 00:00:00 2001
From: Cian Johnston
Date: Wed, 26 Feb 2025 12:32:57 +0000
Subject: [PATCH 03/44] feat(cli): add experimental rpty command (#16700)
Relates to https://github.com/coder/coder/issues/16419
Builds upon https://github.com/coder/coder/pull/16638 and adds a command
`exp rpty` that allows you to open a ReconnectingPTY session to an
agent.
This ultimately allows us to add an integration-style CLI test to verify
the functionality added in #16638 .
---
cli/dotfiles_test.go | 4 +
cli/exp.go | 1 +
cli/{errors.go => exp_errors.go} | 0
cli/{errors_test.go => exp_errors_test.go} | 0
cli/{prompts.go => exp_prompts.go} | 0
cli/exp_rpty.go | 216 +++++++++++++++++++++
cli/exp_rpty_test.go | 112 +++++++++++
7 files changed, 333 insertions(+)
rename cli/{errors.go => exp_errors.go} (100%)
rename cli/{errors_test.go => exp_errors_test.go} (100%)
rename cli/{prompts.go => exp_prompts.go} (100%)
create mode 100644 cli/exp_rpty.go
create mode 100644 cli/exp_rpty_test.go
diff --git a/cli/dotfiles_test.go b/cli/dotfiles_test.go
index 2f16929cc24ff..002f001e04574 100644
--- a/cli/dotfiles_test.go
+++ b/cli/dotfiles_test.go
@@ -17,6 +17,10 @@ import (
func TestDotfiles(t *testing.T) {
t.Parallel()
+ // This test will time out if the user has commit signing enabled.
+ if _, gpgTTYFound := os.LookupEnv("GPG_TTY"); gpgTTYFound {
+ t.Skip("GPG_TTY is set, skipping test to avoid hanging")
+ }
t.Run("MissingArg", func(t *testing.T) {
t.Parallel()
inv, _ := clitest.New(t, "dotfiles")
diff --git a/cli/exp.go b/cli/exp.go
index 5c72d0f9fcd20..2339da86313a6 100644
--- a/cli/exp.go
+++ b/cli/exp.go
@@ -14,6 +14,7 @@ func (r *RootCmd) expCmd() *serpent.Command {
r.scaletestCmd(),
r.errorExample(),
r.promptExample(),
+ r.rptyCommand(),
},
}
return cmd
diff --git a/cli/errors.go b/cli/exp_errors.go
similarity index 100%
rename from cli/errors.go
rename to cli/exp_errors.go
diff --git a/cli/errors_test.go b/cli/exp_errors_test.go
similarity index 100%
rename from cli/errors_test.go
rename to cli/exp_errors_test.go
diff --git a/cli/prompts.go b/cli/exp_prompts.go
similarity index 100%
rename from cli/prompts.go
rename to cli/exp_prompts.go
diff --git a/cli/exp_rpty.go b/cli/exp_rpty.go
new file mode 100644
index 0000000000000..ddfdc15ece58d
--- /dev/null
+++ b/cli/exp_rpty.go
@@ -0,0 +1,216 @@
+package cli
+
+import (
+ "bufio"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/google/uuid"
+ "github.com/mattn/go-isatty"
+ "golang.org/x/term"
+ "golang.org/x/xerrors"
+
+ "github.com/coder/coder/v2/cli/cliui"
+ "github.com/coder/coder/v2/codersdk"
+ "github.com/coder/coder/v2/codersdk/workspacesdk"
+ "github.com/coder/coder/v2/pty"
+ "github.com/coder/serpent"
+)
+
+func (r *RootCmd) rptyCommand() *serpent.Command {
+ var (
+ client = new(codersdk.Client)
+ args handleRPTYArgs
+ )
+
+ cmd := &serpent.Command{
+ Handler: func(inv *serpent.Invocation) error {
+ if r.disableDirect {
+ return xerrors.New("direct connections are disabled, but you can try websocat ;-)")
+ }
+ args.NamedWorkspace = inv.Args[0]
+ args.Command = inv.Args[1:]
+ return handleRPTY(inv, client, args)
+ },
+ Long: "Establish an RPTY session with a workspace/agent. This uses the same mechanism as the Web Terminal.",
+ Middleware: serpent.Chain(
+ serpent.RequireRangeArgs(1, -1),
+ r.InitClient(client),
+ ),
+ Options: []serpent.Option{
+ {
+ Name: "container",
+ Description: "The container name or ID to connect to.",
+ Flag: "container",
+ FlagShorthand: "c",
+ Default: "",
+ Value: serpent.StringOf(&args.Container),
+ },
+ {
+ Name: "container-user",
+ Description: "The user to connect as.",
+ Flag: "container-user",
+ FlagShorthand: "u",
+ Default: "",
+ Value: serpent.StringOf(&args.ContainerUser),
+ },
+ {
+ Name: "reconnect",
+ Description: "The reconnect ID to use.",
+ Flag: "reconnect",
+ FlagShorthand: "r",
+ Default: "",
+ Value: serpent.StringOf(&args.ReconnectID),
+ },
+ },
+ Short: "Establish an RPTY session with a workspace/agent.",
+ Use: "rpty",
+ }
+
+ return cmd
+}
+
+type handleRPTYArgs struct {
+ Command []string
+ Container string
+ ContainerUser string
+ NamedWorkspace string
+ ReconnectID string
+}
+
+func handleRPTY(inv *serpent.Invocation, client *codersdk.Client, args handleRPTYArgs) error {
+ ctx, cancel := context.WithCancel(inv.Context())
+ defer cancel()
+
+ var reconnectID uuid.UUID
+ if args.ReconnectID != "" {
+ rid, err := uuid.Parse(args.ReconnectID)
+ if err != nil {
+ return xerrors.Errorf("invalid reconnect ID: %w", err)
+ }
+ reconnectID = rid
+ } else {
+ reconnectID = uuid.New()
+ }
+ ws, agt, err := getWorkspaceAndAgent(ctx, inv, client, true, args.NamedWorkspace)
+ if err != nil {
+ return err
+ }
+
+ var ctID string
+ if args.Container != "" {
+ cts, err := client.WorkspaceAgentListContainers(ctx, agt.ID, nil)
+ if err != nil {
+ return err
+ }
+ for _, ct := range cts.Containers {
+ if ct.FriendlyName == args.Container || ct.ID == args.Container {
+ ctID = ct.ID
+ break
+ }
+ }
+ if ctID == "" {
+ return xerrors.Errorf("container %q not found", args.Container)
+ }
+ }
+
+ if err := cliui.Agent(ctx, inv.Stderr, agt.ID, cliui.AgentOptions{
+ FetchInterval: 0,
+ Fetch: client.WorkspaceAgent,
+ Wait: false,
+ }); err != nil {
+ return err
+ }
+
+ // Get the width and height of the terminal.
+ var termWidth, termHeight uint16
+ stdoutFile, validOut := inv.Stdout.(*os.File)
+ if validOut && isatty.IsTerminal(stdoutFile.Fd()) {
+ w, h, err := term.GetSize(int(stdoutFile.Fd()))
+ if err == nil {
+ //nolint: gosec
+ termWidth, termHeight = uint16(w), uint16(h)
+ }
+ }
+
+ // Set stdin to raw mode so that control characters work.
+ stdinFile, validIn := inv.Stdin.(*os.File)
+ if validIn && isatty.IsTerminal(stdinFile.Fd()) {
+ inState, err := pty.MakeInputRaw(stdinFile.Fd())
+ if err != nil {
+ return xerrors.Errorf("failed to set input terminal to raw mode: %w", err)
+ }
+ defer func() {
+ _ = pty.RestoreTerminal(stdinFile.Fd(), inState)
+ }()
+ }
+
+ conn, err := workspacesdk.New(client).AgentReconnectingPTY(ctx, workspacesdk.WorkspaceAgentReconnectingPTYOpts{
+ AgentID: agt.ID,
+ Reconnect: reconnectID,
+ Command: strings.Join(args.Command, " "),
+ Container: ctID,
+ ContainerUser: args.ContainerUser,
+ Width: termWidth,
+ Height: termHeight,
+ })
+ if err != nil {
+ return xerrors.Errorf("open reconnecting PTY: %w", err)
+ }
+ defer conn.Close()
+
+ cliui.Infof(inv.Stderr, "Connected to %s (agent id: %s)", args.NamedWorkspace, agt.ID)
+ cliui.Infof(inv.Stderr, "Reconnect ID: %s", reconnectID)
+ closeUsage := client.UpdateWorkspaceUsageWithBodyContext(ctx, ws.ID, codersdk.PostWorkspaceUsageRequest{
+ AgentID: agt.ID,
+ AppName: codersdk.UsageAppNameReconnectingPty,
+ })
+ defer closeUsage()
+
+ br := bufio.NewScanner(inv.Stdin)
+ // Split on bytes, otherwise you have to send a newline to flush the buffer.
+ br.Split(bufio.ScanBytes)
+ je := json.NewEncoder(conn)
+
+ go func() {
+ for br.Scan() {
+ if err := je.Encode(map[string]string{
+ "data": br.Text(),
+ }); err != nil {
+ return
+ }
+ }
+ }()
+
+ windowChange := listenWindowSize(ctx)
+ go func() {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-windowChange:
+ }
+ width, height, err := term.GetSize(int(stdoutFile.Fd()))
+ if err != nil {
+ continue
+ }
+ if err := je.Encode(map[string]int{
+ "width": width,
+ "height": height,
+ }); err != nil {
+ cliui.Errorf(inv.Stderr, "Failed to send window size: %v", err)
+ }
+ }
+ }()
+
+ _, _ = io.Copy(inv.Stdout, conn)
+ cancel()
+ _ = conn.Close()
+ _, _ = fmt.Fprintf(inv.Stderr, "Connection closed\n")
+
+ return nil
+}
diff --git a/cli/exp_rpty_test.go b/cli/exp_rpty_test.go
new file mode 100644
index 0000000000000..2f0a24bf1cf41
--- /dev/null
+++ b/cli/exp_rpty_test.go
@@ -0,0 +1,112 @@
+package cli_test
+
+import (
+ "fmt"
+ "runtime"
+ "testing"
+
+ "github.com/ory/dockertest/v3"
+ "github.com/ory/dockertest/v3/docker"
+
+ "github.com/coder/coder/v2/agent"
+ "github.com/coder/coder/v2/agent/agenttest"
+ "github.com/coder/coder/v2/cli/clitest"
+ "github.com/coder/coder/v2/coderd/coderdtest"
+ "github.com/coder/coder/v2/pty/ptytest"
+ "github.com/coder/coder/v2/testutil"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestExpRpty(t *testing.T) {
+ t.Parallel()
+
+ t.Run("OK", func(t *testing.T) {
+ t.Parallel()
+
+ client, workspace, agentToken := setupWorkspaceForAgent(t)
+ inv, root := clitest.New(t, "exp", "rpty", workspace.Name)
+ clitest.SetupConfig(t, client, root)
+ pty := ptytest.New(t).Attach(inv)
+
+ ctx := testutil.Context(t, testutil.WaitLong)
+
+ cmdDone := tGo(t, func() {
+ err := inv.WithContext(ctx).Run()
+ assert.NoError(t, err)
+ })
+
+ _ = agenttest.New(t, client.URL, agentToken)
+ _ = coderdtest.NewWorkspaceAgentWaiter(t, client, workspace.ID).Wait()
+
+ pty.ExpectMatch(fmt.Sprintf("Connected to %s", workspace.Name))
+ pty.WriteLine("exit")
+ <-cmdDone
+ })
+
+ t.Run("NotFound", func(t *testing.T) {
+ t.Parallel()
+
+ client, _, _ := setupWorkspaceForAgent(t)
+ inv, root := clitest.New(t, "exp", "rpty", "not-found")
+ clitest.SetupConfig(t, client, root)
+
+ ctx := testutil.Context(t, testutil.WaitShort)
+ err := inv.WithContext(ctx).Run()
+ require.ErrorContains(t, err, "not found")
+ })
+
+ t.Run("Container", func(t *testing.T) {
+ t.Parallel()
+ // Skip this test on non-Linux platforms since it requires Docker
+ if runtime.GOOS != "linux" {
+ t.Skip("Skipping test on non-Linux platform")
+ }
+
+ client, workspace, agentToken := setupWorkspaceForAgent(t)
+ ctx := testutil.Context(t, testutil.WaitLong)
+ pool, err := dockertest.NewPool("")
+ require.NoError(t, err, "Could not connect to docker")
+ ct, err := pool.RunWithOptions(&dockertest.RunOptions{
+ Repository: "busybox",
+ Tag: "latest",
+ Cmd: []string{"sleep", "infinity"},
+ }, func(config *docker.HostConfig) {
+ config.AutoRemove = true
+ config.RestartPolicy = docker.RestartPolicy{Name: "no"}
+ })
+ require.NoError(t, err, "Could not start container")
+ // Wait for container to start
+ require.Eventually(t, func() bool {
+ ct, ok := pool.ContainerByName(ct.Container.Name)
+ return ok && ct.Container.State.Running
+ }, testutil.WaitShort, testutil.IntervalSlow, "Container did not start in time")
+ t.Cleanup(func() {
+ err := pool.Purge(ct)
+ require.NoError(t, err, "Could not stop container")
+ })
+
+ inv, root := clitest.New(t, "exp", "rpty", workspace.Name, "-c", ct.Container.ID)
+ clitest.SetupConfig(t, client, root)
+ pty := ptytest.New(t).Attach(inv)
+
+ cmdDone := tGo(t, func() {
+ err := inv.WithContext(ctx).Run()
+ assert.NoError(t, err)
+ })
+
+ _ = agenttest.New(t, client.URL, agentToken, func(o *agent.Options) {
+ o.ExperimentalContainersEnabled = true
+ })
+ _ = coderdtest.NewWorkspaceAgentWaiter(t, client, workspace.ID).Wait()
+
+ pty.ExpectMatch(fmt.Sprintf("Connected to %s", workspace.Name))
+ pty.ExpectMatch("Reconnect ID: ")
+ pty.ExpectMatch(" #")
+ pty.WriteLine("hostname")
+ pty.ExpectMatch(ct.Container.Config.Hostname)
+ pty.WriteLine("exit")
+ <-cmdDone
+ })
+}
From a2cc1b896f06afaa586154a216ba8ff6e8c01ecf Mon Sep 17 00:00:00 2001
From: Marcin Tojek
Date: Wed, 26 Feb 2025 14:16:48 +0100
Subject: [PATCH 04/44] fix: display premium banner on audit page when license
inactive (#16713)
Fixes: https://github.com/coder/coder/issues/14798
---
site/src/pages/AuditPage/AuditPage.tsx | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/site/src/pages/AuditPage/AuditPage.tsx b/site/src/pages/AuditPage/AuditPage.tsx
index efcf2068f19ad..fbf12260e57ce 100644
--- a/site/src/pages/AuditPage/AuditPage.tsx
+++ b/site/src/pages/AuditPage/AuditPage.tsx
@@ -16,6 +16,12 @@ import { AuditPageView } from "./AuditPageView";
const AuditPage: FC = () => {
const feats = useFeatureVisibility();
+ // The "|| false" fallback is required if audit_log is undefined.
+ // It may happen if owner removes the license.
+ //
+ // see: https://github.com/coder/coder/issues/14798
+ const isAuditLogVisible = feats.audit_log || false;
+
const { showOrganizations } = useDashboard();
/**
@@ -85,7 +91,7 @@ const AuditPage: FC = () => {
Date: Wed, 26 Feb 2025 17:12:51 +0000
Subject: [PATCH 05/44] ci: also restart tagged provisioner deployment (#16716)
Forgot to add this to CI a while ago, and it only recently became
apparent!
---
.github/workflows/ci.yaml | 2 ++
1 file changed, 2 insertions(+)
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index bf1428df6cc3a..6cd3238cad2bf 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -1219,6 +1219,8 @@ jobs:
kubectl --namespace coder rollout status deployment/coder
kubectl --namespace coder rollout restart deployment/coder-provisioner
kubectl --namespace coder rollout status deployment/coder-provisioner
+ kubectl --namespace coder rollout restart deployment/coder-provisioner-tagged
+ kubectl --namespace coder rollout status deployment/coder-provisioner-tagged
deploy-wsproxies:
runs-on: ubuntu-latest
From f1b357d6f23136d149b3af9ef43bb554a8990dc5 Mon Sep 17 00:00:00 2001
From: Bruno Quaresma
Date: Wed, 26 Feb 2025 14:13:11 -0300
Subject: [PATCH 06/44] feat: support session audit log (#16703)
Related to https://github.com/coder/coder/issues/15139
Demo:
---------
Co-authored-by: Mathias Fredriksson
---
.../AuditLogDescription.tsx | 25 ++++++++++--
.../AuditLogRow/AuditLogRow.stories.tsx | 40 +++++++++++++++++++
.../AuditPage/AuditLogRow/AuditLogRow.tsx | 32 ++++++++++-----
3 files changed, 85 insertions(+), 12 deletions(-)
diff --git a/site/src/pages/AuditPage/AuditLogRow/AuditLogDescription/AuditLogDescription.tsx b/site/src/pages/AuditPage/AuditLogRow/AuditLogDescription/AuditLogDescription.tsx
index 51d4e8ec910d9..4b2a9b4df4df7 100644
--- a/site/src/pages/AuditPage/AuditLogRow/AuditLogDescription/AuditLogDescription.tsx
+++ b/site/src/pages/AuditPage/AuditLogRow/AuditLogDescription/AuditLogDescription.tsx
@@ -11,12 +11,15 @@ interface AuditLogDescriptionProps {
export const AuditLogDescription: FC = ({
auditLog,
}) => {
- let target = auditLog.resource_target.trim();
- let user = auditLog.user?.username.trim();
-
if (auditLog.resource_type === "workspace_build") {
return ;
}
+ if (auditLog.additional_fields?.connection_type) {
+ return ;
+ }
+
+ let target = auditLog.resource_target.trim();
+ let user = auditLog.user?.username.trim();
// SSH key entries have no links
if (auditLog.resource_type === "git_ssh_key") {
@@ -57,3 +60,19 @@ export const AuditLogDescription: FC = ({
);
};
+
+function AppSessionAuditLogDescription({ auditLog }: AuditLogDescriptionProps) {
+ const { connection_type, workspace_owner, workspace_name } =
+ auditLog.additional_fields;
+
+ return (
+ <>
+ {connection_type} session to {workspace_owner}'s{" "}
+
+ {workspace_name}
+ {" "}
+ workspace{" "}
+ {auditLog.action === "disconnect" ? "closed" : "opened"}
+ >
+ );
+}
diff --git a/site/src/pages/AuditPage/AuditLogRow/AuditLogRow.stories.tsx b/site/src/pages/AuditPage/AuditLogRow/AuditLogRow.stories.tsx
index 12d57b63047e8..8bb45aa39378b 100644
--- a/site/src/pages/AuditPage/AuditLogRow/AuditLogRow.stories.tsx
+++ b/site/src/pages/AuditPage/AuditLogRow/AuditLogRow.stories.tsx
@@ -159,3 +159,43 @@ export const NoUserAgent: Story = {
},
},
};
+
+export const WithConnectionType: Story = {
+ args: {
+ showOrgDetails: true,
+ auditLog: {
+ id: "725ea2f2-faae-4bdd-a821-c2384a67d89c",
+ request_id: "a486c1cb-6acb-41c9-9bce-1f4f24a2e8ff",
+ time: "2025-02-24T10:20:08.054072Z",
+ ip: "fd7a:115c:a1e0:4fa5:9ccd:27e4:5d72:c66a",
+ user_agent: "",
+ resource_type: "workspace_agent",
+ resource_id: "813311fb-bad3-4a92-98cd-09ee57e73d6e",
+ resource_target: "main",
+ resource_icon: "",
+ action: "disconnect",
+ diff: {},
+ status_code: 255,
+ additional_fields: {
+ reason: "process exited with error status: -1",
+ build_number: "1",
+ build_reason: "initiator",
+ workspace_id: "6a7cfb32-d208-47bb-91d0-ec54b69912b6",
+ workspace_name: "test2",
+ connection_type: "SSH",
+ workspace_owner: "admin",
+ },
+ description: "{user} disconnected workspace agent {target}",
+ resource_link: "",
+ is_deleted: false,
+ organization_id: "0e6fa63f-b625-4a6f-ab5b-a8217f8c80b3",
+ organization: {
+ id: "0e6fa63f-b625-4a6f-ab5b-a8217f8c80b3",
+ name: "coder",
+ display_name: "Coder",
+ icon: "",
+ },
+ user: null,
+ },
+ },
+};
diff --git a/site/src/pages/AuditPage/AuditLogRow/AuditLogRow.tsx b/site/src/pages/AuditPage/AuditLogRow/AuditLogRow.tsx
index 909fb7cf5646e..e5145ea86c966 100644
--- a/site/src/pages/AuditPage/AuditLogRow/AuditLogRow.tsx
+++ b/site/src/pages/AuditPage/AuditLogRow/AuditLogRow.tsx
@@ -128,6 +128,8 @@ export const AuditLogRow: FC = ({
+
+
{/* With multi-org, there is not enough space so show
everything in a tooltip. */}
{showOrgDetails ? (
@@ -169,6 +171,12 @@ export const AuditLogRow: FC = ({
)}
+ {auditLog.additional_fields?.reason && (
+
+
Reason:
+
{auditLog.additional_fields?.reason}
+
+ )}
}
>
@@ -203,13 +211,6 @@ export const AuditLogRow: FC = ({
)}
)}
-
-
- {auditLog.status_code.toString()}
-
@@ -218,7 +219,7 @@ export const AuditLogRow: FC = ({
{shouldDisplayDiff ? (
{ }
) : (
-
+
)}
@@ -232,6 +233,19 @@ export const AuditLogRow: FC = ({
);
};
+function StatusPill({ code }: { code: number }) {
+ const isHttp = code >= 100;
+
+ return (
+
+ {code.toString()}
+
+ );
+}
+
const styles = {
auditLogCell: {
padding: "0 !important",
@@ -287,7 +301,7 @@ const styles = {
width: "100%",
},
- httpStatusPill: {
+ statusCodePill: {
fontSize: 10,
height: 20,
paddingLeft: 10,
From b94d2cb8d45314c9ff9d4cdbcb8c4639c7845cad Mon Sep 17 00:00:00 2001
From: Mathias Fredriksson
Date: Wed, 26 Feb 2025 19:16:54 +0200
Subject: [PATCH 07/44] fix(coderd): handle deletes and links for new agent/app
audit resources (#16670)
These code-paths were overlooked in #16493.
---
coderd/audit.go | 40 ++++++++++++++++++++++++++++++++++++++++
1 file changed, 40 insertions(+)
diff --git a/coderd/audit.go b/coderd/audit.go
index 72be70754c2ea..ce932c9143a98 100644
--- a/coderd/audit.go
+++ b/coderd/audit.go
@@ -367,6 +367,26 @@ func (api *API) auditLogIsResourceDeleted(ctx context.Context, alog database.Get
api.Logger.Error(ctx, "unable to fetch workspace", slog.Error(err))
}
return workspace.Deleted
+ case database.ResourceTypeWorkspaceAgent:
+ // We use workspace as a proxy for workspace agents.
+ workspace, err := api.Database.GetWorkspaceByAgentID(ctx, alog.AuditLog.ResourceID)
+ if err != nil {
+ if xerrors.Is(err, sql.ErrNoRows) {
+ return true
+ }
+ api.Logger.Error(ctx, "unable to fetch workspace", slog.Error(err))
+ }
+ return workspace.Deleted
+ case database.ResourceTypeWorkspaceApp:
+ // We use workspace as a proxy for workspace apps.
+ workspace, err := api.Database.GetWorkspaceByWorkspaceAppID(ctx, alog.AuditLog.ResourceID)
+ if err != nil {
+ if xerrors.Is(err, sql.ErrNoRows) {
+ return true
+ }
+ api.Logger.Error(ctx, "unable to fetch workspace", slog.Error(err))
+ }
+ return workspace.Deleted
case database.ResourceTypeOauth2ProviderApp:
_, err := api.Database.GetOAuth2ProviderAppByID(ctx, alog.AuditLog.ResourceID)
if xerrors.Is(err, sql.ErrNoRows) {
@@ -429,6 +449,26 @@ func (api *API) auditLogResourceLink(ctx context.Context, alog database.GetAudit
return fmt.Sprintf("/@%s/%s/builds/%s",
workspaceOwner.Username, additionalFields.WorkspaceName, additionalFields.BuildNumber)
+ case database.ResourceTypeWorkspaceAgent:
+ if additionalFields.WorkspaceOwner != "" && additionalFields.WorkspaceName != "" {
+ return fmt.Sprintf("/@%s/%s", additionalFields.WorkspaceOwner, additionalFields.WorkspaceName)
+ }
+ workspace, getWorkspaceErr := api.Database.GetWorkspaceByAgentID(ctx, alog.AuditLog.ResourceID)
+ if getWorkspaceErr != nil {
+ return ""
+ }
+ return fmt.Sprintf("/@%s/%s", workspace.OwnerUsername, workspace.Name)
+
+ case database.ResourceTypeWorkspaceApp:
+ if additionalFields.WorkspaceOwner != "" && additionalFields.WorkspaceName != "" {
+ return fmt.Sprintf("/@%s/%s", additionalFields.WorkspaceOwner, additionalFields.WorkspaceName)
+ }
+ workspace, getWorkspaceErr := api.Database.GetWorkspaceByWorkspaceAppID(ctx, alog.AuditLog.ResourceID)
+ if getWorkspaceErr != nil {
+ return ""
+ }
+ return fmt.Sprintf("/@%s/%s", workspace.OwnerUsername, workspace.Name)
+
case database.ResourceTypeOauth2ProviderApp:
return fmt.Sprintf("/deployment/oauth2-provider/apps/%s", alog.AuditLog.ResourceID)
From 7c035a4d9855988ef29cfcce2c0d7638c4164173 Mon Sep 17 00:00:00 2001
From: Bruno Quaresma
Date: Wed, 26 Feb 2025 14:20:47 -0300
Subject: [PATCH 08/44] fix: remove provisioners from deployment sidebar
(#16717)
Provisioners should be only under orgs. This is a leftover from a
previous provisioner refactoring.
---
site/src/modules/management/DeploymentSidebarView.tsx | 5 -----
1 file changed, 5 deletions(-)
diff --git a/site/src/modules/management/DeploymentSidebarView.tsx b/site/src/modules/management/DeploymentSidebarView.tsx
index 21ff6f84b4a48..4783133a872bb 100644
--- a/site/src/modules/management/DeploymentSidebarView.tsx
+++ b/site/src/modules/management/DeploymentSidebarView.tsx
@@ -94,11 +94,6 @@ export const DeploymentSidebarView: FC = ({
IdP Organization Sync
)}
- {permissions.viewDeploymentValues && (
-
- Provisioners
-
- )}
{!hasPremiumLicense && (
Premium
)}
From 7cd6e9cdd6b60b70bd5fe69564515ff8c27dd07d Mon Sep 17 00:00:00 2001
From: Mathias Fredriksson
Date: Wed, 26 Feb 2025 21:06:51 +0200
Subject: [PATCH 09/44] fix: return provisioners in desc order and add limit to
cli (#16720)
---
cli/provisioners.go | 16 +++++++++++++++-
.../coder_provisioner_list_--help.golden | 3 +++
coderd/database/dbmem/dbmem.go | 2 +-
coderd/database/queries.sql.go | 2 +-
coderd/database/queries/provisionerdaemons.sql | 2 +-
coderd/provisionerdaemons_test.go | 4 ++--
docs/reference/cli/provisioner_list.md | 10 ++++++++++
.../coder_provisioner_list_--help.golden | 3 +++
8 files changed, 36 insertions(+), 6 deletions(-)
diff --git a/cli/provisioners.go b/cli/provisioners.go
index 08d96493b87aa..5dd3a703619e5 100644
--- a/cli/provisioners.go
+++ b/cli/provisioners.go
@@ -39,6 +39,7 @@ func (r *RootCmd) provisionerList() *serpent.Command {
cliui.TableFormat([]provisionerDaemonRow{}, []string{"name", "organization", "status", "key name", "created at", "last seen at", "version", "tags"}),
cliui.JSONFormat(),
)
+ limit int64
)
cmd := &serpent.Command{
@@ -57,7 +58,9 @@ func (r *RootCmd) provisionerList() *serpent.Command {
return xerrors.Errorf("current organization: %w", err)
}
- daemons, err := client.OrganizationProvisionerDaemons(ctx, org.ID, nil)
+ daemons, err := client.OrganizationProvisionerDaemons(ctx, org.ID, &codersdk.OrganizationProvisionerDaemonsOptions{
+ Limit: int(limit),
+ })
if err != nil {
return xerrors.Errorf("list provisioner daemons: %w", err)
}
@@ -86,6 +89,17 @@ func (r *RootCmd) provisionerList() *serpent.Command {
},
}
+ cmd.Options = append(cmd.Options, []serpent.Option{
+ {
+ Flag: "limit",
+ FlagShorthand: "l",
+ Env: "CODER_PROVISIONER_LIST_LIMIT",
+ Description: "Limit the number of provisioners returned.",
+ Default: "50",
+ Value: serpent.Int64Of(&limit),
+ },
+ }...)
+
orgContext.AttachOptions(cmd)
formatter.AttachOptions(&cmd.Options)
diff --git a/cli/testdata/coder_provisioner_list_--help.golden b/cli/testdata/coder_provisioner_list_--help.golden
index 111eb8315b162..ac889fb6dcf58 100644
--- a/cli/testdata/coder_provisioner_list_--help.golden
+++ b/cli/testdata/coder_provisioner_list_--help.golden
@@ -14,6 +14,9 @@ OPTIONS:
-c, --column [id|organization id|created at|last seen at|name|version|api version|tags|key name|status|current job id|current job status|current job template name|current job template icon|current job template display name|previous job id|previous job status|previous job template name|previous job template icon|previous job template display name|organization] (default: name,organization,status,key name,created at,last seen at,version,tags)
Columns to display in table output.
+ -l, --limit int, $CODER_PROVISIONER_LIST_LIMIT (default: 50)
+ Limit the number of provisioners returned.
+
-o, --output table|json (default: table)
Output format.
diff --git a/coderd/database/dbmem/dbmem.go b/coderd/database/dbmem/dbmem.go
index 058aed631887e..23913a55bf0c8 100644
--- a/coderd/database/dbmem/dbmem.go
+++ b/coderd/database/dbmem/dbmem.go
@@ -4073,7 +4073,7 @@ func (q *FakeQuerier) GetProvisionerDaemonsWithStatusByOrganization(ctx context.
}
slices.SortFunc(rows, func(a, b database.GetProvisionerDaemonsWithStatusByOrganizationRow) int {
- return a.ProvisionerDaemon.CreatedAt.Compare(b.ProvisionerDaemon.CreatedAt)
+ return b.ProvisionerDaemon.CreatedAt.Compare(a.ProvisionerDaemon.CreatedAt)
})
if arg.Limit.Valid && arg.Limit.Int32 > 0 && len(rows) > int(arg.Limit.Int32) {
diff --git a/coderd/database/queries.sql.go b/coderd/database/queries.sql.go
index 0e2bc0e37f375..9c9ead1b6746e 100644
--- a/coderd/database/queries.sql.go
+++ b/coderd/database/queries.sql.go
@@ -5845,7 +5845,7 @@ WHERE
AND (COALESCE(array_length($3::uuid[], 1), 0) = 0 OR pd.id = ANY($3::uuid[]))
AND ($4::tagset = 'null'::tagset OR provisioner_tagset_contains(pd.tags::tagset, $4::tagset))
ORDER BY
- pd.created_at ASC
+ pd.created_at DESC
LIMIT
$5::int
`
diff --git a/coderd/database/queries/provisionerdaemons.sql b/coderd/database/queries/provisionerdaemons.sql
index ab1668e537d6c..4f7c7a8b2200a 100644
--- a/coderd/database/queries/provisionerdaemons.sql
+++ b/coderd/database/queries/provisionerdaemons.sql
@@ -111,7 +111,7 @@ WHERE
AND (COALESCE(array_length(@ids::uuid[], 1), 0) = 0 OR pd.id = ANY(@ids::uuid[]))
AND (@tags::tagset = 'null'::tagset OR provisioner_tagset_contains(pd.tags::tagset, @tags::tagset))
ORDER BY
- pd.created_at ASC
+ pd.created_at DESC
LIMIT
sqlc.narg('limit')::int;
diff --git a/coderd/provisionerdaemons_test.go b/coderd/provisionerdaemons_test.go
index d6d1138f7a912..249da9d6bc922 100644
--- a/coderd/provisionerdaemons_test.go
+++ b/coderd/provisionerdaemons_test.go
@@ -159,8 +159,8 @@ func TestProvisionerDaemons(t *testing.T) {
})
require.NoError(t, err)
require.Len(t, daemons, 2)
- require.Equal(t, pd1.ID, daemons[0].ID)
- require.Equal(t, pd2.ID, daemons[1].ID)
+ require.Equal(t, pd1.ID, daemons[1].ID)
+ require.Equal(t, pd2.ID, daemons[0].ID)
})
t.Run("Tags", func(t *testing.T) {
diff --git a/docs/reference/cli/provisioner_list.md b/docs/reference/cli/provisioner_list.md
index 93718ddd01ea8..4aadb22064755 100644
--- a/docs/reference/cli/provisioner_list.md
+++ b/docs/reference/cli/provisioner_list.md
@@ -15,6 +15,16 @@ coder provisioner list [flags]
## Options
+### -l, --limit
+
+| | |
+|-------------|--------------------------------------------|
+| Type | <code>int</code> |
+| Environment | <code>$CODER_PROVISIONER_LIST_LIMIT</code> |
+| Default | <code>50</code> |
+
+Limit the number of provisioners returned.
+
### -O, --org
| | |
diff --git a/enterprise/cli/testdata/coder_provisioner_list_--help.golden b/enterprise/cli/testdata/coder_provisioner_list_--help.golden
index 111eb8315b162..ac889fb6dcf58 100644
--- a/enterprise/cli/testdata/coder_provisioner_list_--help.golden
+++ b/enterprise/cli/testdata/coder_provisioner_list_--help.golden
@@ -14,6 +14,9 @@ OPTIONS:
-c, --column [id|organization id|created at|last seen at|name|version|api version|tags|key name|status|current job id|current job status|current job template name|current job template icon|current job template display name|previous job id|previous job status|previous job template name|previous job template icon|previous job template display name|organization] (default: name,organization,status,key name,created at,last seen at,version,tags)
Columns to display in table output.
+ -l, --limit int, $CODER_PROVISIONER_LIST_LIMIT (default: 50)
+ Limit the number of provisioners returned.
+
-o, --output table|json (default: table)
Output format.
From 52959025966ec9b844d4a5285168963352b4063f Mon Sep 17 00:00:00 2001
From: Michael Vincent Patterson
Date: Wed, 26 Feb 2025 14:30:41 -0500
Subject: [PATCH 10/44] docs: clarified prometheus integration behavior
(#16724)
Closes issue #16538
Updated the docs to explain the behavior of enabling Prometheus.
---
docs/admin/integrations/prometheus.md | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/docs/admin/integrations/prometheus.md b/docs/admin/integrations/prometheus.md
index d849f192aaa3d..0d6054bbf37ea 100644
--- a/docs/admin/integrations/prometheus.md
+++ b/docs/admin/integrations/prometheus.md
@@ -31,9 +31,8 @@ coderd_api_active_users_duration_hour 0
### Kubernetes deployment
The Prometheus endpoint can be enabled in the [Helm chart's](https://github.com/coder/coder/tree/main/helm)
-`values.yml` by setting the environment variable `CODER_PROMETHEUS_ADDRESS` to
-`0.0.0.0:2112`. The environment variable `CODER_PROMETHEUS_ENABLE` will be
-enabled automatically. A Service Endpoint will not be exposed; if you need to
+`values.yml` by setting `CODER_PROMETHEUS_ENABLE=true`. Once enabled, the environment variable `CODER_PROMETHEUS_ADDRESS` will be set by default to
+`0.0.0.0:2112`. A Service Endpoint will not be exposed; if you need to
expose the Prometheus port on a Service, (for example, to use a
`ServiceMonitor`), create a separate headless service instead.
From 1cb864bc1bf853cfb5a678f3140b6b68d33282ba Mon Sep 17 00:00:00 2001
From: Jaayden Halko
Date: Wed, 26 Feb 2025 19:39:08 +0000
Subject: [PATCH 11/44] fix: allow viewOrgRoles for custom roles page (#16722)
Users with viewOrgRoles should be able to see customs roles page as this
matches the left sidebar permissions.
---
.../CustomRolesPage/CustomRolesPage.tsx | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/site/src/pages/OrganizationSettingsPage/CustomRolesPage/CustomRolesPage.tsx b/site/src/pages/OrganizationSettingsPage/CustomRolesPage/CustomRolesPage.tsx
index 4eee74c6a599d..4e7b8c386120a 100644
--- a/site/src/pages/OrganizationSettingsPage/CustomRolesPage/CustomRolesPage.tsx
+++ b/site/src/pages/OrganizationSettingsPage/CustomRolesPage/CustomRolesPage.tsx
@@ -57,7 +57,8 @@ export const CustomRolesPage: FC = () => {
From 81ef9e9e80a1e977d35a29bb31816eb8b83fe2bf Mon Sep 17 00:00:00 2001
From: Edward Angert
Date: Wed, 26 Feb 2025 15:43:02 -0500
Subject: [PATCH 12/44] docs: document new feature stages (#16719)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
- [x] translate notes to docs
- [x] move to Home > About > Feature Stages
- [x] decide on bullet point summaries (👍 👎 in comment)
### OOS for this PR
add support page that describes how users can get support. currently,
[this help
article](https://help.coder.com/hc/en-us/articles/25308666965783-Get-Help-with-Coder)
is the only thing that pops up and includes that `Users with valid Coder
licenses can submit tickets` but doesn't show how, nor does it include
the support bundle docs (link or content). it'd be good to have these
things relate to each other
## preview
[preview](https://coder.com/docs/@feature-stages/contributing/feature-stages)
---------
Co-authored-by: EdwardAngert <17991901+EdwardAngert@users.noreply.github.com>
Co-authored-by: Ben Potter
---
docs/about/feature-stages.md | 105 ++++++++++++++++++++++++++++
docs/contributing/feature-stages.md | 63 -----------------
docs/manifest.json | 11 ++-
3 files changed, 110 insertions(+), 69 deletions(-)
create mode 100644 docs/about/feature-stages.md
delete mode 100644 docs/contributing/feature-stages.md
diff --git a/docs/about/feature-stages.md b/docs/about/feature-stages.md
new file mode 100644
index 0000000000000..f5afb78836a03
--- /dev/null
+++ b/docs/about/feature-stages.md
@@ -0,0 +1,105 @@
+# Feature stages
+
+Some Coder features are released in feature stages before they are generally
+available.
+
+If you encounter an issue with any Coder feature, please submit a
+[GitHub issue](https://github.com/coder/coder/issues) or join the
+[Coder Discord](https://discord.gg/coder).
+
+## Early access features
+
+- **Stable**: No
+- **Production-ready**: No
+- **Support**: GitHub issues
+
+Early access features are neither feature-complete nor stable. We do not
+recommend using early access features in production deployments.
+
+Coder often releases early access features behind an “unsafe” experiment, where
+they’re accessible but not easy to find.
+They are disabled by default, and not recommended for use in
+production because they might cause performance or stability issues. In most cases,
+early access features are mostly complete, but require further internal testing and
+will stay in the early access stage for at least one month.
+
+Coder may make significant changes or revert features to a feature flag at any time.
+
+If you plan to activate an early access feature, we suggest that you use a
+staging deployment.
+
+To enable early access features:
+
+Use the [Coder CLI](../install/cli.md) `--experiments` flag to enable early access features:
+
+- Enable all early access features:
+
+ ```shell
+ coder server --experiments=*
+ ```
+
+- Enable multiple early access features:
+
+ ```shell
+ coder server --experiments=feature1,feature2
+ ```
+
+You can also use the `CODER_EXPERIMENTS` [environment variable](../admin/setup/index.md).
+
+You can opt-out of a feature after you've enabled it.
+
+
+
+### Available early access features
+
+
+
+
+| Feature | Description | Available in |
+|-----------------|---------------------------------------------------------------------|--------------|
+| `notifications` | Sends notifications via SMTP and webhooks following certain events. | stable |
+
+
+
+## Beta
+
+- **Stable**: No
+- **Production-ready**: Not fully
+- **Support**: Documentation, [Discord](https://discord.gg/coder), and [GitHub issues](https://github.com/coder/coder/issues)
+
+Beta features are open to the public and are tagged with a `Beta` label.
+
+They’re in active development and subject to minor changes.
+They might contain minor bugs, but are generally ready for use.
+
+Beta features are often ready for general availability within two-three releases.
+You should test beta features in staging environments.
+You can use beta features in production, but should set expectations and inform users that some features may be incomplete.
+
+We keep documentation about beta features up-to-date with the latest information, including planned features, limitations, and workarounds.
+If you encounter an issue, please contact your [Coder account team](https://coder.com/contact), reach out on [Discord](https://discord.gg/coder), or create a [GitHub issues](https://github.com/coder/coder/issues) if there isn't one already.
+While we will do our best to provide support with beta features, most issues will be escalated to the product team.
+Beta features are not covered within service-level agreements (SLA).
+
+Most beta features are enabled by default.
+Beta features are announced through the [Coder Changelog](https://coder.com/changelog), and more information is available in the documentation.
+
+## General Availability (GA)
+
+- **Stable**: Yes
+- **Production-ready**: Yes
+- **Support**: Yes, [based on license](https://coder.com/pricing).
+
+All features that are not explicitly tagged as `Early access` or `Beta` are considered generally available (GA).
+They have been tested, are stable, and are enabled by default.
+
+If your Coder license includes an SLA, please consult it for an outline of specific expectations.
+
+For support, consult our knowledgeable and growing community on [Discord](https://discord.gg/coder), or create a [GitHub issue](https://github.com/coder/coder/issues) if one doesn't exist already.
+Customers with a valid Coder license, can submit a support request or contact your [account team](https://coder.com/contact).
+
+We intend [Coder documentation](../README.md) to be the [single source of truth](https://en.wikipedia.org/wiki/Single_source_of_truth) and all features should have some form of complete documentation that outlines how to use or implement a feature.
+If you discover an error or if you have a suggestion that could improve the documentation, please [submit a GitHub issue](https://github.com/coder/internal/issues/new?title=request%28docs%29%3A+request+title+here&labels=["customer-feedback","docs"]&body=please+enter+your+request+here).
+
+Some GA features can be disabled for air-gapped deployments.
+Consult the feature's documentation or submit a support ticket for assistance.
diff --git a/docs/contributing/feature-stages.md b/docs/contributing/feature-stages.md
deleted file mode 100644
index 97b8b020a4559..0000000000000
--- a/docs/contributing/feature-stages.md
+++ /dev/null
@@ -1,63 +0,0 @@
-# Feature stages
-
-Some Coder features are released in feature stages before they are generally
-available.
-
-If you encounter an issue with any Coder feature, please submit a
-[GitHub issues](https://github.com/coder/coder/issues) or join the
-[Coder Discord](https://discord.gg/coder).
-
-## Early access features
-
-Early access features are neither feature-complete nor stable. We do not
-recommend using early access features in production deployments.
-
-Coder releases early access features behind an “unsafe” experiment, where
-they’re accessible but not easy to find.
-
-## Experimental features
-
-These features are disabled by default, and not recommended for use in
-production as they may cause performance or stability issues. In most cases,
-experimental features are complete, but require further internal testing and
-will stay in the experimental stage for one month.
-
-Coder may make significant changes to experiments or revert features to a
-feature flag at any time.
-
-If you plan to activate an experimental feature, we suggest that you use a
-staging deployment.
-
-You can opt-out of an experiment after you've enabled it.
-
-```yaml
-# Enable all experimental features
-coder server --experiments=*
-
-# Enable multiple experimental features
-coder server --experiments=feature1,feature2
-
-# Alternatively, use the `CODER_EXPERIMENTS` environment variable.
-```
-
-### Available experimental features
-
-
-
-
-| Feature | Description | Available in |
-|-----------------|---------------------------------------------------------------------|--------------|
-| `notifications` | Sends notifications via SMTP and webhooks following certain events. | stable |
-
-
-
-## Beta
-
-Beta features are open to the public, but are tagged with a `Beta` label.
-
-They’re subject to minor changes and may contain bugs, but are generally ready
-for use.
-
-## General Availability (GA)
-
-All other features have been tested, are stable, and are enabled by default.
diff --git a/docs/manifest.json b/docs/manifest.json
index 2da08f84d6419..0dfb85096ae34 100644
--- a/docs/manifest.json
+++ b/docs/manifest.json
@@ -16,6 +16,11 @@
"title": "Screenshots",
"description": "View screenshots of the Coder platform",
"path": "./start/screenshots.md"
+ },
+ {
+ "title": "Feature stages",
+ "description": "Information about pre-GA stages.",
+ "path": "./about/feature-stages.md"
}
]
},
@@ -639,12 +644,6 @@
"path": "./contributing/CODE_OF_CONDUCT.md",
"icon_path": "./images/icons/circle-dot.svg"
},
- {
- "title": "Feature stages",
- "description": "Policies for Alpha and Experimental features.",
- "path": "./contributing/feature-stages.md",
- "icon_path": "./images/icons/stairs.svg"
- },
{
"title": "Documentation",
"description": "Our style guide for use when authoring documentation",
From 2aa749a7f03a326de94b8bb445a8ae369e458065 Mon Sep 17 00:00:00 2001
From: Cian Johnston
Date: Wed, 26 Feb 2025 21:10:39 +0000
Subject: [PATCH 13/44] chore(cli): fix test flake caused by agent connect race
(#16725)
Fixes test flake seen here:
https://github.com/coder/coder/actions/runs/13552012547/job/37877778883
```
exp_rpty_test.go:96:
Error Trace: /home/runner/work/coder/coder/cli/exp_rpty_test.go:96
/home/runner/work/coder/coder/cli/ssh_test.go:1963
/home/runner/go/pkg/mod/golang.org/toolchain@v0.0.1-go1.22.9.linux-amd64/src/runtime/asm_amd64.s:1695
Error: Received unexpected error:
running command "coder exp rpty": GET http://localhost:37991/api/v2/workspaceagents/3785b98f-0589-47d2-a3c8-33a55a6c5b29/containers: unexpected status code 400: Agent state is "connecting", it must be in the "connected" state.
Test: TestExpRpty/Container
```
---
cli/exp_rpty_test.go | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/cli/exp_rpty_test.go b/cli/exp_rpty_test.go
index 2f0a24bf1cf41..782a7b5c08d48 100644
--- a/cli/exp_rpty_test.go
+++ b/cli/exp_rpty_test.go
@@ -87,6 +87,11 @@ func TestExpRpty(t *testing.T) {
require.NoError(t, err, "Could not stop container")
})
+ _ = agenttest.New(t, client.URL, agentToken, func(o *agent.Options) {
+ o.ExperimentalContainersEnabled = true
+ })
+ _ = coderdtest.NewWorkspaceAgentWaiter(t, client, workspace.ID).Wait()
+
inv, root := clitest.New(t, "exp", "rpty", workspace.Name, "-c", ct.Container.ID)
clitest.SetupConfig(t, client, root)
pty := ptytest.New(t).Attach(inv)
@@ -96,11 +101,6 @@ func TestExpRpty(t *testing.T) {
assert.NoError(t, err)
})
- _ = agenttest.New(t, client.URL, agentToken, func(o *agent.Options) {
- o.ExperimentalContainersEnabled = true
- })
- _ = coderdtest.NewWorkspaceAgentWaiter(t, client, workspace.ID).Wait()
-
pty.ExpectMatch(fmt.Sprintf("Connected to %s", workspace.Name))
pty.ExpectMatch("Reconnect ID: ")
pty.ExpectMatch(" #")
From 6b6963514011b4937fb24a0df6601e11e885d109 Mon Sep 17 00:00:00 2001
From: Jaayden Halko
Date: Wed, 26 Feb 2025 22:03:23 +0000
Subject: [PATCH 14/44] chore: warn user without permissions to view org
members (#16721)
resolves coder/internal#392
In situations where a user accesses the org members page without any
permissions beyond that of a normal member, they will only be able to
see themselves in the list of members.
This PR shows a warning to users who arrive at the members page in this
situation.
---
.../OrganizationMembersPage.tsx | 1 +
.../OrganizationMembersPageView.tsx | 16 ++++++++++++++--
2 files changed, 15 insertions(+), 2 deletions(-)
diff --git a/site/src/pages/OrganizationSettingsPage/OrganizationMembersPage.tsx b/site/src/pages/OrganizationSettingsPage/OrganizationMembersPage.tsx
index 078ae1a0cbba8..7ae0eb72bec91 100644
--- a/site/src/pages/OrganizationSettingsPage/OrganizationMembersPage.tsx
+++ b/site/src/pages/OrganizationSettingsPage/OrganizationMembersPage.tsx
@@ -72,6 +72,7 @@ const OrganizationMembersPage: FC = () => {
= ({
allAvailableRoles,
canEditMembers,
+ canViewMembers,
error,
isAddingMember,
isUpdatingMemberRoles,
@@ -70,7 +73,7 @@ export const OrganizationMembersPageView: FC<
return (
-
+
{Boolean(error) &&
}
{canEditMembers && (
@@ -80,6 +83,15 @@ export const OrganizationMembersPageView: FC<
/>
)}
+ {!canViewMembers && (
+
+
+
+ You do not have permission to view members other than yourself.
+
+
+ )}
+
@@ -154,7 +166,7 @@ export const OrganizationMembersPageView: FC<
))}
-
+
);
};
From 5cdc13ba9ec60904f7a502e51f40268a35cd3fac Mon Sep 17 00:00:00 2001
From: Edward Angert
Date: Wed, 26 Feb 2025 17:42:46 -0500
Subject: [PATCH 15/44] docs: fix broken links in feature-stages (#16727)
fix broken links introduced by #16719
---------
Co-authored-by: EdwardAngert <17991901+EdwardAngert@users.noreply.github.com>
---
docs/admin/monitoring/notifications/index.md | 2 +-
docs/changelogs/v0.26.0.md | 2 +-
docs/changelogs/v2.9.0.md | 2 +-
docs/install/releases.md | 2 +-
scripts/release/docs_update_experiments.sh | 2 +-
site/src/components/FeatureStageBadge/FeatureStageBadge.tsx | 2 +-
6 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/docs/admin/monitoring/notifications/index.md b/docs/admin/monitoring/notifications/index.md
index eb077e13b38ed..d65667058e437 100644
--- a/docs/admin/monitoring/notifications/index.md
+++ b/docs/admin/monitoring/notifications/index.md
@@ -269,7 +269,7 @@ troubleshoot:
`CODER_VERBOSE=true` or `--verbose` to output debug logs.
1. If you are on version 2.15.x, notifications must be enabled using the
`notifications`
- [experiment](../../../contributing/feature-stages.md#experimental-features).
+ [experiment](../../../about/feature-stages.md#early-access-features).
Notifications are enabled by default in Coder v2.16.0 and later.
diff --git a/docs/changelogs/v0.26.0.md b/docs/changelogs/v0.26.0.md
index 19fcb5c3950ea..9a07e2ed9638c 100644
--- a/docs/changelogs/v0.26.0.md
+++ b/docs/changelogs/v0.26.0.md
@@ -16,7 +16,7 @@
> previously necessary to activate this additional feature.
- Our scale test CLI is
- [experimental](https://coder.com/docs/contributing/feature-stages#experimental-features)
+ [experimental](https://coder.com/docs/about/feature-stages.md#early-access-features)
to allow for rapid iteration. You can still interact with it via
`coder exp scaletest` (#8339)
diff --git a/docs/changelogs/v2.9.0.md b/docs/changelogs/v2.9.0.md
index 55bfb33cf1fcf..549f15c19c014 100644
--- a/docs/changelogs/v2.9.0.md
+++ b/docs/changelogs/v2.9.0.md
@@ -61,7 +61,7 @@
### Experimental features
-The following features are hidden or disabled by default as we don't guarantee stability. Learn more about experiments in [our documentation](https://coder.com/docs/contributing/feature-stages#experimental-features).
+The following features are hidden or disabled by default as we don't guarantee stability. Learn more about experiments in [our documentation](https://coder.com/docs/about/feature-stages.md#early-access-features).
- The `coder support` command generates a ZIP with deployment information, agent logs, and server config values for troubleshooting purposes. We will publish documentation on how it works (and un-hide the feature) in a future release (#12328) (@johnstcn)
- Port sharing: Allow users to share ports running in their workspace with other Coder users (#11939) (#12119) (#12383) (@deansheather) (@f0ssel)
diff --git a/docs/install/releases.md b/docs/install/releases.md
index 157adf7fe8961..14e7dd7e6db90 100644
--- a/docs/install/releases.md
+++ b/docs/install/releases.md
@@ -35,7 +35,7 @@ only for security issues or CVEs.
- In-product security vulnerabilities and CVEs are supported
> For more information on feature rollout, see our
-> [feature stages documentation](../contributing/feature-stages.md).
+> [feature stages documentation](../about/feature-stages.md).
## Installing stable
diff --git a/scripts/release/docs_update_experiments.sh b/scripts/release/docs_update_experiments.sh
index 8ed380a356a2e..1c6afdb87b181 100755
--- a/scripts/release/docs_update_experiments.sh
+++ b/scripts/release/docs_update_experiments.sh
@@ -94,7 +94,7 @@ parse_experiments() {
}
workdir=build/docs/experiments
-dest=docs/contributing/feature-stages.md
+dest=docs/about/feature-stages.md
log "Updating available experimental features in ${dest}"
diff --git a/site/src/components/FeatureStageBadge/FeatureStageBadge.tsx b/site/src/components/FeatureStageBadge/FeatureStageBadge.tsx
index d463af2de43aa..0d4ea98258ea8 100644
--- a/site/src/components/FeatureStageBadge/FeatureStageBadge.tsx
+++ b/site/src/components/FeatureStageBadge/FeatureStageBadge.tsx
@@ -61,7 +61,7 @@ export const FeatureStageBadge: FC = ({
Date: Wed, 26 Feb 2025 23:20:03 -0500
Subject: [PATCH 16/44] docs: copy edit early access section in feature-stages
doc (#16730)
- copy edit EA section with @mattvollmer 's suggestions
- ran the script that updates the list of experiments
---------
Co-authored-by: EdwardAngert <17991901+EdwardAngert@users.noreply.github.com>
---
docs/about/feature-stages.md | 13 ++++---------
1 file changed, 4 insertions(+), 9 deletions(-)
diff --git a/docs/about/feature-stages.md b/docs/about/feature-stages.md
index f5afb78836a03..65644e98b558f 100644
--- a/docs/about/feature-stages.md
+++ b/docs/about/feature-stages.md
@@ -16,12 +16,9 @@ If you encounter an issue with any Coder feature, please submit a
Early access features are neither feature-complete nor stable. We do not
recommend using early access features in production deployments.
-Coder often releases early access features behind an “unsafe” experiment, where
-they’re accessible but not easy to find.
-They are disabled by default, and not recommended for use in
-production because they might cause performance or stability issues. In most cases,
-early access features are mostly complete, but require further internal testing and
-will stay in the early access stage for at least one month.
+Coder sometimes releases early access features that are available for use, but are disabled by default.
+You shouldn't use early access features in production because they might cause performance or stability issues.
+Early access features can be mostly feature-complete, but require further internal testing and remain in the early access stage for at least one month.
Coder may make significant changes or revert features to a feature flag at any time.
@@ -55,9 +52,7 @@ You can opt-out of a feature after you've enabled it.
-| Feature | Description | Available in |
-|-----------------|---------------------------------------------------------------------|--------------|
-| `notifications` | Sends notifications via SMTP and webhooks following certain events. | stable |
+Currently no experimental features are available in the latest mainline or stable release.
From 95363c9041d805e03b1be422a7dd64cfe7ec1603 Mon Sep 17 00:00:00 2001
From: Cian Johnston
Date: Thu, 27 Feb 2025 09:08:08 +0000
Subject: [PATCH 17/44] fix(enterprise/coderd): remove useless provisioner
daemon id from request (#16723)
`ServeProvisionerDaemonRequest` has had an ID field for quite a while
now.
This field is only used for telemetry purposes; the actual daemon ID is
created upon insertion in the database. There's no reason to set it, and
it's confusing to do so. Deprecating the field and removing references
to it.
---
codersdk/provisionerdaemons.go | 2 +-
enterprise/cli/provisionerdaemonstart.go | 1 -
enterprise/coderd/coderdenttest/coderdenttest.go | 1 -
enterprise/coderd/provisionerdaemons.go | 7 +------
enterprise/coderd/provisionerdaemons_test.go | 11 -----------
5 files changed, 2 insertions(+), 20 deletions(-)
diff --git a/codersdk/provisionerdaemons.go b/codersdk/provisionerdaemons.go
index f6130f3b8235d..2a9472f1cb36a 100644
--- a/codersdk/provisionerdaemons.go
+++ b/codersdk/provisionerdaemons.go
@@ -239,6 +239,7 @@ func (c *Client) provisionerJobLogsAfter(ctx context.Context, path string, after
// @typescript-ignore ServeProvisionerDaemonRequest
type ServeProvisionerDaemonRequest struct {
// ID is a unique ID for a provisioner daemon.
+ // Deprecated: this field has always been ignored.
ID uuid.UUID `json:"id" format:"uuid"`
// Name is the human-readable unique identifier for the daemon.
Name string `json:"name" example:"my-cool-provisioner-daemon"`
@@ -270,7 +271,6 @@ func (c *Client) ServeProvisionerDaemon(ctx context.Context, req ServeProvisione
}
query := serverURL.Query()
query.Add("version", proto.CurrentVersion.String())
- query.Add("id", req.ID.String())
query.Add("name", req.Name)
query.Add("version", proto.CurrentVersion.String())
diff --git a/enterprise/cli/provisionerdaemonstart.go b/enterprise/cli/provisionerdaemonstart.go
index 8d7d319d39c2b..e0b3e00c63ece 100644
--- a/enterprise/cli/provisionerdaemonstart.go
+++ b/enterprise/cli/provisionerdaemonstart.go
@@ -225,7 +225,6 @@ func (r *RootCmd) provisionerDaemonStart() *serpent.Command {
}
srv := provisionerd.New(func(ctx context.Context) (provisionerdproto.DRPCProvisionerDaemonClient, error) {
return client.ServeProvisionerDaemon(ctx, codersdk.ServeProvisionerDaemonRequest{
- ID: uuid.New(),
Name: name,
Provisioners: []codersdk.ProvisionerType{
codersdk.ProvisionerTypeTerraform,
diff --git a/enterprise/coderd/coderdenttest/coderdenttest.go b/enterprise/coderd/coderdenttest/coderdenttest.go
index d76722b5bac1a..a72c8c0199695 100644
--- a/enterprise/coderd/coderdenttest/coderdenttest.go
+++ b/enterprise/coderd/coderdenttest/coderdenttest.go
@@ -388,7 +388,6 @@ func newExternalProvisionerDaemon(t testing.TB, client *codersdk.Client, org uui
daemon := provisionerd.New(func(ctx context.Context) (provisionerdproto.DRPCProvisionerDaemonClient, error) {
return client.ServeProvisionerDaemon(ctx, codersdk.ServeProvisionerDaemonRequest{
- ID: uuid.New(),
Name: testutil.GetRandomName(t),
Organization: org,
Provisioners: []codersdk.ProvisionerType{provisionerType},
diff --git a/enterprise/coderd/provisionerdaemons.go b/enterprise/coderd/provisionerdaemons.go
index f4335438654b5..5b0f0ca197743 100644
--- a/enterprise/coderd/provisionerdaemons.go
+++ b/enterprise/coderd/provisionerdaemons.go
@@ -175,11 +175,6 @@ func (api *API) provisionerDaemonServe(rw http.ResponseWriter, r *http.Request)
return
}
- id, _ := uuid.Parse(r.URL.Query().Get("id"))
- if id == uuid.Nil {
- id = uuid.New()
- }
-
provisionersMap := map[codersdk.ProvisionerType]struct{}{}
for _, provisioner := range r.URL.Query()["provisioner"] {
switch provisioner {
@@ -295,7 +290,7 @@ func (api *API) provisionerDaemonServe(rw http.ResponseWriter, r *http.Request)
api.AGPL.WebsocketWaitMutex.Unlock()
defer api.AGPL.WebsocketWaitGroup.Done()
- tep := telemetry.ConvertExternalProvisioner(id, tags, provisioners)
+ tep := telemetry.ConvertExternalProvisioner(daemon.ID, tags, provisioners)
api.Telemetry.Report(&telemetry.Snapshot{ExternalProvisioners: []telemetry.ExternalProvisioner{tep}})
defer func() {
tep.ShutdownAt = ptr.Ref(time.Now())
diff --git a/enterprise/coderd/provisionerdaemons_test.go b/enterprise/coderd/provisionerdaemons_test.go
index 0cd812b45c5f1..a84213f71805f 100644
--- a/enterprise/coderd/provisionerdaemons_test.go
+++ b/enterprise/coderd/provisionerdaemons_test.go
@@ -50,7 +50,6 @@ func TestProvisionerDaemonServe(t *testing.T) {
defer cancel()
daemonName := testutil.MustRandString(t, 63)
srv, err := templateAdminClient.ServeProvisionerDaemon(ctx, codersdk.ServeProvisionerDaemonRequest{
- ID: uuid.New(),
Name: daemonName,
Organization: user.OrganizationID,
Provisioners: []codersdk.ProvisionerType{
@@ -180,7 +179,6 @@ func TestProvisionerDaemonServe(t *testing.T) {
defer cancel()
daemonName := testutil.MustRandString(t, 63)
_, err := templateAdminClient.ServeProvisionerDaemon(ctx, codersdk.ServeProvisionerDaemonRequest{
- ID: uuid.New(),
Name: daemonName,
Organization: user.OrganizationID,
Provisioners: []codersdk.ProvisionerType{
@@ -205,7 +203,6 @@ func TestProvisionerDaemonServe(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
defer cancel()
_, err := another.ServeProvisionerDaemon(ctx, codersdk.ServeProvisionerDaemonRequest{
- ID: uuid.New(),
Name: testutil.MustRandString(t, 63),
Organization: user.OrganizationID,
Provisioners: []codersdk.ProvisionerType{
@@ -229,7 +226,6 @@ func TestProvisionerDaemonServe(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
defer cancel()
_, err := another.ServeProvisionerDaemon(ctx, codersdk.ServeProvisionerDaemonRequest{
- ID: uuid.New(),
Name: testutil.MustRandString(t, 63),
Organization: user.OrganizationID,
Provisioners: []codersdk.ProvisionerType{
@@ -360,7 +356,6 @@ func TestProvisionerDaemonServe(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
defer cancel()
req := codersdk.ServeProvisionerDaemonRequest{
- ID: uuid.New(),
Name: testutil.MustRandString(t, 63),
Organization: user.OrganizationID,
Provisioners: []codersdk.ProvisionerType{
@@ -425,7 +420,6 @@ func TestProvisionerDaemonServe(t *testing.T) {
another := codersdk.New(client.URL)
pd := provisionerd.New(func(ctx context.Context) (proto.DRPCProvisionerDaemonClient, error) {
return another.ServeProvisionerDaemon(ctx, codersdk.ServeProvisionerDaemonRequest{
- ID: uuid.New(),
Name: testutil.MustRandString(t, 63),
Organization: user.OrganizationID,
Provisioners: []codersdk.ProvisionerType{
@@ -503,7 +497,6 @@ func TestProvisionerDaemonServe(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
defer cancel()
_, err := another.ServeProvisionerDaemon(ctx, codersdk.ServeProvisionerDaemonRequest{
- ID: uuid.New(),
Name: testutil.MustRandString(t, 32),
Organization: user.OrganizationID,
Provisioners: []codersdk.ProvisionerType{
@@ -538,7 +531,6 @@ func TestProvisionerDaemonServe(t *testing.T) {
defer cancel()
another := codersdk.New(client.URL)
_, err := another.ServeProvisionerDaemon(ctx, codersdk.ServeProvisionerDaemonRequest{
- ID: uuid.New(),
Name: testutil.MustRandString(t, 63),
Organization: user.OrganizationID,
Provisioners: []codersdk.ProvisionerType{
@@ -571,7 +563,6 @@ func TestProvisionerDaemonServe(t *testing.T) {
defer cancel()
another := codersdk.New(client.URL)
_, err := another.ServeProvisionerDaemon(ctx, codersdk.ServeProvisionerDaemonRequest{
- ID: uuid.New(),
Name: testutil.MustRandString(t, 63),
Organization: user.OrganizationID,
Provisioners: []codersdk.ProvisionerType{
@@ -698,7 +689,6 @@ func TestProvisionerDaemonServe(t *testing.T) {
another := codersdk.New(client.URL)
srv, err := another.ServeProvisionerDaemon(ctx, codersdk.ServeProvisionerDaemonRequest{
- ID: uuid.New(),
Name: testutil.MustRandString(t, 63),
Organization: user.OrganizationID,
Provisioners: []codersdk.ProvisionerType{
@@ -758,7 +748,6 @@ func TestGetProvisionerDaemons(t *testing.T) {
defer cancel()
daemonName := testutil.MustRandString(t, 63)
srv, err := orgAdmin.ServeProvisionerDaemon(ctx, codersdk.ServeProvisionerDaemonRequest{
- ID: uuid.New(),
Name: daemonName,
Organization: org.ID,
Provisioners: []codersdk.ProvisionerType{
From 6dd51f92fbd6132ea4dc1d9c541c322cf2d4effc Mon Sep 17 00:00:00 2001
From: Danielle Maywood
Date: Thu, 27 Feb 2025 10:43:51 +0100
Subject: [PATCH 18/44] chore: test metricscache on postgres (#16711)
metricscache_test has been running tests against dbmem only, instead of
against postgres. Unfortunately the implementations of
GetTemplateAverageBuildTime have diverged between dbmem and postgres.
This change gets the tests working on Postgres and tests for the
behaviour Postgres provides.
---
coderd/coderd.go | 1 +
coderd/database/dbmem/dbmem.go | 36 +++---
coderd/database/queries.sql.go | 12 +-
coderd/database/queries/workspaces.sql | 12 +-
coderd/metricscache/metricscache.go | 13 +-
coderd/metricscache/metricscache_test.go | 148 +++++++++++++----------
6 files changed, 126 insertions(+), 96 deletions(-)
diff --git a/coderd/coderd.go b/coderd/coderd.go
index 1cb4c0592b66e..d4c948e346265 100644
--- a/coderd/coderd.go
+++ b/coderd/coderd.go
@@ -422,6 +422,7 @@ func New(options *Options) *API {
metricsCache := metricscache.New(
options.Database,
options.Logger.Named("metrics_cache"),
+ options.Clock,
metricscache.Intervals{
TemplateBuildTimes: options.MetricsCacheRefreshInterval,
DeploymentStats: options.AgentStatsRefreshInterval,
diff --git a/coderd/database/dbmem/dbmem.go b/coderd/database/dbmem/dbmem.go
index 23913a55bf0c8..6fbafa562d087 100644
--- a/coderd/database/dbmem/dbmem.go
+++ b/coderd/database/dbmem/dbmem.go
@@ -269,7 +269,7 @@ type data struct {
presetParameters []database.TemplateVersionPresetParameter
}
-func tryPercentile(fs []float64, p float64) float64 {
+func tryPercentileCont(fs []float64, p float64) float64 {
if len(fs) == 0 {
return -1
}
@@ -282,6 +282,14 @@ func tryPercentile(fs []float64, p float64) float64 {
return fs[lower] + (fs[upper]-fs[lower])*(pos-float64(lower))
}
+func tryPercentileDisc(fs []float64, p float64) float64 {
+ if len(fs) == 0 {
+ return -1
+ }
+ sort.Float64s(fs)
+ return fs[max(int(math.Ceil(float64(len(fs))*p/100-1)), 0)]
+}
+
func validateDatabaseTypeWithValid(v reflect.Value) (handled bool, err error) {
if v.Kind() == reflect.Struct {
return false, nil
@@ -2790,8 +2798,8 @@ func (q *FakeQuerier) GetDeploymentWorkspaceAgentStats(_ context.Context, create
latencies = append(latencies, agentStat.ConnectionMedianLatencyMS)
}
- stat.WorkspaceConnectionLatency50 = tryPercentile(latencies, 50)
- stat.WorkspaceConnectionLatency95 = tryPercentile(latencies, 95)
+ stat.WorkspaceConnectionLatency50 = tryPercentileCont(latencies, 50)
+ stat.WorkspaceConnectionLatency95 = tryPercentileCont(latencies, 95)
return stat, nil
}
@@ -2839,8 +2847,8 @@ func (q *FakeQuerier) GetDeploymentWorkspaceAgentUsageStats(_ context.Context, c
stat.WorkspaceTxBytes += agentStat.TxBytes
latencies = append(latencies, agentStat.ConnectionMedianLatencyMS)
}
- stat.WorkspaceConnectionLatency50 = tryPercentile(latencies, 50)
- stat.WorkspaceConnectionLatency95 = tryPercentile(latencies, 95)
+ stat.WorkspaceConnectionLatency50 = tryPercentileCont(latencies, 50)
+ stat.WorkspaceConnectionLatency95 = tryPercentileCont(latencies, 95)
for _, agentStat := range sessions {
stat.SessionCountVSCode += agentStat.SessionCountVSCode
@@ -4987,9 +4995,9 @@ func (q *FakeQuerier) GetTemplateAverageBuildTime(ctx context.Context, arg datab
}
var row database.GetTemplateAverageBuildTimeRow
- row.Delete50, row.Delete95 = tryPercentile(deleteTimes, 50), tryPercentile(deleteTimes, 95)
- row.Stop50, row.Stop95 = tryPercentile(stopTimes, 50), tryPercentile(stopTimes, 95)
- row.Start50, row.Start95 = tryPercentile(startTimes, 50), tryPercentile(startTimes, 95)
+ row.Delete50, row.Delete95 = tryPercentileDisc(deleteTimes, 50), tryPercentileDisc(deleteTimes, 95)
+ row.Stop50, row.Stop95 = tryPercentileDisc(stopTimes, 50), tryPercentileDisc(stopTimes, 95)
+ row.Start50, row.Start95 = tryPercentileDisc(startTimes, 50), tryPercentileDisc(startTimes, 95)
return row, nil
}
@@ -6024,8 +6032,8 @@ func (q *FakeQuerier) GetUserLatencyInsights(_ context.Context, arg database.Get
Username: user.Username,
AvatarURL: user.AvatarURL,
TemplateIDs: seenTemplatesByUserID[userID],
- WorkspaceConnectionLatency50: tryPercentile(latencies, 50),
- WorkspaceConnectionLatency95: tryPercentile(latencies, 95),
+ WorkspaceConnectionLatency50: tryPercentileCont(latencies, 50),
+ WorkspaceConnectionLatency95: tryPercentileCont(latencies, 95),
}
rows = append(rows, row)
}
@@ -6669,8 +6677,8 @@ func (q *FakeQuerier) GetWorkspaceAgentStats(_ context.Context, createdAfter tim
if !ok {
continue
}
- stat.WorkspaceConnectionLatency50 = tryPercentile(latencies, 50)
- stat.WorkspaceConnectionLatency95 = tryPercentile(latencies, 95)
+ stat.WorkspaceConnectionLatency50 = tryPercentileCont(latencies, 50)
+ stat.WorkspaceConnectionLatency95 = tryPercentileCont(latencies, 95)
statByAgent[stat.AgentID] = stat
}
@@ -6807,8 +6815,8 @@ func (q *FakeQuerier) GetWorkspaceAgentUsageStats(_ context.Context, createdAt t
for key, latencies := range latestAgentLatencies {
val, ok := latestAgentStats[key]
if ok {
- val.WorkspaceConnectionLatency50 = tryPercentile(latencies, 50)
- val.WorkspaceConnectionLatency95 = tryPercentile(latencies, 95)
+ val.WorkspaceConnectionLatency50 = tryPercentileCont(latencies, 50)
+ val.WorkspaceConnectionLatency95 = tryPercentileCont(latencies, 95)
}
latestAgentStats[key] = val
}
diff --git a/coderd/database/queries.sql.go b/coderd/database/queries.sql.go
index 9c9ead1b6746e..779bbf4b47ee9 100644
--- a/coderd/database/queries.sql.go
+++ b/coderd/database/queries.sql.go
@@ -16253,13 +16253,11 @@ func (q *sqlQuerier) GetWorkspaceByWorkspaceAppID(ctx context.Context, workspace
}
const getWorkspaceUniqueOwnerCountByTemplateIDs = `-- name: GetWorkspaceUniqueOwnerCountByTemplateIDs :many
-SELECT
- template_id, COUNT(DISTINCT owner_id) AS unique_owners_sum
-FROM
- workspaces
-WHERE
- template_id = ANY($1 :: uuid[]) AND deleted = false
-GROUP BY template_id
+SELECT templates.id AS template_id, COUNT(DISTINCT workspaces.owner_id) AS unique_owners_sum
+FROM templates
+LEFT JOIN workspaces ON workspaces.template_id = templates.id AND workspaces.deleted = false
+WHERE templates.id = ANY($1 :: uuid[])
+GROUP BY templates.id
`
type GetWorkspaceUniqueOwnerCountByTemplateIDsRow struct {
diff --git a/coderd/database/queries/workspaces.sql b/coderd/database/queries/workspaces.sql
index cb0d11e8a8960..4ec74c066fe41 100644
--- a/coderd/database/queries/workspaces.sql
+++ b/coderd/database/queries/workspaces.sql
@@ -415,13 +415,11 @@ WHERE
ORDER BY created_at DESC;
-- name: GetWorkspaceUniqueOwnerCountByTemplateIDs :many
-SELECT
- template_id, COUNT(DISTINCT owner_id) AS unique_owners_sum
-FROM
- workspaces
-WHERE
- template_id = ANY(@template_ids :: uuid[]) AND deleted = false
-GROUP BY template_id;
+SELECT templates.id AS template_id, COUNT(DISTINCT workspaces.owner_id) AS unique_owners_sum
+FROM templates
+LEFT JOIN workspaces ON workspaces.template_id = templates.id AND workspaces.deleted = false
+WHERE templates.id = ANY(@template_ids :: uuid[])
+GROUP BY templates.id;
-- name: InsertWorkspace :one
INSERT INTO
diff --git a/coderd/metricscache/metricscache.go b/coderd/metricscache/metricscache.go
index 3452ef2cce10f..9a18400c8d54b 100644
--- a/coderd/metricscache/metricscache.go
+++ b/coderd/metricscache/metricscache.go
@@ -15,6 +15,7 @@ import (
"github.com/coder/coder/v2/coderd/database/dbauthz"
"github.com/coder/coder/v2/coderd/database/dbtime"
"github.com/coder/coder/v2/codersdk"
+ "github.com/coder/quartz"
"github.com/coder/retry"
)
@@ -26,6 +27,7 @@ import (
type Cache struct {
database database.Store
log slog.Logger
+ clock quartz.Clock
intervals Intervals
templateWorkspaceOwners atomic.Pointer[map[uuid.UUID]int]
@@ -45,7 +47,7 @@ type Intervals struct {
DeploymentStats time.Duration
}
-func New(db database.Store, log slog.Logger, intervals Intervals, usage bool) *Cache {
+func New(db database.Store, log slog.Logger, clock quartz.Clock, intervals Intervals, usage bool) *Cache {
if intervals.TemplateBuildTimes <= 0 {
intervals.TemplateBuildTimes = time.Hour
}
@@ -55,6 +57,7 @@ func New(db database.Store, log slog.Logger, intervals Intervals, usage bool) *C
ctx, cancel := context.WithCancel(context.Background())
c := &Cache{
+ clock: clock,
database: db,
intervals: intervals,
log: log,
@@ -104,7 +107,7 @@ func (c *Cache) refreshTemplateBuildTimes(ctx context.Context) error {
Valid: true,
},
StartTime: sql.NullTime{
- Time: dbtime.Time(time.Now().AddDate(0, 0, -30)),
+ Time: dbtime.Time(c.clock.Now().AddDate(0, 0, -30)),
Valid: true,
},
})
@@ -131,7 +134,7 @@ func (c *Cache) refreshTemplateBuildTimes(ctx context.Context) error {
func (c *Cache) refreshDeploymentStats(ctx context.Context) error {
var (
- from = dbtime.Now().Add(-15 * time.Minute)
+ from = c.clock.Now().Add(-15 * time.Minute)
agentStats database.GetDeploymentWorkspaceAgentStatsRow
err error
)
@@ -155,8 +158,8 @@ func (c *Cache) refreshDeploymentStats(ctx context.Context) error {
}
c.deploymentStatsResponse.Store(&codersdk.DeploymentStats{
AggregatedFrom: from,
- CollectedAt: dbtime.Now(),
- NextUpdateAt: dbtime.Now().Add(c.intervals.DeploymentStats),
+ CollectedAt: dbtime.Time(c.clock.Now()),
+ NextUpdateAt: dbtime.Time(c.clock.Now().Add(c.intervals.DeploymentStats)),
Workspaces: codersdk.WorkspaceDeploymentStats{
Pending: workspaceStats.PendingWorkspaces,
Building: workspaceStats.BuildingWorkspaces,
diff --git a/coderd/metricscache/metricscache_test.go b/coderd/metricscache/metricscache_test.go
index 24b22d012c1be..b825bc6454522 100644
--- a/coderd/metricscache/metricscache_test.go
+++ b/coderd/metricscache/metricscache_test.go
@@ -4,42 +4,68 @@ import (
"context"
"database/sql"
"encoding/json"
+ "sync/atomic"
"testing"
"time"
"github.com/google/uuid"
+ "github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/require"
+ "cdr.dev/slog"
"github.com/coder/coder/v2/coderd/database"
+ "github.com/coder/coder/v2/coderd/database/dbauthz"
"github.com/coder/coder/v2/coderd/database/dbgen"
- "github.com/coder/coder/v2/coderd/database/dbmem"
- "github.com/coder/coder/v2/coderd/database/dbtime"
+ "github.com/coder/coder/v2/coderd/database/dbtestutil"
"github.com/coder/coder/v2/coderd/metricscache"
+ "github.com/coder/coder/v2/coderd/rbac"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/testutil"
+ "github.com/coder/quartz"
)
func date(year, month, day int) time.Time {
return time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC)
}
+func newMetricsCache(t *testing.T, log slog.Logger, clock quartz.Clock, intervals metricscache.Intervals, usage bool) (*metricscache.Cache, database.Store) {
+ t.Helper()
+
+ accessControlStore := &atomic.Pointer[dbauthz.AccessControlStore]{}
+ var acs dbauthz.AccessControlStore = dbauthz.AGPLTemplateAccessControlStore{}
+ accessControlStore.Store(&acs)
+
+ var (
+ auth = rbac.NewStrictCachingAuthorizer(prometheus.NewRegistry())
+ db, _ = dbtestutil.NewDB(t)
+ dbauth = dbauthz.New(db, auth, log, accessControlStore)
+ cache = metricscache.New(dbauth, log, clock, intervals, usage)
+ )
+
+ t.Cleanup(func() { cache.Close() })
+
+ return cache, db
+}
+
func TestCache_TemplateWorkspaceOwners(t *testing.T) {
t.Parallel()
var ()
var (
- db = dbmem.New()
- cache = metricscache.New(db, testutil.Logger(t), metricscache.Intervals{
+ log = testutil.Logger(t)
+ clock = quartz.NewReal()
+ cache, db = newMetricsCache(t, log, clock, metricscache.Intervals{
TemplateBuildTimes: testutil.IntervalFast,
}, false)
)
- defer cache.Close()
-
+ org := dbgen.Organization(t, db, database.Organization{})
user1 := dbgen.User(t, db, database.User{})
user2 := dbgen.User(t, db, database.User{})
template := dbgen.Template(t, db, database.Template{
- Provisioner: database.ProvisionerTypeEcho,
+ OrganizationID: org.ID,
+ Provisioner: database.ProvisionerTypeEcho,
+ CreatedBy: user1.ID,
})
require.Eventuallyf(t, func() bool {
count, ok := cache.TemplateWorkspaceOwners(template.ID)
@@ -49,8 +75,9 @@ func TestCache_TemplateWorkspaceOwners(t *testing.T) {
)
dbgen.Workspace(t, db, database.WorkspaceTable{
- TemplateID: template.ID,
- OwnerID: user1.ID,
+ OrganizationID: org.ID,
+ TemplateID: template.ID,
+ OwnerID: user1.ID,
})
require.Eventuallyf(t, func() bool {
@@ -61,8 +88,9 @@ func TestCache_TemplateWorkspaceOwners(t *testing.T) {
)
workspace2 := dbgen.Workspace(t, db, database.WorkspaceTable{
- TemplateID: template.ID,
- OwnerID: user2.ID,
+ OrganizationID: org.ID,
+ TemplateID: template.ID,
+ OwnerID: user2.ID,
})
require.Eventuallyf(t, func() bool {
@@ -74,8 +102,9 @@ func TestCache_TemplateWorkspaceOwners(t *testing.T) {
// 3rd workspace should not be counted since we have the same owner as workspace2.
dbgen.Workspace(t, db, database.WorkspaceTable{
- TemplateID: template.ID,
- OwnerID: user1.ID,
+ OrganizationID: org.ID,
+ TemplateID: template.ID,
+ OwnerID: user1.ID,
})
db.UpdateWorkspaceDeletedByID(context.Background(), database.UpdateWorkspaceDeletedByIDParams{
@@ -149,7 +178,7 @@ func TestCache_BuildTime(t *testing.T) {
},
},
transition: database.WorkspaceTransitionStop,
- }, want{30 * 1000, true},
+ }, want{10 * 1000, true},
},
{
"three/delete", args{
@@ -176,67 +205,57 @@ func TestCache_BuildTime(t *testing.T) {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
- ctx := context.Background()
var (
- db = dbmem.New()
- cache = metricscache.New(db, testutil.Logger(t), metricscache.Intervals{
+ log = testutil.Logger(t)
+ clock = quartz.NewMock(t)
+ cache, db = newMetricsCache(t, log, clock, metricscache.Intervals{
TemplateBuildTimes: testutil.IntervalFast,
}, false)
)
- defer cache.Close()
+ clock.Set(someDay)
+
+ org := dbgen.Organization(t, db, database.Organization{})
+ user := dbgen.User(t, db, database.User{})
- id := uuid.New()
- err := db.InsertTemplate(ctx, database.InsertTemplateParams{
- ID: id,
- Provisioner: database.ProvisionerTypeEcho,
- MaxPortSharingLevel: database.AppSharingLevelOwner,
+ template := dbgen.Template(t, db, database.Template{
+ CreatedBy: user.ID,
+ OrganizationID: org.ID,
})
- require.NoError(t, err)
- template, err := db.GetTemplateByID(ctx, id)
- require.NoError(t, err)
-
- templateVersionID := uuid.New()
- err = db.InsertTemplateVersion(ctx, database.InsertTemplateVersionParams{
- ID: templateVersionID,
- TemplateID: uuid.NullUUID{UUID: template.ID, Valid: true},
+
+ templateVersion := dbgen.TemplateVersion(t, db, database.TemplateVersion{
+ OrganizationID: org.ID,
+ CreatedBy: user.ID,
+ TemplateID: uuid.NullUUID{UUID: template.ID, Valid: true},
+ })
+
+ workspace := dbgen.Workspace(t, db, database.WorkspaceTable{
+ OrganizationID: org.ID,
+ OwnerID: user.ID,
+ TemplateID: template.ID,
})
- require.NoError(t, err)
gotStats := cache.TemplateBuildTimeStats(template.ID)
requireBuildTimeStatsEmpty(t, gotStats)
- for _, row := range tt.args.rows {
- _, err := db.InsertProvisionerJob(ctx, database.InsertProvisionerJobParams{
- ID: uuid.New(),
- Provisioner: database.ProvisionerTypeEcho,
- StorageMethod: database.ProvisionerStorageMethodFile,
- Type: database.ProvisionerJobTypeWorkspaceBuild,
- })
- require.NoError(t, err)
-
- job, err := db.AcquireProvisionerJob(ctx, database.AcquireProvisionerJobParams{
- StartedAt: sql.NullTime{Time: row.startedAt, Valid: true},
- Types: []database.ProvisionerType{
- database.ProvisionerTypeEcho,
- },
+ for buildNumber, row := range tt.args.rows {
+ job := dbgen.ProvisionerJob(t, db, nil, database.ProvisionerJob{
+ OrganizationID: org.ID,
+ InitiatorID: user.ID,
+ Type: database.ProvisionerJobTypeWorkspaceBuild,
+ StartedAt: sql.NullTime{Time: row.startedAt, Valid: true},
+ CompletedAt: sql.NullTime{Time: row.completedAt, Valid: true},
})
- require.NoError(t, err)
- err = db.InsertWorkspaceBuild(ctx, database.InsertWorkspaceBuildParams{
- TemplateVersionID: templateVersionID,
+ dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{
+ BuildNumber: int32(1 + buildNumber),
+ WorkspaceID: workspace.ID,
+ InitiatorID: user.ID,
+ TemplateVersionID: templateVersion.ID,
JobID: job.ID,
Transition: tt.args.transition,
- Reason: database.BuildReasonInitiator,
})
- require.NoError(t, err)
-
- err = db.UpdateProvisionerJobWithCompleteByID(ctx, database.UpdateProvisionerJobWithCompleteByIDParams{
- ID: job.ID,
- CompletedAt: sql.NullTime{Time: row.completedAt, Valid: true},
- })
- require.NoError(t, err)
}
if tt.want.loads {
@@ -274,15 +293,18 @@ func TestCache_BuildTime(t *testing.T) {
func TestCache_DeploymentStats(t *testing.T) {
t.Parallel()
- db := dbmem.New()
- cache := metricscache.New(db, testutil.Logger(t), metricscache.Intervals{
- DeploymentStats: testutil.IntervalFast,
- }, false)
- defer cache.Close()
+
+ var (
+ log = testutil.Logger(t)
+ clock = quartz.NewMock(t)
+ cache, db = newMetricsCache(t, log, clock, metricscache.Intervals{
+ DeploymentStats: testutil.IntervalFast,
+ }, false)
+ )
err := db.InsertWorkspaceAgentStats(context.Background(), database.InsertWorkspaceAgentStatsParams{
ID: []uuid.UUID{uuid.New()},
- CreatedAt: []time.Time{dbtime.Now()},
+ CreatedAt: []time.Time{clock.Now()},
WorkspaceID: []uuid.UUID{uuid.New()},
UserID: []uuid.UUID{uuid.New()},
TemplateID: []uuid.UUID{uuid.New()},
From 4ba5a8a2ba8ec5a03c7b2360797806aeb3158bff Mon Sep 17 00:00:00 2001
From: Mathias Fredriksson
Date: Thu, 27 Feb 2025 12:45:45 +0200
Subject: [PATCH 19/44] feat(agent): add connection reporting for SSH and
reconnecting PTY (#16652)
Updates #15139
---
agent/agent.go | 158 +++++++++++++++++++++++++++++++
agent/agent_test.go | 87 +++++++++++++++--
agent/agentssh/agentssh.go | 87 +++++++++++++++--
agent/agentssh/jetbrainstrack.go | 11 ++-
agent/agenttest/client.go | 30 ++++--
agent/reconnectingpty/server.go | 26 ++++-
cli/agent.go | 15 +++
7 files changed, 382 insertions(+), 32 deletions(-)
diff --git a/agent/agent.go b/agent/agent.go
index 285636cd31344..504fff2386826 100644
--- a/agent/agent.go
+++ b/agent/agent.go
@@ -8,6 +8,7 @@ import (
"fmt"
"hash/fnv"
"io"
+ "net"
"net/http"
"net/netip"
"os"
@@ -28,6 +29,7 @@ import (
"golang.org/x/exp/slices"
"golang.org/x/sync/errgroup"
"golang.org/x/xerrors"
+ "google.golang.org/protobuf/types/known/timestamppb"
"tailscale.com/net/speedtest"
"tailscale.com/tailcfg"
"tailscale.com/types/netlogtype"
@@ -90,6 +92,7 @@ type Options struct {
ContainerLister agentcontainers.Lister
ExperimentalContainersEnabled bool
+ ExperimentalConnectionReports bool
}
type Client interface {
@@ -177,6 +180,7 @@ func New(options Options) Agent {
lifecycleUpdate: make(chan struct{}, 1),
lifecycleReported: make(chan codersdk.WorkspaceAgentLifecycle, 1),
lifecycleStates: []agentsdk.PostLifecycleRequest{{State: codersdk.WorkspaceAgentLifecycleCreated}},
+ reportConnectionsUpdate: make(chan struct{}, 1),
ignorePorts: options.IgnorePorts,
portCacheDuration: options.PortCacheDuration,
reportMetadataInterval: options.ReportMetadataInterval,
@@ -192,6 +196,7 @@ func New(options Options) Agent {
lister: options.ContainerLister,
experimentalDevcontainersEnabled: options.ExperimentalContainersEnabled,
+ experimentalConnectionReports: options.ExperimentalConnectionReports,
}
// Initially, we have a closed channel, reflecting the fact that we are not initially connected.
// Each time we connect we replace the channel (while holding the closeMutex) with a new one
@@ -252,6 +257,10 @@ type agent struct {
lifecycleStates []agentsdk.PostLifecycleRequest
lifecycleLastReportedIndex int // Keeps track of the last lifecycle state we successfully reported.
+ reportConnectionsUpdate chan struct{}
+ reportConnectionsMu sync.Mutex
+ reportConnections []*proto.ReportConnectionRequest
+
network *tailnet.Conn
statsReporter *statsReporter
logSender *agentsdk.LogSender
@@ -264,6 +273,7 @@ type agent struct {
lister agentcontainers.Lister
experimentalDevcontainersEnabled bool
+ experimentalConnectionReports bool
}
func (a *agent) TailnetConn() *tailnet.Conn {
@@ -279,6 +289,24 @@ func (a *agent) init() {
UpdateEnv: a.updateCommandEnv,
WorkingDirectory: func() string { return a.manifest.Load().Directory },
BlockFileTransfer: a.blockFileTransfer,
+ ReportConnection: func(id uuid.UUID, magicType agentssh.MagicSessionType, ip string) func(code int, reason string) {
+ var connectionType proto.Connection_Type
+ switch magicType {
+ case agentssh.MagicSessionTypeSSH:
+ connectionType = proto.Connection_SSH
+ case agentssh.MagicSessionTypeVSCode:
+ connectionType = proto.Connection_VSCODE
+ case agentssh.MagicSessionTypeJetBrains:
+ connectionType = proto.Connection_JETBRAINS
+ case agentssh.MagicSessionTypeUnknown:
+ connectionType = proto.Connection_TYPE_UNSPECIFIED
+ default:
+ a.logger.Error(a.hardCtx, "unhandled magic session type when reporting connection", slog.F("magic_type", magicType))
+ connectionType = proto.Connection_TYPE_UNSPECIFIED
+ }
+
+ return a.reportConnection(id, connectionType, ip)
+ },
})
if err != nil {
panic(err)
@@ -301,6 +329,9 @@ func (a *agent) init() {
a.reconnectingPTYServer = reconnectingpty.NewServer(
a.logger.Named("reconnecting-pty"),
a.sshServer,
+ func(id uuid.UUID, ip string) func(code int, reason string) {
+ return a.reportConnection(id, proto.Connection_RECONNECTING_PTY, ip)
+ },
a.metrics.connectionsTotal, a.metrics.reconnectingPTYErrors,
a.reconnectingPTYTimeout,
func(s *reconnectingpty.Server) {
@@ -713,6 +744,129 @@ func (a *agent) setLifecycle(state codersdk.WorkspaceAgentLifecycle) {
}
}
+// reportConnectionsLoop sends queued connection reports to the agent API for auditing.
+func (a *agent) reportConnectionsLoop(ctx context.Context, aAPI proto.DRPCAgentClient24) error {
+ for {
+ select {
+ case <-a.reportConnectionsUpdate:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+
+ for {
+ a.reportConnectionsMu.Lock()
+ if len(a.reportConnections) == 0 {
+ a.reportConnectionsMu.Unlock()
+ break
+ }
+ payload := a.reportConnections[0]
+ // Release lock while we send the payload, this is safe
+ // since we only append to the slice.
+ a.reportConnectionsMu.Unlock()
+
+ logger := a.logger.With(slog.F("payload", payload))
+ logger.Debug(ctx, "reporting connection")
+ _, err := aAPI.ReportConnection(ctx, payload)
+ if err != nil {
+ return xerrors.Errorf("failed to report connection: %w", err)
+ }
+
+ logger.Debug(ctx, "successfully reported connection")
+
+ // Remove the payload we sent.
+ a.reportConnectionsMu.Lock()
+ a.reportConnections[0] = nil // Release the pointer from the underlying array.
+ a.reportConnections = a.reportConnections[1:]
+ a.reportConnectionsMu.Unlock()
+ }
+ }
+}
+
+const (
+ // reportConnectionBufferLimit limits the number of connection reports we
+ // buffer to avoid growing the buffer indefinitely. This should not happen
+ // unless the agent has lost connection to coderd for a long time or if
+ // the agent is being spammed with connections.
+ //
+ // If we assume ~150 bytes per connection report, this would be around 300KB
+ // of memory which seems acceptable. We could reduce this if necessary by
+ // not using the proto struct directly.
+ reportConnectionBufferLimit = 2048
+)
+
+func (a *agent) reportConnection(id uuid.UUID, connectionType proto.Connection_Type, ip string) (disconnected func(code int, reason string)) {
+ // If the experiment hasn't been enabled, we don't report connections.
+ if !a.experimentalConnectionReports {
+ return func(int, string) {} // Noop.
+ }
+
+ // Remove the port from the IP because ports are not supported in coderd.
+ if host, _, err := net.SplitHostPort(ip); err != nil {
+ a.logger.Error(a.hardCtx, "split host and port for connection report failed", slog.F("ip", ip), slog.Error(err))
+ } else {
+ // Best effort.
+ ip = host
+ }
+
+ a.reportConnectionsMu.Lock()
+ defer a.reportConnectionsMu.Unlock()
+
+ if len(a.reportConnections) >= reportConnectionBufferLimit {
+ a.logger.Warn(a.hardCtx, "connection report buffer limit reached, dropping connect",
+ slog.F("limit", reportConnectionBufferLimit),
+ slog.F("connection_id", id),
+ slog.F("connection_type", connectionType),
+ slog.F("ip", ip),
+ )
+ } else {
+ a.reportConnections = append(a.reportConnections, &proto.ReportConnectionRequest{
+ Connection: &proto.Connection{
+ Id: id[:],
+ Action: proto.Connection_CONNECT,
+ Type: connectionType,
+ Timestamp: timestamppb.New(time.Now()),
+ Ip: ip,
+ StatusCode: 0,
+ Reason: nil,
+ },
+ })
+ select {
+ case a.reportConnectionsUpdate <- struct{}{}:
+ default:
+ }
+ }
+
+ return func(code int, reason string) {
+ a.reportConnectionsMu.Lock()
+ defer a.reportConnectionsMu.Unlock()
+ if len(a.reportConnections) >= reportConnectionBufferLimit {
+ a.logger.Warn(a.hardCtx, "connection report buffer limit reached, dropping disconnect",
+ slog.F("limit", reportConnectionBufferLimit),
+ slog.F("connection_id", id),
+ slog.F("connection_type", connectionType),
+ slog.F("ip", ip),
+ )
+ return
+ }
+
+ a.reportConnections = append(a.reportConnections, &proto.ReportConnectionRequest{
+ Connection: &proto.Connection{
+ Id: id[:],
+ Action: proto.Connection_DISCONNECT,
+ Type: connectionType,
+ Timestamp: timestamppb.New(time.Now()),
+ Ip: ip,
+ StatusCode: int32(code), //nolint:gosec
+ Reason: &reason,
+ },
+ })
+ select {
+ case a.reportConnectionsUpdate <- struct{}{}:
+ default:
+ }
+ }
+}
+
// fetchServiceBannerLoop fetches the service banner on an interval. It will
// not be fetched immediately; the expectation is that it is primed elsewhere
// (and must be done before the session actually starts).
@@ -823,6 +977,10 @@ func (a *agent) run() (retErr error) {
return resourcesmonitor.Start(ctx)
})
+ // Connection reports are part of auditing, we should keep sending them via
+ // gracefulShutdownBehaviorRemain.
+ connMan.startAgentAPI("report connections", gracefulShutdownBehaviorRemain, a.reportConnectionsLoop)
+
// channels to sync goroutines below
// handle manifest
// |
diff --git a/agent/agent_test.go b/agent/agent_test.go
index 935309e98d873..7ccce20ae776e 100644
--- a/agent/agent_test.go
+++ b/agent/agent_test.go
@@ -163,7 +163,9 @@ func TestAgent_Stats_Magic(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
defer cancel()
//nolint:dogsled
- conn, _, stats, _, _ := setupAgent(t, agentsdk.Manifest{}, 0)
+ conn, agentClient, stats, _, _ := setupAgent(t, agentsdk.Manifest{}, 0, func(_ *agenttest.Client, o *agent.Options) {
+ o.ExperimentalConnectionReports = true
+ })
sshClient, err := conn.SSHClient(ctx)
require.NoError(t, err)
defer sshClient.Close()
@@ -193,6 +195,8 @@ func TestAgent_Stats_Magic(t *testing.T) {
_ = stdin.Close()
err = session.Wait()
require.NoError(t, err)
+
+ assertConnectionReport(t, agentClient, proto.Connection_VSCODE, 0, "")
})
t.Run("TracksJetBrains", func(t *testing.T) {
@@ -229,7 +233,9 @@ func TestAgent_Stats_Magic(t *testing.T) {
remotePort := sc.Text()
//nolint:dogsled
- conn, _, stats, _, _ := setupAgent(t, agentsdk.Manifest{}, 0)
+ conn, agentClient, stats, _, _ := setupAgent(t, agentsdk.Manifest{}, 0, func(_ *agenttest.Client, o *agent.Options) {
+ o.ExperimentalConnectionReports = true
+ })
sshClient, err := conn.SSHClient(ctx)
require.NoError(t, err)
@@ -265,6 +271,8 @@ func TestAgent_Stats_Magic(t *testing.T) {
}, testutil.WaitLong, testutil.IntervalFast,
"never saw stats after conn closes",
)
+
+ assertConnectionReport(t, agentClient, proto.Connection_JETBRAINS, 0, "")
})
}
@@ -922,7 +930,9 @@ func TestAgent_SFTP(t *testing.T) {
home = "/" + strings.ReplaceAll(home, "\\", "/")
}
//nolint:dogsled
- conn, _, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0)
+ conn, agentClient, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0, func(_ *agenttest.Client, o *agent.Options) {
+ o.ExperimentalConnectionReports = true
+ })
sshClient, err := conn.SSHClient(ctx)
require.NoError(t, err)
defer sshClient.Close()
@@ -945,6 +955,10 @@ func TestAgent_SFTP(t *testing.T) {
require.NoError(t, err)
_, err = os.Stat(tempFile)
require.NoError(t, err)
+
+ // Close the client to trigger disconnect event.
+ _ = client.Close()
+ assertConnectionReport(t, agentClient, proto.Connection_SSH, 0, "")
}
func TestAgent_SCP(t *testing.T) {
@@ -954,7 +968,9 @@ func TestAgent_SCP(t *testing.T) {
defer cancel()
//nolint:dogsled
- conn, _, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0)
+ conn, agentClient, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0, func(_ *agenttest.Client, o *agent.Options) {
+ o.ExperimentalConnectionReports = true
+ })
sshClient, err := conn.SSHClient(ctx)
require.NoError(t, err)
defer sshClient.Close()
@@ -967,6 +983,10 @@ func TestAgent_SCP(t *testing.T) {
require.NoError(t, err)
_, err = os.Stat(tempFile)
require.NoError(t, err)
+
+ // Close the client to trigger disconnect event.
+ scpClient.Close()
+ assertConnectionReport(t, agentClient, proto.Connection_SSH, 0, "")
}
func TestAgent_FileTransferBlocked(t *testing.T) {
@@ -991,8 +1011,9 @@ func TestAgent_FileTransferBlocked(t *testing.T) {
defer cancel()
//nolint:dogsled
- conn, _, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0, func(_ *agenttest.Client, o *agent.Options) {
+ conn, agentClient, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0, func(_ *agenttest.Client, o *agent.Options) {
o.BlockFileTransfer = true
+ o.ExperimentalConnectionReports = true
})
sshClient, err := conn.SSHClient(ctx)
require.NoError(t, err)
@@ -1000,6 +1021,8 @@ func TestAgent_FileTransferBlocked(t *testing.T) {
_, err = sftp.NewClient(sshClient)
require.Error(t, err)
assertFileTransferBlocked(t, err.Error())
+
+ assertConnectionReport(t, agentClient, proto.Connection_SSH, agentssh.BlockedFileTransferErrorCode, "")
})
t.Run("SCP with go-scp package", func(t *testing.T) {
@@ -1009,8 +1032,9 @@ func TestAgent_FileTransferBlocked(t *testing.T) {
defer cancel()
//nolint:dogsled
- conn, _, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0, func(_ *agenttest.Client, o *agent.Options) {
+ conn, agentClient, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0, func(_ *agenttest.Client, o *agent.Options) {
o.BlockFileTransfer = true
+ o.ExperimentalConnectionReports = true
})
sshClient, err := conn.SSHClient(ctx)
require.NoError(t, err)
@@ -1022,6 +1046,8 @@ func TestAgent_FileTransferBlocked(t *testing.T) {
err = scpClient.CopyFile(context.Background(), strings.NewReader("hello world"), tempFile, "0755")
require.Error(t, err)
assertFileTransferBlocked(t, err.Error())
+
+ assertConnectionReport(t, agentClient, proto.Connection_SSH, agentssh.BlockedFileTransferErrorCode, "")
})
t.Run("Forbidden commands", func(t *testing.T) {
@@ -1035,8 +1061,9 @@ func TestAgent_FileTransferBlocked(t *testing.T) {
defer cancel()
//nolint:dogsled
- conn, _, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0, func(_ *agenttest.Client, o *agent.Options) {
+ conn, agentClient, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0, func(_ *agenttest.Client, o *agent.Options) {
o.BlockFileTransfer = true
+ o.ExperimentalConnectionReports = true
})
sshClient, err := conn.SSHClient(ctx)
require.NoError(t, err)
@@ -1057,6 +1084,8 @@ func TestAgent_FileTransferBlocked(t *testing.T) {
msg, err := io.ReadAll(stdout)
require.NoError(t, err)
assertFileTransferBlocked(t, string(msg))
+
+ assertConnectionReport(t, agentClient, proto.Connection_SSH, agentssh.BlockedFileTransferErrorCode, "")
})
}
})
@@ -1665,8 +1694,18 @@ func TestAgent_ReconnectingPTY(t *testing.T) {
defer cancel()
//nolint:dogsled
- conn, _, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0)
+ conn, agentClient, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0, func(_ *agenttest.Client, o *agent.Options) {
+ o.ExperimentalConnectionReports = true
+ })
id := uuid.New()
+
+ // Test that the connection is reported. This must be tested in the
+ // first connection because we care about verifying all of these.
+ netConn0, err := conn.ReconnectingPTY(ctx, id, 80, 80, "bash --norc")
+ require.NoError(t, err)
+ _ = netConn0.Close()
+ assertConnectionReport(t, agentClient, proto.Connection_RECONNECTING_PTY, 0, "")
+
// --norc disables executing .bashrc, which is often used to customize the bash prompt
netConn1, err := conn.ReconnectingPTY(ctx, id, 80, 80, "bash --norc")
require.NoError(t, err)
@@ -2763,3 +2802,35 @@ func requireEcho(t *testing.T, conn net.Conn) {
require.NoError(t, err)
require.Equal(t, "test", string(b))
}
+
+func assertConnectionReport(t testing.TB, agentClient *agenttest.Client, connectionType proto.Connection_Type, status int, reason string) {
+ t.Helper()
+
+ var reports []*proto.ReportConnectionRequest
+ if !assert.Eventually(t, func() bool {
+ reports = agentClient.GetConnectionReports()
+ return len(reports) >= 2
+ }, testutil.WaitMedium, testutil.IntervalFast, "waiting for 2 connection reports or more; got %d", len(reports)) {
+ return
+ }
+
+ assert.Len(t, reports, 2, "want 2 connection reports")
+
+ assert.Equal(t, proto.Connection_CONNECT, reports[0].GetConnection().GetAction(), "first report should be connect")
+ assert.Equal(t, proto.Connection_DISCONNECT, reports[1].GetConnection().GetAction(), "second report should be disconnect")
+ assert.Equal(t, connectionType, reports[0].GetConnection().GetType(), "connect type should be %s", connectionType)
+ assert.Equal(t, connectionType, reports[1].GetConnection().GetType(), "disconnect type should be %s", connectionType)
+ t1 := reports[0].GetConnection().GetTimestamp().AsTime()
+ t2 := reports[1].GetConnection().GetTimestamp().AsTime()
+ assert.True(t, t1.Before(t2) || t1.Equal(t2), "connect timestamp should be before or equal to disconnect timestamp")
+ assert.NotEmpty(t, reports[0].GetConnection().GetIp(), "connect ip should not be empty")
+ assert.NotEmpty(t, reports[1].GetConnection().GetIp(), "disconnect ip should not be empty")
+ assert.Equal(t, 0, int(reports[0].GetConnection().GetStatusCode()), "connect status code should be 0")
+ assert.Equal(t, status, int(reports[1].GetConnection().GetStatusCode()), "disconnect status code should be %d", status)
+ assert.Equal(t, "", reports[0].GetConnection().GetReason(), "connect reason should be empty")
+ if reason != "" {
+ assert.Contains(t, reports[1].GetConnection().GetReason(), reason, "disconnect reason should contain %s", reason)
+ } else {
+ t.Logf("connection report disconnect reason: %s", reports[1].GetConnection().GetReason())
+ }
+}
diff --git a/agent/agentssh/agentssh.go b/agent/agentssh/agentssh.go
index 3b09df0e388dd..4a5d3215db911 100644
--- a/agent/agentssh/agentssh.go
+++ b/agent/agentssh/agentssh.go
@@ -78,6 +78,8 @@ const (
// BlockedFileTransferCommands contains a list of restricted file transfer commands.
var BlockedFileTransferCommands = []string{"nc", "rsync", "scp", "sftp"}
+type reportConnectionFunc func(id uuid.UUID, sessionType MagicSessionType, ip string) (disconnected func(code int, reason string))
+
// Config sets configuration parameters for the agent SSH server.
type Config struct {
// MaxTimeout sets the absolute connection timeout, none if empty. If set to
@@ -100,6 +102,8 @@ type Config struct {
X11DisplayOffset *int
// BlockFileTransfer restricts use of file transfer applications.
BlockFileTransfer bool
+ // ReportConnection is called when a connection is established; the returned function reports the disconnect with its exit code and reason.
+ ReportConnection reportConnectionFunc
}
type Server struct {
@@ -152,6 +156,9 @@ func NewServer(ctx context.Context, logger slog.Logger, prometheusRegistry *prom
return home
}
}
+ if config.ReportConnection == nil {
+ config.ReportConnection = func(uuid.UUID, MagicSessionType, string) func(int, string) { return func(int, string) {} }
+ }
forwardHandler := &ssh.ForwardedTCPHandler{}
unixForwardHandler := newForwardedUnixHandler(logger)
@@ -174,7 +181,7 @@ func NewServer(ctx context.Context, logger slog.Logger, prometheusRegistry *prom
ChannelHandlers: map[string]ssh.ChannelHandler{
"direct-tcpip": func(srv *ssh.Server, conn *gossh.ServerConn, newChan gossh.NewChannel, ctx ssh.Context) {
// Wrapper is designed to find and track JetBrains Gateway connections.
- wrapped := NewJetbrainsChannelWatcher(ctx, s.logger, newChan, &s.connCountJetBrains)
+ wrapped := NewJetbrainsChannelWatcher(ctx, s.logger, s.config.ReportConnection, newChan, &s.connCountJetBrains)
ssh.DirectTCPIPHandler(srv, conn, wrapped, ctx)
},
"direct-streamlocal@openssh.com": directStreamLocalHandler,
@@ -288,6 +295,35 @@ func extractMagicSessionType(env []string) (magicType MagicSessionType, rawType
})
}
+// sessionCloseTracker is a wrapper around Session that tracks the exit code.
+type sessionCloseTracker struct {
+ ssh.Session
+ exitOnce sync.Once
+ code atomic.Int64
+}
+
+var _ ssh.Session = &sessionCloseTracker{}
+
+func (s *sessionCloseTracker) track(code int) {
+ s.exitOnce.Do(func() {
+ s.code.Store(int64(code))
+ })
+}
+
+func (s *sessionCloseTracker) exitCode() int {
+ return int(s.code.Load())
+}
+
+func (s *sessionCloseTracker) Exit(code int) error {
+ s.track(code)
+ return s.Session.Exit(code)
+}
+
+func (s *sessionCloseTracker) Close() error {
+ s.track(1)
+ return s.Session.Close()
+}
+
func (s *Server) sessionHandler(session ssh.Session) {
ctx := session.Context()
id := uuid.New()
@@ -300,17 +336,23 @@ func (s *Server) sessionHandler(session ssh.Session) {
)
logger.Info(ctx, "handling ssh session")
+ env := session.Environ()
+ magicType, magicTypeRaw, env := extractMagicSessionType(env)
+
if !s.trackSession(session, true) {
+ reason := "unable to accept new session, server is closing"
+ // Report connection attempt even if we couldn't accept it.
+ disconnected := s.config.ReportConnection(id, magicType, session.RemoteAddr().String())
+ defer disconnected(1, reason)
+
+ logger.Info(ctx, reason)
// See (*Server).Close() for why we call Close instead of Exit.
_ = session.Close()
- logger.Info(ctx, "unable to accept new session, server is closing")
return
}
defer s.trackSession(session, false)
- env := session.Environ()
- magicType, magicTypeRaw, env := extractMagicSessionType(env)
-
+ reportSession := true
switch magicType {
case MagicSessionTypeVSCode:
s.connCountVSCode.Add(1)
@@ -318,6 +360,7 @@ func (s *Server) sessionHandler(session ssh.Session) {
case MagicSessionTypeJetBrains:
// Do nothing here because JetBrains launches hundreds of ssh sessions.
// We instead track JetBrains in the single persistent tcp forwarding channel.
+ reportSession = false
case MagicSessionTypeSSH:
s.connCountSSHSession.Add(1)
defer s.connCountSSHSession.Add(-1)
@@ -325,6 +368,20 @@ func (s *Server) sessionHandler(session ssh.Session) {
logger.Warn(ctx, "invalid magic ssh session type specified", slog.F("raw_type", magicTypeRaw))
}
+ closeCause := func(string) {}
+ if reportSession {
+ var reason string
+ closeCause = func(r string) { reason = r }
+
+ scr := &sessionCloseTracker{Session: session}
+ session = scr
+
+ disconnected := s.config.ReportConnection(id, magicType, session.RemoteAddr().String())
+ defer func() {
+ disconnected(scr.exitCode(), reason)
+ }()
+ }
+
if s.fileTransferBlocked(session) {
s.logger.Warn(ctx, "file transfer blocked", slog.F("session_subsystem", session.Subsystem()), slog.F("raw_command", session.RawCommand()))
@@ -333,6 +390,7 @@ func (s *Server) sessionHandler(session ssh.Session) {
errorMessage := fmt.Sprintf("\x02%s\n", BlockedFileTransferErrorMessage)
_, _ = session.Write([]byte(errorMessage))
}
+ closeCause("file transfer blocked")
_ = session.Exit(BlockedFileTransferErrorCode)
return
}
@@ -340,10 +398,14 @@ func (s *Server) sessionHandler(session ssh.Session) {
switch ss := session.Subsystem(); ss {
case "":
case "sftp":
- s.sftpHandler(logger, session)
+ err := s.sftpHandler(logger, session)
+ if err != nil {
+ closeCause(err.Error())
+ }
return
default:
logger.Warn(ctx, "unsupported subsystem", slog.F("subsystem", ss))
+ closeCause(fmt.Sprintf("unsupported subsystem: %s", ss))
_ = session.Exit(1)
return
}
@@ -352,8 +414,9 @@ func (s *Server) sessionHandler(session ssh.Session) {
if hasX11 {
display, handled := s.x11Handler(session.Context(), x11)
if !handled {
- _ = session.Exit(1)
logger.Error(ctx, "x11 handler failed")
+ closeCause("x11 handler failed")
+ _ = session.Exit(1)
return
}
env = append(env, fmt.Sprintf("DISPLAY=localhost:%d.%d", display, x11.ScreenNumber))
@@ -380,6 +443,8 @@ func (s *Server) sessionHandler(session ssh.Session) {
slog.F("exit_code", code),
)
+ closeCause(fmt.Sprintf("process exited with error status: %d", exitError.ExitCode()))
+
// TODO(mafredri): For signal exit, there's also an "exit-signal"
// request (session.Exit sends "exit-status"), however, since it's
// not implemented on the session interface and not used by
@@ -391,6 +456,7 @@ func (s *Server) sessionHandler(session ssh.Session) {
logger.Warn(ctx, "ssh session failed", slog.Error(err))
// This exit code is designed to be unlikely to be confused for a legit exit code
// from the process.
+ closeCause(err.Error())
_ = session.Exit(MagicSessionErrorCode)
return
}
@@ -650,7 +716,7 @@ func handleSignal(logger slog.Logger, ssig ssh.Signal, signaler interface{ Signa
}
}
-func (s *Server) sftpHandler(logger slog.Logger, session ssh.Session) {
+func (s *Server) sftpHandler(logger slog.Logger, session ssh.Session) error {
s.metrics.sftpConnectionsTotal.Add(1)
ctx := session.Context()
@@ -674,7 +740,7 @@ func (s *Server) sftpHandler(logger slog.Logger, session ssh.Session) {
server, err := sftp.NewServer(session, opts...)
if err != nil {
logger.Debug(ctx, "initialize sftp server", slog.Error(err))
- return
+ return xerrors.Errorf("initialize sftp server: %w", err)
}
defer server.Close()
@@ -689,11 +755,12 @@ func (s *Server) sftpHandler(logger slog.Logger, session ssh.Session) {
// code but `scp` on macOS does (when using the default
// SFTP backend).
_ = session.Exit(0)
- return
+ return nil
}
logger.Warn(ctx, "sftp server closed with error", slog.Error(err))
s.metrics.sftpServerErrors.Add(1)
_ = session.Exit(1)
+ return xerrors.Errorf("sftp server closed with error: %w", err)
}
// CreateCommand processes raw command input with OpenSSH-like behavior.
diff --git a/agent/agentssh/jetbrainstrack.go b/agent/agentssh/jetbrainstrack.go
index 534f2899b11ae..9b2fdf83b21d0 100644
--- a/agent/agentssh/jetbrainstrack.go
+++ b/agent/agentssh/jetbrainstrack.go
@@ -6,6 +6,7 @@ import (
"sync"
"github.com/gliderlabs/ssh"
+ "github.com/google/uuid"
"go.uber.org/atomic"
gossh "golang.org/x/crypto/ssh"
@@ -28,9 +29,11 @@ type JetbrainsChannelWatcher struct {
gossh.NewChannel
jetbrainsCounter *atomic.Int64
logger slog.Logger
+ originAddr string
+ reportConnection reportConnectionFunc
}
-func NewJetbrainsChannelWatcher(ctx ssh.Context, logger slog.Logger, newChannel gossh.NewChannel, counter *atomic.Int64) gossh.NewChannel {
+func NewJetbrainsChannelWatcher(ctx ssh.Context, logger slog.Logger, reportConnection reportConnectionFunc, newChannel gossh.NewChannel, counter *atomic.Int64) gossh.NewChannel {
d := localForwardChannelData{}
if err := gossh.Unmarshal(newChannel.ExtraData(), &d); err != nil {
// If the data fails to unmarshal, do nothing.
@@ -61,12 +64,17 @@ func NewJetbrainsChannelWatcher(ctx ssh.Context, logger slog.Logger, newChannel
NewChannel: newChannel,
jetbrainsCounter: counter,
logger: logger.With(slog.F("destination_port", d.DestPort)),
+ originAddr: d.OriginAddr,
+ reportConnection: reportConnection,
}
}
func (w *JetbrainsChannelWatcher) Accept() (gossh.Channel, <-chan *gossh.Request, error) {
+ disconnected := w.reportConnection(uuid.New(), MagicSessionTypeJetBrains, w.originAddr)
+
c, r, err := w.NewChannel.Accept()
if err != nil {
+ disconnected(1, err.Error())
return c, r, err
}
w.jetbrainsCounter.Add(1)
@@ -77,6 +85,7 @@ func (w *JetbrainsChannelWatcher) Accept() (gossh.Channel, <-chan *gossh.Request
Channel: c,
done: func() {
w.jetbrainsCounter.Add(-1)
+ disconnected(0, "")
// nolint: gocritic // JetBrains is a proper noun and should be capitalized
w.logger.Debug(context.Background(), "JetBrains watcher channel closed")
},
diff --git a/agent/agenttest/client.go b/agent/agenttest/client.go
index ed734c6df9f6c..b5fa6ea8c2189 100644
--- a/agent/agenttest/client.go
+++ b/agent/agenttest/client.go
@@ -158,20 +158,24 @@ func (c *Client) SetLogsChannel(ch chan<- *agentproto.BatchCreateLogsRequest) {
c.fakeAgentAPI.SetLogsChannel(ch)
}
+func (c *Client) GetConnectionReports() []*agentproto.ReportConnectionRequest {
+ return c.fakeAgentAPI.GetConnectionReports()
+}
+
type FakeAgentAPI struct {
sync.Mutex
t testing.TB
logger slog.Logger
- manifest *agentproto.Manifest
- startupCh chan *agentproto.Startup
- statsCh chan *agentproto.Stats
- appHealthCh chan *agentproto.BatchUpdateAppHealthRequest
- logsCh chan<- *agentproto.BatchCreateLogsRequest
- lifecycleStates []codersdk.WorkspaceAgentLifecycle
- metadata map[string]agentsdk.Metadata
- timings []*agentproto.Timing
- connections []*agentproto.Connection
+ manifest *agentproto.Manifest
+ startupCh chan *agentproto.Startup
+ statsCh chan *agentproto.Stats
+ appHealthCh chan *agentproto.BatchUpdateAppHealthRequest
+ logsCh chan<- *agentproto.BatchCreateLogsRequest
+ lifecycleStates []codersdk.WorkspaceAgentLifecycle
+ metadata map[string]agentsdk.Metadata
+ timings []*agentproto.Timing
+ connectionReports []*agentproto.ReportConnectionRequest
getAnnouncementBannersFunc func() ([]codersdk.BannerConfig, error)
getResourcesMonitoringConfigurationFunc func() (*agentproto.GetResourcesMonitoringConfigurationResponse, error)
@@ -348,12 +352,18 @@ func (f *FakeAgentAPI) ScriptCompleted(_ context.Context, req *agentproto.Worksp
func (f *FakeAgentAPI) ReportConnection(_ context.Context, req *agentproto.ReportConnectionRequest) (*emptypb.Empty, error) {
f.Lock()
- f.connections = append(f.connections, req.GetConnection())
+ f.connectionReports = append(f.connectionReports, req)
f.Unlock()
return &emptypb.Empty{}, nil
}
+func (f *FakeAgentAPI) GetConnectionReports() []*agentproto.ReportConnectionRequest {
+ f.Lock()
+ defer f.Unlock()
+ return slices.Clone(f.connectionReports)
+}
+
func NewFakeAgentAPI(t testing.TB, logger slog.Logger, manifest *agentproto.Manifest, statsCh chan *agentproto.Stats) *FakeAgentAPI {
return &FakeAgentAPI{
t: t,
diff --git a/agent/reconnectingpty/server.go b/agent/reconnectingpty/server.go
index ab4ce854c789c..7ad7db976c8b0 100644
--- a/agent/reconnectingpty/server.go
+++ b/agent/reconnectingpty/server.go
@@ -20,11 +20,14 @@ import (
"github.com/coder/coder/v2/codersdk/workspacesdk"
)
+type reportConnectionFunc func(id uuid.UUID, ip string) (disconnected func(code int, reason string))
+
type Server struct {
logger slog.Logger
connectionsTotal prometheus.Counter
errorsTotal *prometheus.CounterVec
commandCreator *agentssh.Server
+ reportConnection reportConnectionFunc
connCount atomic.Int64
reconnectingPTYs sync.Map
timeout time.Duration
@@ -33,13 +36,19 @@ type Server struct {
}
// NewServer returns a new ReconnectingPTY server
-func NewServer(logger slog.Logger, commandCreator *agentssh.Server,
+func NewServer(logger slog.Logger, commandCreator *agentssh.Server, reportConnection reportConnectionFunc,
connectionsTotal prometheus.Counter, errorsTotal *prometheus.CounterVec,
timeout time.Duration, opts ...func(*Server),
) *Server {
+ if reportConnection == nil {
+ reportConnection = func(uuid.UUID, string) func(int, string) {
+ return func(int, string) {}
+ }
+ }
s := &Server{
logger: logger,
commandCreator: commandCreator,
+ reportConnection: reportConnection,
connectionsTotal: connectionsTotal,
errorsTotal: errorsTotal,
timeout: timeout,
@@ -67,20 +76,31 @@ func (s *Server) Serve(ctx, hardCtx context.Context, l net.Listener) (retErr err
slog.F("local", conn.LocalAddr().String()))
clog.Info(ctx, "accepted conn")
wg.Add(1)
+ disconnected := s.reportConnection(uuid.New(), conn.RemoteAddr().String())
closed := make(chan struct{})
go func() {
+ defer wg.Done()
select {
case <-closed:
case <-hardCtx.Done():
+ disconnected(1, "server shut down")
_ = conn.Close()
}
- wg.Done()
}()
wg.Add(1)
go func() {
defer close(closed)
defer wg.Done()
- _ = s.handleConn(ctx, clog, conn)
+ err := s.handleConn(ctx, clog, conn)
+ if err != nil {
+ if ctx.Err() != nil {
+ disconnected(1, "server shutting down")
+ } else {
+ disconnected(1, err.Error())
+ }
+ } else {
+ disconnected(0, "")
+ }
}()
}
wg.Wait()
diff --git a/cli/agent.go b/cli/agent.go
index 01d6c36f7a045..638f7083805ab 100644
--- a/cli/agent.go
+++ b/cli/agent.go
@@ -54,6 +54,8 @@ func (r *RootCmd) workspaceAgent() *serpent.Command {
agentHeaderCommand string
agentHeader []string
devcontainersEnabled bool
+
+ experimentalConnectionReports bool
)
cmd := &serpent.Command{
Use: "agent",
@@ -325,6 +327,10 @@ func (r *RootCmd) workspaceAgent() *serpent.Command {
containerLister = agentcontainers.NewDocker(execer)
}
+ if experimentalConnectionReports {
+ logger.Info(ctx, "experimental connection reports enabled")
+ }
+
agnt := agent.New(agent.Options{
Client: client,
Logger: logger,
@@ -353,6 +359,7 @@ func (r *RootCmd) workspaceAgent() *serpent.Command {
ContainerLister: containerLister,
ExperimentalContainersEnabled: devcontainersEnabled,
+ ExperimentalConnectionReports: experimentalConnectionReports,
})
promHandler := agent.PrometheusMetricsHandler(prometheusRegistry, logger)
@@ -482,6 +489,14 @@ func (r *RootCmd) workspaceAgent() *serpent.Command {
Description: "Allow the agent to automatically detect running devcontainers.",
Value: serpent.BoolOf(&devcontainersEnabled),
},
+ {
+ Flag: "experimental-connection-reports-enable",
+ Hidden: true,
+ Default: "false",
+ Env: "CODER_AGENT_EXPERIMENTAL_CONNECTION_REPORTS_ENABLE",
+ Description: "Enable experimental connection reports.",
+ Value: serpent.BoolOf(&experimentalConnectionReports),
+ },
}
return cmd
From cccdf1ecac805fd8b83ad2e05b8747968fc2f933 Mon Sep 17 00:00:00 2001
From: Steven Masley
Date: Thu, 27 Feb 2025 05:23:18 -0600
Subject: [PATCH 20/44] feat: implement WorkspaceCreationBan org role (#16686)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Using negative permissions, this role prevents a user's ability to
create & delete a workspace within a given organization.
Workspaces are uniquely owned by an org and a user, so the org has to
supersede the user permission with a negative permission.
# Use case
Organizations must be able to restrict a member's ability to create a
workspace. This permission is implicitly granted (see
https://github.com/coder/coder/issues/16546#issuecomment-2655437860).
To revoke this permission, the solution chosen was to use negative
permissions in a built in role called `WorkspaceCreationBan`.
# Rationale
Using negative permissions is new territory, and not ideal. However,
workspaces are in a unique position.
Workspaces have 2 owners. The organization and the user. To prevent
users from creating a workspace in another organization, an [implied
negative
permission](https://github.com/coder/coder/blob/36d9f5ddb3d98029fee07d004709e1e51022e979/coderd/rbac/policy.rego#L172-L192)
is used. So the truth table looks like: _how to read this table
[here](https://github.com/coder/coder/blob/36d9f5ddb3d98029fee07d004709e1e51022e979/coderd/rbac/README.md#roles)_
| Role (example) | Site | Org | User | Result |
|-----------------|------|------|------|--------|
| non-org-member | \_ | N | YN\_ | N |
| user | \_ | \_ | Y | Y |
| WorkspaceBan | \_ | N | Y | Y |
| unauthenticated | \_ | \_ | \_ | N |
This new role, `WorkspaceCreationBan` is the same truth table condition
as if the user was not a member of the organization (when doing a
workspace create/delete). So this behavior **is not entirely new**.
How to do it without a negative permission
The alternate approach would be to remove the implied permission, and
grant it via an organization role. However, this would add new behavior:
an organizational role would then have the ability to grant a user
permissions on their own resources.
It does not make sense for an org role to prevent a user from changing
their profile information for example. So the only option is to create a
new truth table column for resources that are owned by both an
organization and a user.
| Role (example) | Site | Org |User+Org| User | Result |
|-----------------|------|------|--------|------|--------|
| non-org-member | \_ | N | \_ | \_ | N |
| user | \_ | \_ | \_ | \_ | N |
| WorkspaceAllow | \_ | \_ | Y | \_ | Y |
| unauthenticated | \_ | \_ | \_ | \_ | N |
Now a user has no opinion on whether they can create a workspace, which feels
a little wrong. A user should have authority over what is theirs.
There is a fundamental _philosophical_ question of "Who does a workspace
belong to?". The user has some degree of autonomy, yet it is the
organization that controls its existence. A head scratcher :thinking:
## Will we need more negative built in roles?
There are few resources that have shared ownership. Only
`ResourceOrganizationMember` and `ResourceGroupMember`. Since negative
permissions is intended to revoke access to a shared resource, then
**no.** **This is the only one we need**.
Classic resources like `ResourceTemplate` are entirely controlled by the
Organization permissions. And resources entirely in the user control
(like user profile) are only controlled by `User` permissions.
![Uploading Screenshot 2025-02-26 at 22.26.52.png…]()
---------
Co-authored-by: Jaayden Halko
Co-authored-by: ケイラ
---
coderd/httpapi/httpapi.go | 10 +-
coderd/rbac/roles.go | 107 ++++++++++++------
coderd/rbac/roles_test.go | 18 ++-
coderd/workspaces_test.go | 48 ++++++++
coderd/wsbuilder/wsbuilder.go | 9 ++
codersdk/rbacroles.go | 11 +-
enterprise/coderd/roles_test.go | 27 +++--
site/src/api/typesGenerated.ts | 4 +
.../UserTable/EditRolesButton.stories.tsx | 12 ++
.../UserTable/EditRolesButton.tsx | 64 ++++++++++-
site/src/testHelpers/entities.ts | 16 ++-
11 files changed, 261 insertions(+), 65 deletions(-)
diff --git a/coderd/httpapi/httpapi.go b/coderd/httpapi/httpapi.go
index a9687d58a0604..d5895dcbf86f0 100644
--- a/coderd/httpapi/httpapi.go
+++ b/coderd/httpapi/httpapi.go
@@ -151,11 +151,13 @@ func ResourceNotFound(rw http.ResponseWriter) {
Write(context.Background(), rw, http.StatusNotFound, ResourceNotFoundResponse)
}
+var ResourceForbiddenResponse = codersdk.Response{
+ Message: "Forbidden.",
+ Detail: "You don't have permission to view this content. If you believe this is a mistake, please contact your administrator or try signing in with different credentials.",
+}
+
func Forbidden(rw http.ResponseWriter) {
- Write(context.Background(), rw, http.StatusForbidden, codersdk.Response{
- Message: "Forbidden.",
- Detail: "You don't have permission to view this content. If you believe this is a mistake, please contact your administrator or try signing in with different credentials.",
- })
+ Write(context.Background(), rw, http.StatusForbidden, ResourceForbiddenResponse)
}
func InternalServerError(rw http.ResponseWriter, err error) {
diff --git a/coderd/rbac/roles.go b/coderd/rbac/roles.go
index 7c733016430fe..440494450e2d1 100644
--- a/coderd/rbac/roles.go
+++ b/coderd/rbac/roles.go
@@ -27,11 +27,12 @@ const (
customSiteRole string = "custom-site-role"
customOrganizationRole string = "custom-organization-role"
- orgAdmin string = "organization-admin"
- orgMember string = "organization-member"
- orgAuditor string = "organization-auditor"
- orgUserAdmin string = "organization-user-admin"
- orgTemplateAdmin string = "organization-template-admin"
+ orgAdmin string = "organization-admin"
+ orgMember string = "organization-member"
+ orgAuditor string = "organization-auditor"
+ orgUserAdmin string = "organization-user-admin"
+ orgTemplateAdmin string = "organization-template-admin"
+ orgWorkspaceCreationBan string = "organization-workspace-creation-ban"
)
func init() {
@@ -159,6 +160,10 @@ func RoleOrgTemplateAdmin() string {
return orgTemplateAdmin
}
+func RoleOrgWorkspaceCreationBan() string {
+ return orgWorkspaceCreationBan
+}
+
// ScopedRoleOrgAdmin is the org role with the organization ID
func ScopedRoleOrgAdmin(organizationID uuid.UUID) RoleIdentifier {
return RoleIdentifier{Name: RoleOrgAdmin(), OrganizationID: organizationID}
@@ -181,6 +186,10 @@ func ScopedRoleOrgTemplateAdmin(organizationID uuid.UUID) RoleIdentifier {
return RoleIdentifier{Name: RoleOrgTemplateAdmin(), OrganizationID: organizationID}
}
+func ScopedRoleOrgWorkspaceCreationBan(organizationID uuid.UUID) RoleIdentifier {
+ return RoleIdentifier{Name: RoleOrgWorkspaceCreationBan(), OrganizationID: organizationID}
+}
+
func allPermsExcept(excepts ...Objecter) []Permission {
resources := AllResources()
var perms []Permission
@@ -496,6 +505,31 @@ func ReloadBuiltinRoles(opts *RoleOptions) {
User: []Permission{},
}
},
+ // orgWorkspaceCreationBan prevents creating & deleting workspaces. This
+ // overrides any permissions granted by the org or user level. It accomplishes
+ // this by using negative permissions.
+ orgWorkspaceCreationBan: func(organizationID uuid.UUID) Role {
+ return Role{
+ Identifier: RoleIdentifier{Name: orgWorkspaceCreationBan, OrganizationID: organizationID},
+ DisplayName: "Organization Workspace Creation Ban",
+ Site: []Permission{},
+ Org: map[string][]Permission{
+ organizationID.String(): {
+ {
+ Negate: true,
+ ResourceType: ResourceWorkspace.Type,
+ Action: policy.ActionCreate,
+ },
+ {
+ Negate: true,
+ ResourceType: ResourceWorkspace.Type,
+ Action: policy.ActionDelete,
+ },
+ },
+ },
+ User: []Permission{},
+ }
+ },
}
}
@@ -506,44 +540,47 @@ func ReloadBuiltinRoles(opts *RoleOptions) {
// map[actor_role][assign_role]
var assignRoles = map[string]map[string]bool{
"system": {
- owner: true,
- auditor: true,
- member: true,
- orgAdmin: true,
- orgMember: true,
- orgAuditor: true,
- orgUserAdmin: true,
- orgTemplateAdmin: true,
- templateAdmin: true,
- userAdmin: true,
- customSiteRole: true,
- customOrganizationRole: true,
+ owner: true,
+ auditor: true,
+ member: true,
+ orgAdmin: true,
+ orgMember: true,
+ orgAuditor: true,
+ orgUserAdmin: true,
+ orgTemplateAdmin: true,
+ orgWorkspaceCreationBan: true,
+ templateAdmin: true,
+ userAdmin: true,
+ customSiteRole: true,
+ customOrganizationRole: true,
},
owner: {
- owner: true,
- auditor: true,
- member: true,
- orgAdmin: true,
- orgMember: true,
- orgAuditor: true,
- orgUserAdmin: true,
- orgTemplateAdmin: true,
- templateAdmin: true,
- userAdmin: true,
- customSiteRole: true,
- customOrganizationRole: true,
+ owner: true,
+ auditor: true,
+ member: true,
+ orgAdmin: true,
+ orgMember: true,
+ orgAuditor: true,
+ orgUserAdmin: true,
+ orgTemplateAdmin: true,
+ orgWorkspaceCreationBan: true,
+ templateAdmin: true,
+ userAdmin: true,
+ customSiteRole: true,
+ customOrganizationRole: true,
},
userAdmin: {
member: true,
orgMember: true,
},
orgAdmin: {
- orgAdmin: true,
- orgMember: true,
- orgAuditor: true,
- orgUserAdmin: true,
- orgTemplateAdmin: true,
- customOrganizationRole: true,
+ orgAdmin: true,
+ orgMember: true,
+ orgAuditor: true,
+ orgUserAdmin: true,
+ orgTemplateAdmin: true,
+ orgWorkspaceCreationBan: true,
+ customOrganizationRole: true,
},
orgUserAdmin: {
orgMember: true,
diff --git a/coderd/rbac/roles_test.go b/coderd/rbac/roles_test.go
index b23849229e900..f81d5723d5ec2 100644
--- a/coderd/rbac/roles_test.go
+++ b/coderd/rbac/roles_test.go
@@ -112,6 +112,7 @@ func TestRolePermissions(t *testing.T) {
// Subjects to user
memberMe := authSubject{Name: "member_me", Actor: rbac.Subject{ID: currentUser.String(), Roles: rbac.RoleIdentifiers{rbac.RoleMember()}}}
orgMemberMe := authSubject{Name: "org_member_me", Actor: rbac.Subject{ID: currentUser.String(), Roles: rbac.RoleIdentifiers{rbac.RoleMember(), rbac.ScopedRoleOrgMember(orgID)}}}
+ orgMemberMeBanWorkspace := authSubject{Name: "org_member_me_workspace_ban", Actor: rbac.Subject{ID: currentUser.String(), Roles: rbac.RoleIdentifiers{rbac.RoleMember(), rbac.ScopedRoleOrgMember(orgID), rbac.ScopedRoleOrgWorkspaceCreationBan(orgID)}}}
groupMemberMe := authSubject{Name: "group_member_me", Actor: rbac.Subject{ID: currentUser.String(), Roles: rbac.RoleIdentifiers{rbac.RoleMember(), rbac.ScopedRoleOrgMember(orgID)}, Groups: []string{groupID.String()}}}
owner := authSubject{Name: "owner", Actor: rbac.Subject{ID: adminID.String(), Roles: rbac.RoleIdentifiers{rbac.RoleMember(), rbac.RoleOwner()}}}
@@ -181,20 +182,30 @@ func TestRolePermissions(t *testing.T) {
Actions: []policy.Action{policy.ActionRead},
Resource: rbac.ResourceWorkspace.WithID(workspaceID).InOrg(orgID).WithOwner(currentUser.String()),
AuthorizeMap: map[bool][]hasAuthSubjects{
- true: {owner, orgMemberMe, orgAdmin, templateAdmin, orgTemplateAdmin},
+ true: {owner, orgMemberMe, orgAdmin, templateAdmin, orgTemplateAdmin, orgMemberMeBanWorkspace},
false: {setOtherOrg, memberMe, userAdmin, orgAuditor, orgUserAdmin},
},
},
{
- Name: "C_RDMyWorkspaceInOrg",
+ Name: "UpdateMyWorkspaceInOrg",
// When creating the WithID won't be set, but it does not change the result.
- Actions: []policy.Action{policy.ActionCreate, policy.ActionUpdate, policy.ActionDelete},
+ Actions: []policy.Action{policy.ActionUpdate},
Resource: rbac.ResourceWorkspace.WithID(workspaceID).InOrg(orgID).WithOwner(currentUser.String()),
AuthorizeMap: map[bool][]hasAuthSubjects{
true: {owner, orgMemberMe, orgAdmin},
false: {setOtherOrg, memberMe, userAdmin, templateAdmin, orgTemplateAdmin, orgUserAdmin, orgAuditor},
},
},
+ {
+ Name: "CreateDeleteMyWorkspaceInOrg",
+ // When creating the WithID won't be set, but it does not change the result.
+ Actions: []policy.Action{policy.ActionCreate, policy.ActionDelete},
+ Resource: rbac.ResourceWorkspace.WithID(workspaceID).InOrg(orgID).WithOwner(currentUser.String()),
+ AuthorizeMap: map[bool][]hasAuthSubjects{
+ true: {owner, orgMemberMe, orgAdmin},
+ false: {setOtherOrg, memberMe, userAdmin, templateAdmin, orgTemplateAdmin, orgUserAdmin, orgAuditor, orgMemberMeBanWorkspace},
+ },
+ },
{
Name: "MyWorkspaceInOrgExecution",
// When creating the WithID won't be set, but it does not change the result.
@@ -942,6 +953,7 @@ func TestListRoles(t *testing.T) {
fmt.Sprintf("organization-auditor:%s", orgID.String()),
fmt.Sprintf("organization-user-admin:%s", orgID.String()),
fmt.Sprintf("organization-template-admin:%s", orgID.String()),
+ fmt.Sprintf("organization-workspace-creation-ban:%s", orgID.String()),
},
orgRoleNames)
}
diff --git a/coderd/workspaces_test.go b/coderd/workspaces_test.go
index 7a81d5192668f..8ee23dcd5100d 100644
--- a/coderd/workspaces_test.go
+++ b/coderd/workspaces_test.go
@@ -375,6 +375,54 @@ func TestWorkspace(t *testing.T) {
require.Error(t, err, "create workspace with archived version")
require.ErrorContains(t, err, "Archived template versions cannot")
})
+
+ t.Run("WorkspaceBan", func(t *testing.T) {
+ t.Parallel()
+ owner, _, _ := coderdtest.NewWithAPI(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
+ first := coderdtest.CreateFirstUser(t, owner)
+
+ version := coderdtest.CreateTemplateVersion(t, owner, first.OrganizationID, nil)
+ coderdtest.AwaitTemplateVersionJobCompleted(t, owner, version.ID)
+ template := coderdtest.CreateTemplate(t, owner, first.OrganizationID, version.ID)
+
+ goodClient, _ := coderdtest.CreateAnotherUser(t, owner, first.OrganizationID)
+
+ // When a user with workspace-creation-ban
+ client, user := coderdtest.CreateAnotherUser(t, owner, first.OrganizationID, rbac.ScopedRoleOrgWorkspaceCreationBan(first.OrganizationID))
+
+ // Ensure a similar user can create a workspace
+ coderdtest.CreateWorkspace(t, goodClient, template.ID)
+
+ ctx := testutil.Context(t, testutil.WaitLong)
+ // Then: Cannot create a workspace
+ _, err := client.CreateUserWorkspace(ctx, codersdk.Me, codersdk.CreateWorkspaceRequest{
+ TemplateID: template.ID,
+ TemplateVersionID: uuid.UUID{},
+ Name: "random",
+ })
+ require.Error(t, err)
+ var apiError *codersdk.Error
+ require.ErrorAs(t, err, &apiError)
+ require.Equal(t, http.StatusForbidden, apiError.StatusCode())
+
+ // When: workspace-ban use has a workspace
+ wrk, err := owner.CreateUserWorkspace(ctx, user.ID.String(), codersdk.CreateWorkspaceRequest{
+ TemplateID: template.ID,
+ TemplateVersionID: uuid.UUID{},
+ Name: "random",
+ })
+ require.NoError(t, err)
+ coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, wrk.LatestBuild.ID)
+
+ // Then: They cannot delete said workspace
+ _, err = client.CreateWorkspaceBuild(ctx, wrk.ID, codersdk.CreateWorkspaceBuildRequest{
+ Transition: codersdk.WorkspaceTransitionDelete,
+ ProvisionerState: []byte{},
+ })
+ require.Error(t, err)
+ require.ErrorAs(t, err, &apiError)
+ require.Equal(t, http.StatusForbidden, apiError.StatusCode())
+ })
}
func TestResolveAutostart(t *testing.T) {
diff --git a/coderd/wsbuilder/wsbuilder.go b/coderd/wsbuilder/wsbuilder.go
index a31e5eff4686a..f6d6d7381a24f 100644
--- a/coderd/wsbuilder/wsbuilder.go
+++ b/coderd/wsbuilder/wsbuilder.go
@@ -790,6 +790,15 @@ func (b *Builder) authorize(authFunc func(action policy.Action, object rbac.Obje
return BuildError{http.StatusBadRequest, msg, xerrors.New(msg)}
}
if !authFunc(action, b.workspace) {
+ if authFunc(policy.ActionRead, b.workspace) {
+ // If the user can read the workspace, but not delete/create/update. Show
+ // a more helpful error. They are allowed to know the workspace exists.
+ return BuildError{
+ Status: http.StatusForbidden,
+ Message: fmt.Sprintf("You do not have permission to %s this workspace.", action),
+ Wrapped: xerrors.New(httpapi.ResourceForbiddenResponse.Detail),
+ }
+ }
// We use the same wording as the httpapi to avoid leaking the existence of the workspace
return BuildError{http.StatusNotFound, httpapi.ResourceNotFoundResponse.Message, xerrors.New(httpapi.ResourceNotFoundResponse.Message)}
}
diff --git a/codersdk/rbacroles.go b/codersdk/rbacroles.go
index 49ed5c5b73176..7721eacbd5624 100644
--- a/codersdk/rbacroles.go
+++ b/codersdk/rbacroles.go
@@ -8,9 +8,10 @@ const (
RoleUserAdmin string = "user-admin"
RoleAuditor string = "auditor"
- RoleOrganizationAdmin string = "organization-admin"
- RoleOrganizationMember string = "organization-member"
- RoleOrganizationAuditor string = "organization-auditor"
- RoleOrganizationTemplateAdmin string = "organization-template-admin"
- RoleOrganizationUserAdmin string = "organization-user-admin"
+ RoleOrganizationAdmin string = "organization-admin"
+ RoleOrganizationMember string = "organization-member"
+ RoleOrganizationAuditor string = "organization-auditor"
+ RoleOrganizationTemplateAdmin string = "organization-template-admin"
+ RoleOrganizationUserAdmin string = "organization-user-admin"
+ RoleOrganizationWorkspaceCreationBan string = "organization-workspace-creation-ban"
)
diff --git a/enterprise/coderd/roles_test.go b/enterprise/coderd/roles_test.go
index 8bbf9218058e7..57b66a368248c 100644
--- a/enterprise/coderd/roles_test.go
+++ b/enterprise/coderd/roles_test.go
@@ -441,10 +441,11 @@ func TestListRoles(t *testing.T) {
return member.ListOrganizationRoles(ctx, owner.OrganizationID)
},
ExpectedRoles: convertRoles(map[rbac.RoleIdentifier]bool{
- {Name: codersdk.RoleOrganizationAdmin, OrganizationID: owner.OrganizationID}: false,
- {Name: codersdk.RoleOrganizationAuditor, OrganizationID: owner.OrganizationID}: false,
- {Name: codersdk.RoleOrganizationTemplateAdmin, OrganizationID: owner.OrganizationID}: false,
- {Name: codersdk.RoleOrganizationUserAdmin, OrganizationID: owner.OrganizationID}: false,
+ {Name: codersdk.RoleOrganizationAdmin, OrganizationID: owner.OrganizationID}: false,
+ {Name: codersdk.RoleOrganizationAuditor, OrganizationID: owner.OrganizationID}: false,
+ {Name: codersdk.RoleOrganizationTemplateAdmin, OrganizationID: owner.OrganizationID}: false,
+ {Name: codersdk.RoleOrganizationUserAdmin, OrganizationID: owner.OrganizationID}: false,
+ {Name: codersdk.RoleOrganizationWorkspaceCreationBan, OrganizationID: owner.OrganizationID}: false,
}),
},
{
@@ -473,10 +474,11 @@ func TestListRoles(t *testing.T) {
return orgAdmin.ListOrganizationRoles(ctx, owner.OrganizationID)
},
ExpectedRoles: convertRoles(map[rbac.RoleIdentifier]bool{
- {Name: codersdk.RoleOrganizationAdmin, OrganizationID: owner.OrganizationID}: true,
- {Name: codersdk.RoleOrganizationAuditor, OrganizationID: owner.OrganizationID}: true,
- {Name: codersdk.RoleOrganizationTemplateAdmin, OrganizationID: owner.OrganizationID}: true,
- {Name: codersdk.RoleOrganizationUserAdmin, OrganizationID: owner.OrganizationID}: true,
+ {Name: codersdk.RoleOrganizationAdmin, OrganizationID: owner.OrganizationID}: true,
+ {Name: codersdk.RoleOrganizationAuditor, OrganizationID: owner.OrganizationID}: true,
+ {Name: codersdk.RoleOrganizationTemplateAdmin, OrganizationID: owner.OrganizationID}: true,
+ {Name: codersdk.RoleOrganizationUserAdmin, OrganizationID: owner.OrganizationID}: true,
+ {Name: codersdk.RoleOrganizationWorkspaceCreationBan, OrganizationID: owner.OrganizationID}: true,
}),
},
{
@@ -505,10 +507,11 @@ func TestListRoles(t *testing.T) {
return client.ListOrganizationRoles(ctx, owner.OrganizationID)
},
ExpectedRoles: convertRoles(map[rbac.RoleIdentifier]bool{
- {Name: codersdk.RoleOrganizationAdmin, OrganizationID: owner.OrganizationID}: true,
- {Name: codersdk.RoleOrganizationAuditor, OrganizationID: owner.OrganizationID}: true,
- {Name: codersdk.RoleOrganizationTemplateAdmin, OrganizationID: owner.OrganizationID}: true,
- {Name: codersdk.RoleOrganizationUserAdmin, OrganizationID: owner.OrganizationID}: true,
+ {Name: codersdk.RoleOrganizationAdmin, OrganizationID: owner.OrganizationID}: true,
+ {Name: codersdk.RoleOrganizationAuditor, OrganizationID: owner.OrganizationID}: true,
+ {Name: codersdk.RoleOrganizationTemplateAdmin, OrganizationID: owner.OrganizationID}: true,
+ {Name: codersdk.RoleOrganizationUserAdmin, OrganizationID: owner.OrganizationID}: true,
+ {Name: codersdk.RoleOrganizationWorkspaceCreationBan, OrganizationID: owner.OrganizationID}: true,
}),
},
}
diff --git a/site/src/api/typesGenerated.ts b/site/src/api/typesGenerated.ts
index fdda12254052c..1a011b57b4c39 100644
--- a/site/src/api/typesGenerated.ts
+++ b/site/src/api/typesGenerated.ts
@@ -2101,6 +2101,10 @@ export const RoleOrganizationTemplateAdmin = "organization-template-admin";
// From codersdk/rbacroles.go
export const RoleOrganizationUserAdmin = "organization-user-admin";
+// From codersdk/rbacroles.go
+export const RoleOrganizationWorkspaceCreationBan =
+ "organization-workspace-creation-ban";
+
// From codersdk/rbacroles.go
export const RoleOwner = "owner";
diff --git a/site/src/pages/OrganizationSettingsPage/UserTable/EditRolesButton.stories.tsx b/site/src/pages/OrganizationSettingsPage/UserTable/EditRolesButton.stories.tsx
index 0511a9d877ea1..f3244898483ce 100644
--- a/site/src/pages/OrganizationSettingsPage/UserTable/EditRolesButton.stories.tsx
+++ b/site/src/pages/OrganizationSettingsPage/UserTable/EditRolesButton.stories.tsx
@@ -4,6 +4,7 @@ import {
MockOwnerRole,
MockSiteRoles,
MockUserAdminRole,
+ MockWorkspaceCreationBanRole,
} from "testHelpers/entities";
import { withDesktopViewport } from "testHelpers/storybook";
import { EditRolesButton } from "./EditRolesButton";
@@ -41,3 +42,14 @@ export const Loading: Story = {
await userEvent.click(canvas.getByRole("button"));
},
};
+
+export const AdvancedOpen: Story = {
+ args: {
+ selectedRoleNames: new Set([MockWorkspaceCreationBanRole.name]),
+ roles: MockSiteRoles,
+ },
+ play: async ({ canvasElement }) => {
+ const canvas = within(canvasElement);
+ await userEvent.click(canvas.getByRole("button"));
+ },
+};
diff --git a/site/src/pages/OrganizationSettingsPage/UserTable/EditRolesButton.tsx b/site/src/pages/OrganizationSettingsPage/UserTable/EditRolesButton.tsx
index 64e059b4134f6..c8eb4001e406a 100644
--- a/site/src/pages/OrganizationSettingsPage/UserTable/EditRolesButton.tsx
+++ b/site/src/pages/OrganizationSettingsPage/UserTable/EditRolesButton.tsx
@@ -16,7 +16,9 @@ import {
PopoverContent,
PopoverTrigger,
} from "components/deprecated/Popover/Popover";
-import type { FC } from "react";
+import { ChevronDownIcon, ChevronRightIcon } from "lucide-react";
+import { type FC, useEffect, useState } from "react";
+import { cn } from "utils/cn";
const roleDescriptions: Record = {
owner:
@@ -57,7 +59,7 @@ const Option: FC = ({
}}
/>
- {name}
+ {name}
{description}
@@ -91,6 +93,7 @@ export const EditRolesButton: FC = ({
onChange([...selectedRoleNames, roleName]);
};
+ const [isAdvancedOpen, setIsAdvancedOpen] = useState(false);
const canSetRoles =
userLoginType !== "oidc" || (userLoginType === "oidc" && !oidcRoleSync);
@@ -109,6 +112,20 @@ export const EditRolesButton: FC = ({
);
}
+ const filteredRoles = roles.filter(
+ (role) => role.name !== "organization-workspace-creation-ban",
+ );
+ const advancedRoles = roles.filter(
+ (role) => role.name === "organization-workspace-creation-ban",
+ );
+
+ // make sure the advanced roles are always visible if the user has one of these roles
+ useEffect(() => {
+ if (selectedRoleNames.has("organization-workspace-creation-ban")) {
+ setIsAdvancedOpen(true);
+ }
+ }, [selectedRoleNames]);
+
return (
@@ -124,14 +141,14 @@ export const EditRolesButton: FC = ({
-
+
-
- {roles.map((role) => (
+
+ {filteredRoles.map((role) => (
= ({
description={roleDescriptions[role.name] ?? ""}
/>
))}
+ {advancedRoles.length > 0 && (
+ <>
+ {
+ setIsAdvancedOpen((v) => !v);
+ }}
+ >
+ {isAdvancedOpen ? (
+
+ ) : (
+
+ )}
+
+ ({isAdvancedOpen ? "Hide" : "Show advanced"})
+
+ Advanced
+
+
+ {isAdvancedOpen &&
+ advancedRoles.map((role) => (
+
+ ))}
+ >
+ )}
diff --git a/site/src/testHelpers/entities.ts b/site/src/testHelpers/entities.ts
index 938537c08d70c..12654bc064fee 100644
--- a/site/src/testHelpers/entities.ts
+++ b/site/src/testHelpers/entities.ts
@@ -296,6 +296,15 @@ export const MockAuditorRole: TypesGen.Role = {
organization_id: "",
};
+export const MockWorkspaceCreationBanRole: TypesGen.Role = {
+ name: "organization-workspace-creation-ban",
+ display_name: "Organization Workspace Creation Ban",
+ site_permissions: [],
+ organization_permissions: [],
+ user_permissions: [],
+ organization_id: "",
+};
+
export const MockMemberRole: TypesGen.SlimRole = {
name: "member",
display_name: "Member",
@@ -459,10 +468,15 @@ export function assignableRole(
};
}
-export const MockSiteRoles = [MockUserAdminRole, MockAuditorRole];
+export const MockSiteRoles = [
+ MockUserAdminRole,
+ MockAuditorRole,
+ MockWorkspaceCreationBanRole,
+];
export const MockAssignableSiteRoles = [
assignableRole(MockUserAdminRole, true),
assignableRole(MockAuditorRole, true),
+ assignableRole(MockWorkspaceCreationBanRole, true),
];
export const MockMemberPermissions = {
From 464fccd8075a65a67e8f977597da48b36a9716f5 Mon Sep 17 00:00:00 2001
From: Jaayden Halko
Date: Thu, 27 Feb 2025 17:20:33 +0000
Subject: [PATCH 21/44] chore: create collapsible summary component (#16705)
This is based on the Figma designs here:
https://www.figma.com/design/WfqIgsTFXN2BscBSSyXWF8/Coder-kit?node-id=507-1525&m=dev
---------
Co-authored-by: Steven Masley
---
.../CollapsibleSummary.stories.tsx | 120 ++++++++++++++++++
.../CollapsibleSummary/CollapsibleSummary.tsx | 91 +++++++++++++
.../UserTable/EditRolesButton.tsx | 48 ++-----
3 files changed, 224 insertions(+), 35 deletions(-)
create mode 100644 site/src/components/CollapsibleSummary/CollapsibleSummary.stories.tsx
create mode 100644 site/src/components/CollapsibleSummary/CollapsibleSummary.tsx
diff --git a/site/src/components/CollapsibleSummary/CollapsibleSummary.stories.tsx b/site/src/components/CollapsibleSummary/CollapsibleSummary.stories.tsx
new file mode 100644
index 0000000000000..98f63c24ccbc7
--- /dev/null
+++ b/site/src/components/CollapsibleSummary/CollapsibleSummary.stories.tsx
@@ -0,0 +1,120 @@
+import type { Meta, StoryObj } from "@storybook/react";
+import { Button } from "../Button/Button";
+import { CollapsibleSummary } from "./CollapsibleSummary";
+
+const meta: Meta = {
+ title: "components/CollapsibleSummary",
+ component: CollapsibleSummary,
+ args: {
+ label: "Advanced options",
+ children: (
+ <>
+
+ Option 1
+
+
+ Option 2
+
+
+ Option 3
+
+ >
+ ),
+ },
+};
+
+export default meta;
+type Story = StoryObj;
+
+export const Default: Story = {};
+
+export const DefaultOpen: Story = {
+ args: {
+ defaultOpen: true,
+ },
+};
+
+export const MediumSize: Story = {
+ args: {
+ size: "md",
+ },
+};
+
+export const SmallSize: Story = {
+ args: {
+ size: "sm",
+ },
+};
+
+export const CustomClassName: Story = {
+ args: {
+ className: "text-blue-500 font-bold",
+ },
+};
+
+export const ManyChildren: Story = {
+ args: {
+ defaultOpen: true,
+ children: (
+ <>
+ {Array.from({ length: 10 }).map((_, i) => (
+
+ Option {i + 1}
+
+ ))}
+ >
+ ),
+ },
+};
+
+export const NestedCollapsible: Story = {
+ args: {
+ defaultOpen: true,
+ children: (
+ <>
+
+ Option 1
+
+
+
+ Nested Option 1
+
+
+ Nested Option 2
+
+
+
+ Option 3
+
+ >
+ ),
+ },
+};
+
+export const ComplexContent: Story = {
+ args: {
+ defaultOpen: true,
+ children: (
+
+
Complex Content
+
+ This is a more complex content example with various elements.
+
+
+ Action 1
+ Action 2
+
+
+ ),
+ },
+};
+
+export const LongLabel: Story = {
+ args: {
+ label:
+ "This is a very long label that might wrap or cause layout issues if not handled properly",
+ },
+};
diff --git a/site/src/components/CollapsibleSummary/CollapsibleSummary.tsx b/site/src/components/CollapsibleSummary/CollapsibleSummary.tsx
new file mode 100644
index 0000000000000..675500685adf3
--- /dev/null
+++ b/site/src/components/CollapsibleSummary/CollapsibleSummary.tsx
@@ -0,0 +1,91 @@
+import { type VariantProps, cva } from "class-variance-authority";
+import { ChevronRightIcon } from "lucide-react";
+import { type FC, type ReactNode, useState } from "react";
+import { cn } from "utils/cn";
+
+const collapsibleSummaryVariants = cva(
+ `flex items-center gap-1 p-0 bg-transparent border-0 text-inherit cursor-pointer
+ transition-colors text-content-secondary hover:text-content-primary font-medium
+ whitespace-nowrap`,
+ {
+ variants: {
+ size: {
+ md: "text-sm",
+ sm: "text-xs",
+ },
+ },
+ defaultVariants: {
+ size: "md",
+ },
+ },
+);
+
+export interface CollapsibleSummaryProps
+ extends VariantProps {
+ /**
+ * The label to display for the collapsible section
+ */
+ label: string;
+ /**
+ * The content to show when expanded
+ */
+ children: ReactNode;
+ /**
+ * Whether the section is initially expanded
+ */
+ defaultOpen?: boolean;
+ /**
+ * Optional className for the button
+ */
+ className?: string;
+ /**
+ * The size of the component
+ */
+ size?: "md" | "sm";
+}
+
+export const CollapsibleSummary: FC = ({
+ label,
+ children,
+ defaultOpen = false,
+ className,
+ size,
+}) => {
+ const [isOpen, setIsOpen] = useState(defaultOpen);
+
+ return (
+
+
{
+ setIsOpen((v) => !v);
+ }}
+ >
+
+
+
+
+ ({isOpen ? "Hide" : "Show"}) {label}
+
+ {label}
+
+
+ {isOpen &&
{children}
}
+
+ );
+};
diff --git a/site/src/pages/OrganizationSettingsPage/UserTable/EditRolesButton.tsx b/site/src/pages/OrganizationSettingsPage/UserTable/EditRolesButton.tsx
index c8eb4001e406a..9efd99bccf106 100644
--- a/site/src/pages/OrganizationSettingsPage/UserTable/EditRolesButton.tsx
+++ b/site/src/pages/OrganizationSettingsPage/UserTable/EditRolesButton.tsx
@@ -3,6 +3,7 @@ import Checkbox from "@mui/material/Checkbox";
import Tooltip from "@mui/material/Tooltip";
import type { SlimRole } from "api/typesGenerated";
import { Button } from "components/Button/Button";
+import { CollapsibleSummary } from "components/CollapsibleSummary/CollapsibleSummary";
import {
HelpTooltip,
HelpTooltipContent,
@@ -159,41 +160,18 @@ export const EditRolesButton: FC = ({
/>
))}
{advancedRoles.length > 0 && (
- <>
- {
- setIsAdvancedOpen((v) => !v);
- }}
- >
- {isAdvancedOpen ? (
-
- ) : (
-
- )}
-
- ({isAdvancedOpen ? "Hide" : "Show advanced"})
-
- Advanced
-
-
- {isAdvancedOpen &&
- advancedRoles.map((role) => (
-
- ))}
- >
+
+ {advancedRoles.map((role) => (
+
+ ))}
+
)}
From bf5b0028299f1a67adddcd00dce97d9d130f0592 Mon Sep 17 00:00:00 2001
From: Jaayden Halko
Date: Thu, 27 Feb 2025 17:28:43 +0000
Subject: [PATCH 22/44] fix: add org role read permissions to site wide
template admins and auditors (#16733)
resolves coder/internal#388
Since site-wide template admins and auditors are able to access the members page
of any org, they should have read access to org roles
---
coderd/rbac/roles.go | 6 ++++--
coderd/rbac/roles_test.go | 4 ++--
2 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/coderd/rbac/roles.go b/coderd/rbac/roles.go
index 440494450e2d1..af3e972fc9a6d 100644
--- a/coderd/rbac/roles.go
+++ b/coderd/rbac/roles.go
@@ -307,7 +307,8 @@ func ReloadBuiltinRoles(opts *RoleOptions) {
Identifier: RoleAuditor(),
DisplayName: "Auditor",
Site: Permissions(map[string][]policy.Action{
- ResourceAuditLog.Type: {policy.ActionRead},
+ ResourceAssignOrgRole.Type: {policy.ActionRead},
+ ResourceAuditLog.Type: {policy.ActionRead},
// Allow auditors to see the resources that audit logs reflect.
ResourceTemplate.Type: {policy.ActionRead, policy.ActionViewInsights},
ResourceUser.Type: {policy.ActionRead},
@@ -327,7 +328,8 @@ func ReloadBuiltinRoles(opts *RoleOptions) {
Identifier: RoleTemplateAdmin(),
DisplayName: "Template Admin",
Site: Permissions(map[string][]policy.Action{
- ResourceTemplate.Type: ResourceTemplate.AvailableActions(),
+ ResourceAssignOrgRole.Type: {policy.ActionRead},
+ ResourceTemplate.Type: ResourceTemplate.AvailableActions(),
// CRUD all files, even those they did not upload.
ResourceFile.Type: {policy.ActionCreate, policy.ActionRead},
ResourceWorkspace.Type: {policy.ActionRead},
diff --git a/coderd/rbac/roles_test.go b/coderd/rbac/roles_test.go
index f81d5723d5ec2..af62a5cd5d1b3 100644
--- a/coderd/rbac/roles_test.go
+++ b/coderd/rbac/roles_test.go
@@ -352,8 +352,8 @@ func TestRolePermissions(t *testing.T) {
Actions: []policy.Action{policy.ActionRead},
Resource: rbac.ResourceAssignOrgRole.InOrg(orgID),
AuthorizeMap: map[bool][]hasAuthSubjects{
- true: {owner, setOrgNotMe, orgMemberMe, userAdmin},
- false: {setOtherOrg, memberMe, templateAdmin},
+ true: {owner, setOrgNotMe, orgMemberMe, userAdmin, templateAdmin},
+ false: {setOtherOrg, memberMe},
},
},
{
From 91a4a98c27f906aab5341a65bb435badd0b19ced Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E3=82=B1=E3=82=A4=E3=83=A9?=
Date: Thu, 27 Feb 2025 10:39:06 -0700
Subject: [PATCH 23/44] chore: add an unassign action for roles (#16728)
---
coderd/apidoc/docs.go | 2 +
coderd/apidoc/swagger.json | 2 +
coderd/database/dbauthz/customroles_test.go | 122 +++++++++-----------
coderd/database/dbauthz/dbauthz.go | 71 ++++++------
coderd/database/dbauthz/dbauthz_test.go | 54 +++------
coderd/database/queries.sql.go | 56 ++++-----
coderd/database/queries/roles.sql | 56 ++++-----
coderd/members.go | 2 +-
coderd/rbac/object_gen.go | 18 +--
coderd/rbac/policy/policy.go | 22 ++--
coderd/rbac/roles.go | 6 +-
coderd/rbac/roles_test.go | 10 +-
codersdk/rbacresources_gen.go | 5 +-
docs/reference/api/members.md | 5 +
docs/reference/api/schemas.md | 1 +
enterprise/coderd/roles.go | 3 +-
site/src/api/rbacresourcesGenerated.ts | 17 ++-
site/src/api/typesGenerated.ts | 2 +
18 files changed, 214 insertions(+), 240 deletions(-)
diff --git a/coderd/apidoc/docs.go b/coderd/apidoc/docs.go
index d7e9408eb677f..125cf4faa5ba1 100644
--- a/coderd/apidoc/docs.go
+++ b/coderd/apidoc/docs.go
@@ -13699,6 +13699,7 @@ const docTemplate = `{
"read",
"read_personal",
"ssh",
+ "unassign",
"update",
"update_personal",
"use",
@@ -13714,6 +13715,7 @@ const docTemplate = `{
"ActionRead",
"ActionReadPersonal",
"ActionSSH",
+ "ActionUnassign",
"ActionUpdate",
"ActionUpdatePersonal",
"ActionUse",
diff --git a/coderd/apidoc/swagger.json b/coderd/apidoc/swagger.json
index ff714e416c5ce..104d6fd70e077 100644
--- a/coderd/apidoc/swagger.json
+++ b/coderd/apidoc/swagger.json
@@ -12388,6 +12388,7 @@
"read",
"read_personal",
"ssh",
+ "unassign",
"update",
"update_personal",
"use",
@@ -12403,6 +12404,7 @@
"ActionRead",
"ActionReadPersonal",
"ActionSSH",
+ "ActionUnassign",
"ActionUpdate",
"ActionUpdatePersonal",
"ActionUse",
diff --git a/coderd/database/dbauthz/customroles_test.go b/coderd/database/dbauthz/customroles_test.go
index c5d40b0323185..815d6629f64f9 100644
--- a/coderd/database/dbauthz/customroles_test.go
+++ b/coderd/database/dbauthz/customroles_test.go
@@ -34,11 +34,12 @@ func TestInsertCustomRoles(t *testing.T) {
}
}
- canAssignRole := rbac.Role{
+ canCreateCustomRole := rbac.Role{
Identifier: rbac.RoleIdentifier{Name: "can-assign"},
DisplayName: "",
Site: rbac.Permissions(map[string][]policy.Action{
- rbac.ResourceAssignRole.Type: {policy.ActionRead, policy.ActionCreate},
+ rbac.ResourceAssignRole.Type: {policy.ActionRead},
+ rbac.ResourceAssignOrgRole.Type: {policy.ActionRead, policy.ActionCreate},
}),
}
@@ -61,17 +62,15 @@ func TestInsertCustomRoles(t *testing.T) {
return all
}
- orgID := uuid.NullUUID{
- UUID: uuid.New(),
- Valid: true,
- }
+ orgID := uuid.New()
+
testCases := []struct {
name string
subject rbac.ExpandableRoles
// Perms to create on new custom role
- organizationID uuid.NullUUID
+ organizationID uuid.UUID
site []codersdk.Permission
org []codersdk.Permission
user []codersdk.Permission
@@ -79,19 +78,21 @@ func TestInsertCustomRoles(t *testing.T) {
}{
{
// No roles, so no assign role
- name: "no-roles",
- subject: rbac.RoleIdentifiers{},
- errorContains: "forbidden",
+ name: "no-roles",
+ organizationID: orgID,
+ subject: rbac.RoleIdentifiers{},
+ errorContains: "forbidden",
},
{
// This works because the new role has 0 perms
- name: "empty",
- subject: merge(canAssignRole),
+ name: "empty",
+ organizationID: orgID,
+ subject: merge(canCreateCustomRole),
},
{
name: "mixed-scopes",
- subject: merge(canAssignRole, rbac.RoleOwner()),
organizationID: orgID,
+ subject: merge(canCreateCustomRole, rbac.RoleOwner()),
site: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{
codersdk.ResourceWorkspace: {codersdk.ActionRead},
}),
@@ -101,27 +102,30 @@ func TestInsertCustomRoles(t *testing.T) {
errorContains: "organization roles specify site or user permissions",
},
{
- name: "invalid-action",
- subject: merge(canAssignRole, rbac.RoleOwner()),
- site: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{
+ name: "invalid-action",
+ organizationID: orgID,
+ subject: merge(canCreateCustomRole, rbac.RoleOwner()),
+ org: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{
// Action does not go with resource
codersdk.ResourceWorkspace: {codersdk.ActionViewInsights},
}),
errorContains: "invalid action",
},
{
- name: "invalid-resource",
- subject: merge(canAssignRole, rbac.RoleOwner()),
- site: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{
+ name: "invalid-resource",
+ organizationID: orgID,
+ subject: merge(canCreateCustomRole, rbac.RoleOwner()),
+ org: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{
"foobar": {codersdk.ActionViewInsights},
}),
errorContains: "invalid resource",
},
{
// Not allowing these at this time.
- name: "negative-permission",
- subject: merge(canAssignRole, rbac.RoleOwner()),
- site: []codersdk.Permission{
+ name: "negative-permission",
+ organizationID: orgID,
+ subject: merge(canCreateCustomRole, rbac.RoleOwner()),
+ org: []codersdk.Permission{
{
Negate: true,
ResourceType: codersdk.ResourceWorkspace,
@@ -131,89 +135,69 @@ func TestInsertCustomRoles(t *testing.T) {
errorContains: "no negative permissions",
},
{
- name: "wildcard", // not allowed
- subject: merge(canAssignRole, rbac.RoleOwner()),
- site: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{
+ name: "wildcard", // not allowed
+ organizationID: orgID,
+ subject: merge(canCreateCustomRole, rbac.RoleOwner()),
+ org: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{
codersdk.ResourceWorkspace: {"*"},
}),
errorContains: "no wildcard symbols",
},
// escalation checks
{
- name: "read-workspace-escalation",
- subject: merge(canAssignRole),
- site: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{
+ name: "read-workspace-escalation",
+ organizationID: orgID,
+ subject: merge(canCreateCustomRole),
+ org: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{
codersdk.ResourceWorkspace: {codersdk.ActionRead},
}),
errorContains: "not allowed to grant this permission",
},
{
- name: "read-workspace-outside-org",
- organizationID: uuid.NullUUID{
- UUID: uuid.New(),
- Valid: true,
- },
- subject: merge(canAssignRole, rbac.ScopedRoleOrgAdmin(orgID.UUID)),
+ name: "read-workspace-outside-org",
+ organizationID: uuid.New(),
+ subject: merge(canCreateCustomRole, rbac.ScopedRoleOrgAdmin(orgID)),
org: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{
codersdk.ResourceWorkspace: {codersdk.ActionRead},
}),
- errorContains: "forbidden",
+ errorContains: "not allowed to grant this permission",
},
{
name: "user-escalation",
// These roles do not grant user perms
- subject: merge(canAssignRole, rbac.ScopedRoleOrgAdmin(orgID.UUID)),
+ organizationID: orgID,
+ subject: merge(canCreateCustomRole, rbac.ScopedRoleOrgAdmin(orgID)),
user: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{
codersdk.ResourceWorkspace: {codersdk.ActionRead},
}),
- errorContains: "not allowed to grant this permission",
+ errorContains: "organization roles specify site or user permissions",
},
{
- name: "template-admin-escalation",
- subject: merge(canAssignRole, rbac.RoleTemplateAdmin()),
+ name: "site-escalation",
+ organizationID: orgID,
+ subject: merge(canCreateCustomRole, rbac.RoleTemplateAdmin()),
site: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{
- codersdk.ResourceWorkspace: {codersdk.ActionRead}, // ok!
codersdk.ResourceDeploymentConfig: {codersdk.ActionUpdate}, // not ok!
}),
- user: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{
- codersdk.ResourceWorkspace: {codersdk.ActionRead}, // ok!
- }),
- errorContains: "deployment_config",
+ errorContains: "organization roles specify site or user permissions",
},
// ok!
{
- name: "read-workspace-template-admin",
- subject: merge(canAssignRole, rbac.RoleTemplateAdmin()),
- site: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{
+ name: "read-workspace-template-admin",
+ organizationID: orgID,
+ subject: merge(canCreateCustomRole, rbac.RoleTemplateAdmin()),
+ org: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{
codersdk.ResourceWorkspace: {codersdk.ActionRead},
}),
},
{
name: "read-workspace-in-org",
- subject: merge(canAssignRole, rbac.ScopedRoleOrgAdmin(orgID.UUID)),
organizationID: orgID,
+ subject: merge(canCreateCustomRole, rbac.ScopedRoleOrgAdmin(orgID)),
org: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{
codersdk.ResourceWorkspace: {codersdk.ActionRead},
}),
},
- {
- name: "user-perms",
- // This is weird, but is ok
- subject: merge(canAssignRole, rbac.RoleMember()),
- user: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{
- codersdk.ResourceWorkspace: {codersdk.ActionRead},
- }),
- },
- {
- name: "site+user-perms",
- subject: merge(canAssignRole, rbac.RoleMember(), rbac.RoleTemplateAdmin()),
- site: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{
- codersdk.ResourceWorkspace: {codersdk.ActionRead},
- }),
- user: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{
- codersdk.ResourceWorkspace: {codersdk.ActionRead},
- }),
- },
}
for _, tc := range testCases {
@@ -234,7 +218,7 @@ func TestInsertCustomRoles(t *testing.T) {
_, err := az.InsertCustomRole(ctx, database.InsertCustomRoleParams{
Name: "test-role",
DisplayName: "",
- OrganizationID: tc.organizationID,
+ OrganizationID: uuid.NullUUID{UUID: tc.organizationID, Valid: true},
SitePermissions: db2sdk.List(tc.site, convertSDKPerm),
OrgPermissions: db2sdk.List(tc.org, convertSDKPerm),
UserPermissions: db2sdk.List(tc.user, convertSDKPerm),
@@ -249,11 +233,11 @@ func TestInsertCustomRoles(t *testing.T) {
LookupRoles: []database.NameOrganizationPair{
{
Name: "test-role",
- OrganizationID: tc.organizationID.UUID,
+ OrganizationID: tc.organizationID,
},
},
ExcludeOrgRoles: false,
- OrganizationID: uuid.UUID{},
+ OrganizationID: uuid.Nil,
})
require.NoError(t, err)
require.Len(t, roles, 1)
diff --git a/coderd/database/dbauthz/dbauthz.go b/coderd/database/dbauthz/dbauthz.go
index fdc9f6504d95d..877727069ab76 100644
--- a/coderd/database/dbauthz/dbauthz.go
+++ b/coderd/database/dbauthz/dbauthz.go
@@ -747,7 +747,7 @@ func (*querier) convertToDeploymentRoles(names []string) []rbac.RoleIdentifier {
}
// canAssignRoles handles assigning built in and custom roles.
-func (q *querier) canAssignRoles(ctx context.Context, orgID *uuid.UUID, added, removed []rbac.RoleIdentifier) error {
+func (q *querier) canAssignRoles(ctx context.Context, orgID uuid.UUID, added, removed []rbac.RoleIdentifier) error {
actor, ok := ActorFromContext(ctx)
if !ok {
return NoActorError
@@ -755,12 +755,14 @@ func (q *querier) canAssignRoles(ctx context.Context, orgID *uuid.UUID, added, r
roleAssign := rbac.ResourceAssignRole
shouldBeOrgRoles := false
- if orgID != nil {
- roleAssign = rbac.ResourceAssignOrgRole.InOrg(*orgID)
+ if orgID != uuid.Nil {
+ roleAssign = rbac.ResourceAssignOrgRole.InOrg(orgID)
shouldBeOrgRoles = true
}
- grantedRoles := append(added, removed...)
+ grantedRoles := make([]rbac.RoleIdentifier, 0, len(added)+len(removed))
+ grantedRoles = append(grantedRoles, added...)
+ grantedRoles = append(grantedRoles, removed...)
customRoles := make([]rbac.RoleIdentifier, 0)
// Validate that the roles being assigned are valid.
for _, r := range grantedRoles {
@@ -774,11 +776,11 @@ func (q *querier) canAssignRoles(ctx context.Context, orgID *uuid.UUID, added, r
}
if shouldBeOrgRoles {
- if orgID == nil {
+ if orgID == uuid.Nil {
return xerrors.Errorf("should never happen, orgID is nil, but trying to assign an organization role")
}
- if r.OrganizationID != *orgID {
+ if r.OrganizationID != orgID {
return xerrors.Errorf("attempted to assign role from a different org, role %q to %q", r, orgID.String())
}
}
@@ -824,7 +826,7 @@ func (q *querier) canAssignRoles(ctx context.Context, orgID *uuid.UUID, added, r
}
if len(removed) > 0 {
- if err := q.authorizeContext(ctx, policy.ActionDelete, roleAssign); err != nil {
+ if err := q.authorizeContext(ctx, policy.ActionUnassign, roleAssign); err != nil {
return err
}
}
@@ -1124,11 +1126,15 @@ func (q *querier) CleanTailnetTunnels(ctx context.Context) error {
return q.db.CleanTailnetTunnels(ctx)
}
-// TODO: Handle org scoped lookups
func (q *querier) CustomRoles(ctx context.Context, arg database.CustomRolesParams) ([]database.CustomRole, error) {
- if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceAssignRole); err != nil {
+ roleObject := rbac.ResourceAssignRole
+ if arg.OrganizationID != uuid.Nil {
+ roleObject = rbac.ResourceAssignOrgRole.InOrg(arg.OrganizationID)
+ }
+ if err := q.authorizeContext(ctx, policy.ActionRead, roleObject); err != nil {
return nil, err
}
+
return q.db.CustomRoles(ctx, arg)
}
@@ -1185,14 +1191,11 @@ func (q *querier) DeleteCryptoKey(ctx context.Context, arg database.DeleteCrypto
}
func (q *querier) DeleteCustomRole(ctx context.Context, arg database.DeleteCustomRoleParams) error {
- if arg.OrganizationID.UUID != uuid.Nil {
- if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceAssignOrgRole.InOrg(arg.OrganizationID.UUID)); err != nil {
- return err
- }
- } else {
- if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceAssignRole); err != nil {
- return err
- }
+ if !arg.OrganizationID.Valid || arg.OrganizationID.UUID == uuid.Nil {
+ return NotAuthorizedError{Err: xerrors.New("custom roles must belong to an organization")}
+ }
+ if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceAssignOrgRole.InOrg(arg.OrganizationID.UUID)); err != nil {
+ return err
}
return q.db.DeleteCustomRole(ctx, arg)
@@ -3009,14 +3012,11 @@ func (q *querier) InsertCryptoKey(ctx context.Context, arg database.InsertCrypto
func (q *querier) InsertCustomRole(ctx context.Context, arg database.InsertCustomRoleParams) (database.CustomRole, error) {
// Org and site role upsert share the same query. So switch the assertion based on the org uuid.
- if arg.OrganizationID.UUID != uuid.Nil {
- if err := q.authorizeContext(ctx, policy.ActionCreate, rbac.ResourceAssignOrgRole.InOrg(arg.OrganizationID.UUID)); err != nil {
- return database.CustomRole{}, err
- }
- } else {
- if err := q.authorizeContext(ctx, policy.ActionCreate, rbac.ResourceAssignRole); err != nil {
- return database.CustomRole{}, err
- }
+ if !arg.OrganizationID.Valid || arg.OrganizationID.UUID == uuid.Nil {
+ return database.CustomRole{}, NotAuthorizedError{Err: xerrors.New("custom roles must belong to an organization")}
+ }
+ if err := q.authorizeContext(ctx, policy.ActionCreate, rbac.ResourceAssignOrgRole.InOrg(arg.OrganizationID.UUID)); err != nil {
+ return database.CustomRole{}, err
}
if err := q.customRoleCheck(ctx, database.CustomRole{
@@ -3146,7 +3146,7 @@ func (q *querier) InsertOrganizationMember(ctx context.Context, arg database.Ins
// All roles are added roles. Org member is always implied.
addedRoles := append(orgRoles, rbac.ScopedRoleOrgMember(arg.OrganizationID))
- err = q.canAssignRoles(ctx, &arg.OrganizationID, addedRoles, []rbac.RoleIdentifier{})
+ err = q.canAssignRoles(ctx, arg.OrganizationID, addedRoles, []rbac.RoleIdentifier{})
if err != nil {
return database.OrganizationMember{}, err
}
@@ -3270,7 +3270,7 @@ func (q *querier) InsertTemplateVersionWorkspaceTag(ctx context.Context, arg dat
func (q *querier) InsertUser(ctx context.Context, arg database.InsertUserParams) (database.User, error) {
// Always check if the assigned roles can actually be assigned by this actor.
impliedRoles := append([]rbac.RoleIdentifier{rbac.RoleMember()}, q.convertToDeploymentRoles(arg.RBACRoles)...)
- err := q.canAssignRoles(ctx, nil, impliedRoles, []rbac.RoleIdentifier{})
+ err := q.canAssignRoles(ctx, uuid.Nil, impliedRoles, []rbac.RoleIdentifier{})
if err != nil {
return database.User{}, err
}
@@ -3608,14 +3608,11 @@ func (q *querier) UpdateCryptoKeyDeletesAt(ctx context.Context, arg database.Upd
}
func (q *querier) UpdateCustomRole(ctx context.Context, arg database.UpdateCustomRoleParams) (database.CustomRole, error) {
- if arg.OrganizationID.UUID != uuid.Nil {
- if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceAssignOrgRole.InOrg(arg.OrganizationID.UUID)); err != nil {
- return database.CustomRole{}, err
- }
- } else {
- if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceAssignRole); err != nil {
- return database.CustomRole{}, err
- }
+ if !arg.OrganizationID.Valid || arg.OrganizationID.UUID == uuid.Nil {
+ return database.CustomRole{}, NotAuthorizedError{Err: xerrors.New("custom roles must belong to an organization")}
+ }
+ if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceAssignOrgRole.InOrg(arg.OrganizationID.UUID)); err != nil {
+ return database.CustomRole{}, err
}
if err := q.customRoleCheck(ctx, database.CustomRole{
@@ -3695,7 +3692,7 @@ func (q *querier) UpdateMemberRoles(ctx context.Context, arg database.UpdateMemb
impliedTypes := append(scopedGranted, rbac.ScopedRoleOrgMember(arg.OrgID))
added, removed := rbac.ChangeRoleSet(originalRoles, impliedTypes)
- err = q.canAssignRoles(ctx, &arg.OrgID, added, removed)
+ err = q.canAssignRoles(ctx, arg.OrgID, added, removed)
if err != nil {
return database.OrganizationMember{}, err
}
@@ -4102,7 +4099,7 @@ func (q *querier) UpdateUserRoles(ctx context.Context, arg database.UpdateUserRo
impliedTypes := append(q.convertToDeploymentRoles(arg.GrantedRoles), rbac.RoleMember())
// If the changeset is nothing, less rbac checks need to be done.
added, removed := rbac.ChangeRoleSet(q.convertToDeploymentRoles(user.RBACRoles), impliedTypes)
- err = q.canAssignRoles(ctx, nil, added, removed)
+ err = q.canAssignRoles(ctx, uuid.Nil, added, removed)
if err != nil {
return database.User{}, err
}
diff --git a/coderd/database/dbauthz/dbauthz_test.go b/coderd/database/dbauthz/dbauthz_test.go
index 108a8166d19fb..1f2ae5eca62c4 100644
--- a/coderd/database/dbauthz/dbauthz_test.go
+++ b/coderd/database/dbauthz/dbauthz_test.go
@@ -1011,7 +1011,7 @@ func (s *MethodTestSuite) TestOrganization() {
Asserts(
mem, policy.ActionRead,
rbac.ResourceAssignOrgRole.InOrg(o.ID), policy.ActionAssign, // org-mem
- rbac.ResourceAssignOrgRole.InOrg(o.ID), policy.ActionDelete, // org-admin
+ rbac.ResourceAssignOrgRole.InOrg(o.ID), policy.ActionUnassign, // org-admin
).Returns(out)
}))
}
@@ -1619,7 +1619,7 @@ func (s *MethodTestSuite) TestUser() {
}).Asserts(
u, policy.ActionRead,
rbac.ResourceAssignRole, policy.ActionAssign,
- rbac.ResourceAssignRole, policy.ActionDelete,
+ rbac.ResourceAssignRole, policy.ActionUnassign,
).Returns(o)
}))
s.Run("AllUserIDs", s.Subtest(func(db database.Store, check *expects) {
@@ -1653,30 +1653,28 @@ func (s *MethodTestSuite) TestUser() {
check.Args(database.DeleteCustomRoleParams{
Name: customRole.Name,
}).Asserts(
- rbac.ResourceAssignRole, policy.ActionDelete)
+ // fails immediately, missing organization id
+ ).Errors(dbauthz.NotAuthorizedError{Err: xerrors.New("custom roles must belong to an organization")})
}))
s.Run("Blank/UpdateCustomRole", s.Subtest(func(db database.Store, check *expects) {
dbtestutil.DisableForeignKeysAndTriggers(s.T(), db)
- customRole := dbgen.CustomRole(s.T(), db, database.CustomRole{})
+ customRole := dbgen.CustomRole(s.T(), db, database.CustomRole{
+ OrganizationID: uuid.NullUUID{UUID: uuid.New(), Valid: true},
+ })
// Blank is no perms in the role
check.Args(database.UpdateCustomRoleParams{
Name: customRole.Name,
DisplayName: "Test Name",
+ OrganizationID: customRole.OrganizationID,
SitePermissions: nil,
OrgPermissions: nil,
UserPermissions: nil,
- }).Asserts(rbac.ResourceAssignRole, policy.ActionUpdate).ErrorsWithPG(sql.ErrNoRows)
+ }).Asserts(rbac.ResourceAssignOrgRole.InOrg(customRole.OrganizationID.UUID), policy.ActionUpdate)
}))
s.Run("SitePermissions/UpdateCustomRole", s.Subtest(func(db database.Store, check *expects) {
- customRole := dbgen.CustomRole(s.T(), db, database.CustomRole{
- OrganizationID: uuid.NullUUID{
- UUID: uuid.Nil,
- Valid: false,
- },
- })
check.Args(database.UpdateCustomRoleParams{
- Name: customRole.Name,
- OrganizationID: customRole.OrganizationID,
+ Name: "",
+ OrganizationID: uuid.NullUUID{UUID: uuid.Nil, Valid: false},
DisplayName: "Test Name",
SitePermissions: db2sdk.List(codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{
codersdk.ResourceTemplate: {codersdk.ActionCreate, codersdk.ActionRead, codersdk.ActionUpdate, codersdk.ActionDelete, codersdk.ActionViewInsights},
@@ -1686,17 +1684,8 @@ func (s *MethodTestSuite) TestUser() {
codersdk.ResourceWorkspace: {codersdk.ActionRead},
}), convertSDKPerm),
}).Asserts(
- // First check
- rbac.ResourceAssignRole, policy.ActionUpdate,
- // Escalation checks
- rbac.ResourceTemplate, policy.ActionCreate,
- rbac.ResourceTemplate, policy.ActionRead,
- rbac.ResourceTemplate, policy.ActionUpdate,
- rbac.ResourceTemplate, policy.ActionDelete,
- rbac.ResourceTemplate, policy.ActionViewInsights,
-
- rbac.ResourceWorkspace.WithOwner(testActorID.String()), policy.ActionRead,
- ).ErrorsWithPG(sql.ErrNoRows)
+ // fails immediately, missing organization id
+ ).Errors(dbauthz.NotAuthorizedError{Err: xerrors.New("custom roles must belong to an organization")})
}))
s.Run("OrgPermissions/UpdateCustomRole", s.Subtest(func(db database.Store, check *expects) {
orgID := uuid.New()
@@ -1726,13 +1715,15 @@ func (s *MethodTestSuite) TestUser() {
}))
s.Run("Blank/InsertCustomRole", s.Subtest(func(db database.Store, check *expects) {
// Blank is no perms in the role
+ orgID := uuid.New()
check.Args(database.InsertCustomRoleParams{
Name: "test",
DisplayName: "Test Name",
+ OrganizationID: uuid.NullUUID{UUID: orgID, Valid: true},
SitePermissions: nil,
OrgPermissions: nil,
UserPermissions: nil,
- }).Asserts(rbac.ResourceAssignRole, policy.ActionCreate)
+ }).Asserts(rbac.ResourceAssignOrgRole.InOrg(orgID), policy.ActionCreate)
}))
s.Run("SitePermissions/InsertCustomRole", s.Subtest(func(db database.Store, check *expects) {
check.Args(database.InsertCustomRoleParams{
@@ -1746,17 +1737,8 @@ func (s *MethodTestSuite) TestUser() {
codersdk.ResourceWorkspace: {codersdk.ActionRead},
}), convertSDKPerm),
}).Asserts(
- // First check
- rbac.ResourceAssignRole, policy.ActionCreate,
- // Escalation checks
- rbac.ResourceTemplate, policy.ActionCreate,
- rbac.ResourceTemplate, policy.ActionRead,
- rbac.ResourceTemplate, policy.ActionUpdate,
- rbac.ResourceTemplate, policy.ActionDelete,
- rbac.ResourceTemplate, policy.ActionViewInsights,
-
- rbac.ResourceWorkspace.WithOwner(testActorID.String()), policy.ActionRead,
- )
+ // fails immediately, missing organization id
+ ).Errors(dbauthz.NotAuthorizedError{Err: xerrors.New("custom roles must belong to an organization")})
}))
s.Run("OrgPermissions/InsertCustomRole", s.Subtest(func(db database.Store, check *expects) {
orgID := uuid.New()
diff --git a/coderd/database/queries.sql.go b/coderd/database/queries.sql.go
index 779bbf4b47ee9..56ee5cfa3a9af 100644
--- a/coderd/database/queries.sql.go
+++ b/coderd/database/queries.sql.go
@@ -7775,25 +7775,25 @@ SELECT
FROM
custom_roles
WHERE
- true
- -- @lookup_roles will filter for exact (role_name, org_id) pairs
- -- To do this manually in SQL, you can construct an array and cast it:
- -- cast(ARRAY[('customrole','ece79dac-926e-44ca-9790-2ff7c5eb6e0c')] AS name_organization_pair[])
- AND CASE WHEN array_length($1 :: name_organization_pair[], 1) > 0 THEN
- -- Using 'coalesce' to avoid troubles with null literals being an empty string.
- (name, coalesce(organization_id, '00000000-0000-0000-0000-000000000000' ::uuid)) = ANY ($1::name_organization_pair[])
- ELSE true
- END
- -- This allows fetching all roles, or just site wide roles
- AND CASE WHEN $2 :: boolean THEN
- organization_id IS null
+ true
+ -- @lookup_roles will filter for exact (role_name, org_id) pairs
+ -- To do this manually in SQL, you can construct an array and cast it:
+ -- cast(ARRAY[('customrole','ece79dac-926e-44ca-9790-2ff7c5eb6e0c')] AS name_organization_pair[])
+ AND CASE WHEN array_length($1 :: name_organization_pair[], 1) > 0 THEN
+ -- Using 'coalesce' to avoid troubles with null literals being an empty string.
+ (name, coalesce(organization_id, '00000000-0000-0000-0000-000000000000' ::uuid)) = ANY ($1::name_organization_pair[])
ELSE true
- END
- -- Allows fetching all roles to a particular organization
- AND CASE WHEN $3 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
- organization_id = $3
- ELSE true
- END
+ END
+ -- This allows fetching all roles, or just site wide roles
+ AND CASE WHEN $2 :: boolean THEN
+ organization_id IS null
+ ELSE true
+ END
+ -- Allows fetching all roles to a particular organization
+ AND CASE WHEN $3 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
+ organization_id = $3
+ ELSE true
+ END
`
type CustomRolesParams struct {
@@ -7866,16 +7866,16 @@ INSERT INTO
updated_at
)
VALUES (
- -- Always force lowercase names
- lower($1),
- $2,
- $3,
- $4,
- $5,
- $6,
- now(),
- now()
- )
+ -- Always force lowercase names
+ lower($1),
+ $2,
+ $3,
+ $4,
+ $5,
+ $6,
+ now(),
+ now()
+)
RETURNING name, display_name, site_permissions, org_permissions, user_permissions, created_at, updated_at, organization_id, id
`
diff --git a/coderd/database/queries/roles.sql b/coderd/database/queries/roles.sql
index 7246ddb6dee2d..ee5d35d91ab65 100644
--- a/coderd/database/queries/roles.sql
+++ b/coderd/database/queries/roles.sql
@@ -4,25 +4,25 @@ SELECT
FROM
custom_roles
WHERE
- true
- -- @lookup_roles will filter for exact (role_name, org_id) pairs
- -- To do this manually in SQL, you can construct an array and cast it:
- -- cast(ARRAY[('customrole','ece79dac-926e-44ca-9790-2ff7c5eb6e0c')] AS name_organization_pair[])
- AND CASE WHEN array_length(@lookup_roles :: name_organization_pair[], 1) > 0 THEN
- -- Using 'coalesce' to avoid troubles with null literals being an empty string.
- (name, coalesce(organization_id, '00000000-0000-0000-0000-000000000000' ::uuid)) = ANY (@lookup_roles::name_organization_pair[])
- ELSE true
- END
- -- This allows fetching all roles, or just site wide roles
- AND CASE WHEN @exclude_org_roles :: boolean THEN
- organization_id IS null
+ true
+ -- @lookup_roles will filter for exact (role_name, org_id) pairs
+ -- To do this manually in SQL, you can construct an array and cast it:
+ -- cast(ARRAY[('customrole','ece79dac-926e-44ca-9790-2ff7c5eb6e0c')] AS name_organization_pair[])
+ AND CASE WHEN array_length(@lookup_roles :: name_organization_pair[], 1) > 0 THEN
+ -- Using 'coalesce' to avoid troubles with null literals being an empty string.
+ (name, coalesce(organization_id, '00000000-0000-0000-0000-000000000000' ::uuid)) = ANY (@lookup_roles::name_organization_pair[])
ELSE true
- END
- -- Allows fetching all roles to a particular organization
- AND CASE WHEN @organization_id :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
- organization_id = @organization_id
- ELSE true
- END
+ END
+ -- This allows fetching all roles, or just site wide roles
+ AND CASE WHEN @exclude_org_roles :: boolean THEN
+ organization_id IS null
+ ELSE true
+ END
+ -- Allows fetching all roles to a particular organization
+ AND CASE WHEN @organization_id :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
+ organization_id = @organization_id
+ ELSE true
+ END
;
-- name: DeleteCustomRole :exec
@@ -46,16 +46,16 @@ INSERT INTO
updated_at
)
VALUES (
- -- Always force lowercase names
- lower(@name),
- @display_name,
- @organization_id,
- @site_permissions,
- @org_permissions,
- @user_permissions,
- now(),
- now()
- )
+ -- Always force lowercase names
+ lower(@name),
+ @display_name,
+ @organization_id,
+ @site_permissions,
+ @org_permissions,
+ @user_permissions,
+ now(),
+ now()
+)
RETURNING *;
-- name: UpdateCustomRole :one
diff --git a/coderd/members.go b/coderd/members.go
index 97950b19e9137..c89b4c9c09c1a 100644
--- a/coderd/members.go
+++ b/coderd/members.go
@@ -323,7 +323,7 @@ func convertOrganizationMembers(ctx context.Context, db database.Store, mems []d
customRoles, err := db.CustomRoles(ctx, database.CustomRolesParams{
LookupRoles: roleLookup,
ExcludeOrgRoles: false,
- OrganizationID: uuid.UUID{},
+ OrganizationID: uuid.Nil,
})
if err != nil {
// We are missing the display names, but that is not absolutely required. So just
diff --git a/coderd/rbac/object_gen.go b/coderd/rbac/object_gen.go
index e1fefada0f422..86faa5f9456dc 100644
--- a/coderd/rbac/object_gen.go
+++ b/coderd/rbac/object_gen.go
@@ -27,22 +27,21 @@ var (
// ResourceAssignOrgRole
// Valid Actions
- // - "ActionAssign" :: ability to assign org scoped roles
- // - "ActionCreate" :: ability to create/delete custom roles within an organization
- // - "ActionDelete" :: ability to delete org scoped roles
- // - "ActionRead" :: view what roles are assignable
- // - "ActionUpdate" :: ability to edit custom roles within an organization
+ // - "ActionAssign" :: assign org scoped roles
+ // - "ActionCreate" :: create/delete custom roles within an organization
+ // - "ActionDelete" :: delete roles within an organization
+ // - "ActionRead" :: view what roles are assignable within an organization
+ // - "ActionUnassign" :: unassign org scoped roles
+ // - "ActionUpdate" :: edit custom roles within an organization
ResourceAssignOrgRole = Object{
Type: "assign_org_role",
}
// ResourceAssignRole
// Valid Actions
- // - "ActionAssign" :: ability to assign roles
- // - "ActionCreate" :: ability to create/delete/edit custom roles
- // - "ActionDelete" :: ability to unassign roles
+ // - "ActionAssign" :: assign user roles
// - "ActionRead" :: view what roles are assignable
- // - "ActionUpdate" :: ability to edit custom roles
+ // - "ActionUnassign" :: unassign user roles
ResourceAssignRole = Object{
Type: "assign_role",
}
@@ -367,6 +366,7 @@ func AllActions() []policy.Action {
policy.ActionRead,
policy.ActionReadPersonal,
policy.ActionSSH,
+ policy.ActionUnassign,
policy.ActionUpdate,
policy.ActionUpdatePersonal,
policy.ActionUse,
diff --git a/coderd/rbac/policy/policy.go b/coderd/rbac/policy/policy.go
index 2aae17badfb95..0988401e3849c 100644
--- a/coderd/rbac/policy/policy.go
+++ b/coderd/rbac/policy/policy.go
@@ -19,7 +19,8 @@ const (
ActionWorkspaceStart Action = "start"
ActionWorkspaceStop Action = "stop"
- ActionAssign Action = "assign"
+ ActionAssign Action = "assign"
+ ActionUnassign Action = "unassign"
ActionReadPersonal Action = "read_personal"
ActionUpdatePersonal Action = "update_personal"
@@ -221,20 +222,19 @@ var RBACPermissions = map[string]PermissionDefinition{
},
"assign_role": {
Actions: map[Action]ActionDefinition{
- ActionAssign: actDef("ability to assign roles"),
- ActionRead: actDef("view what roles are assignable"),
- ActionDelete: actDef("ability to unassign roles"),
- ActionCreate: actDef("ability to create/delete/edit custom roles"),
- ActionUpdate: actDef("ability to edit custom roles"),
+ ActionAssign: actDef("assign user roles"),
+ ActionUnassign: actDef("unassign user roles"),
+ ActionRead: actDef("view what roles are assignable"),
},
},
"assign_org_role": {
Actions: map[Action]ActionDefinition{
- ActionAssign: actDef("ability to assign org scoped roles"),
- ActionRead: actDef("view what roles are assignable"),
- ActionDelete: actDef("ability to delete org scoped roles"),
- ActionCreate: actDef("ability to create/delete custom roles within an organization"),
- ActionUpdate: actDef("ability to edit custom roles within an organization"),
+ ActionAssign: actDef("assign org scoped roles"),
+ ActionUnassign: actDef("unassign org scoped roles"),
+ ActionCreate: actDef("create/delete custom roles within an organization"),
+ ActionRead: actDef("view what roles are assignable within an organization"),
+ ActionUpdate: actDef("edit custom roles within an organization"),
+ ActionDelete: actDef("delete roles within an organization"),
},
},
"oauth2_app": {
diff --git a/coderd/rbac/roles.go b/coderd/rbac/roles.go
index af3e972fc9a6d..6b99cb4e871a2 100644
--- a/coderd/rbac/roles.go
+++ b/coderd/rbac/roles.go
@@ -350,10 +350,10 @@ func ReloadBuiltinRoles(opts *RoleOptions) {
Identifier: RoleUserAdmin(),
DisplayName: "User Admin",
Site: Permissions(map[string][]policy.Action{
- ResourceAssignRole.Type: {policy.ActionAssign, policy.ActionDelete, policy.ActionRead},
+ ResourceAssignRole.Type: {policy.ActionAssign, policy.ActionUnassign, policy.ActionRead},
// Need organization assign as well to create users. At present, creating a user
// will always assign them to some organization.
- ResourceAssignOrgRole.Type: {policy.ActionAssign, policy.ActionDelete, policy.ActionRead},
+ ResourceAssignOrgRole.Type: {policy.ActionAssign, policy.ActionUnassign, policy.ActionRead},
ResourceUser.Type: {
policy.ActionCreate, policy.ActionRead, policy.ActionUpdate, policy.ActionDelete,
policy.ActionUpdatePersonal, policy.ActionReadPersonal,
@@ -470,7 +470,7 @@ func ReloadBuiltinRoles(opts *RoleOptions) {
Org: map[string][]Permission{
organizationID.String(): Permissions(map[string][]policy.Action{
// Assign, remove, and read roles in the organization.
- ResourceAssignOrgRole.Type: {policy.ActionAssign, policy.ActionDelete, policy.ActionRead},
+ ResourceAssignOrgRole.Type: {policy.ActionAssign, policy.ActionUnassign, policy.ActionRead},
ResourceOrganization.Type: {policy.ActionRead},
ResourceOrganizationMember.Type: {policy.ActionCreate, policy.ActionRead, policy.ActionUpdate, policy.ActionDelete},
ResourceGroup.Type: ResourceGroup.AvailableActions(),
diff --git a/coderd/rbac/roles_test.go b/coderd/rbac/roles_test.go
index af62a5cd5d1b3..51eb15def9739 100644
--- a/coderd/rbac/roles_test.go
+++ b/coderd/rbac/roles_test.go
@@ -303,9 +303,9 @@ func TestRolePermissions(t *testing.T) {
},
},
{
- Name: "CreateCustomRole",
- Actions: []policy.Action{policy.ActionCreate, policy.ActionUpdate},
- Resource: rbac.ResourceAssignRole,
+ Name: "CreateUpdateDeleteCustomRole",
+ Actions: []policy.Action{policy.ActionCreate, policy.ActionUpdate, policy.ActionDelete},
+ Resource: rbac.ResourceAssignOrgRole,
AuthorizeMap: map[bool][]hasAuthSubjects{
true: {owner},
false: {setOtherOrg, setOrgNotMe, userAdmin, orgMemberMe, memberMe, templateAdmin},
@@ -313,7 +313,7 @@ func TestRolePermissions(t *testing.T) {
},
{
Name: "RoleAssignment",
- Actions: []policy.Action{policy.ActionAssign, policy.ActionDelete},
+ Actions: []policy.Action{policy.ActionAssign, policy.ActionUnassign},
Resource: rbac.ResourceAssignRole,
AuthorizeMap: map[bool][]hasAuthSubjects{
true: {owner, userAdmin},
@@ -331,7 +331,7 @@ func TestRolePermissions(t *testing.T) {
},
{
Name: "OrgRoleAssignment",
- Actions: []policy.Action{policy.ActionAssign, policy.ActionDelete},
+ Actions: []policy.Action{policy.ActionAssign, policy.ActionUnassign},
Resource: rbac.ResourceAssignOrgRole.InOrg(orgID),
AuthorizeMap: map[bool][]hasAuthSubjects{
true: {owner, orgAdmin, userAdmin, orgUserAdmin},
diff --git a/codersdk/rbacresources_gen.go b/codersdk/rbacresources_gen.go
index f2751ac0334aa..68b765db3f8a6 100644
--- a/codersdk/rbacresources_gen.go
+++ b/codersdk/rbacresources_gen.go
@@ -49,6 +49,7 @@ const (
ActionRead RBACAction = "read"
ActionReadPersonal RBACAction = "read_personal"
ActionSSH RBACAction = "ssh"
+ ActionUnassign RBACAction = "unassign"
ActionUpdate RBACAction = "update"
ActionUpdatePersonal RBACAction = "update_personal"
ActionUse RBACAction = "use"
@@ -62,8 +63,8 @@ const (
var RBACResourceActions = map[RBACResource][]RBACAction{
ResourceWildcard: {},
ResourceApiKey: {ActionCreate, ActionDelete, ActionRead, ActionUpdate},
- ResourceAssignOrgRole: {ActionAssign, ActionCreate, ActionDelete, ActionRead, ActionUpdate},
- ResourceAssignRole: {ActionAssign, ActionCreate, ActionDelete, ActionRead, ActionUpdate},
+ ResourceAssignOrgRole: {ActionAssign, ActionCreate, ActionDelete, ActionRead, ActionUnassign, ActionUpdate},
+ ResourceAssignRole: {ActionAssign, ActionRead, ActionUnassign},
ResourceAuditLog: {ActionCreate, ActionRead},
ResourceCryptoKey: {ActionCreate, ActionDelete, ActionRead, ActionUpdate},
ResourceDebugInfo: {ActionRead},
diff --git a/docs/reference/api/members.md b/docs/reference/api/members.md
index 6daaaaeea736f..d29774663bc32 100644
--- a/docs/reference/api/members.md
+++ b/docs/reference/api/members.md
@@ -173,6 +173,7 @@ Status Code **200**
| `action` | `read` |
| `action` | `read_personal` |
| `action` | `ssh` |
+| `action` | `unassign` |
| `action` | `update` |
| `action` | `update_personal` |
| `action` | `use` |
@@ -335,6 +336,7 @@ Status Code **200**
| `action` | `read` |
| `action` | `read_personal` |
| `action` | `ssh` |
+| `action` | `unassign` |
| `action` | `update` |
| `action` | `update_personal` |
| `action` | `use` |
@@ -497,6 +499,7 @@ Status Code **200**
| `action` | `read` |
| `action` | `read_personal` |
| `action` | `ssh` |
+| `action` | `unassign` |
| `action` | `update` |
| `action` | `update_personal` |
| `action` | `use` |
@@ -628,6 +631,7 @@ Status Code **200**
| `action` | `read` |
| `action` | `read_personal` |
| `action` | `ssh` |
+| `action` | `unassign` |
| `action` | `update` |
| `action` | `update_personal` |
| `action` | `use` |
@@ -891,6 +895,7 @@ Status Code **200**
| `action` | `read` |
| `action` | `read_personal` |
| `action` | `ssh` |
+| `action` | `unassign` |
| `action` | `update` |
| `action` | `update_personal` |
| `action` | `use` |
diff --git a/docs/reference/api/schemas.md b/docs/reference/api/schemas.md
index 99f94e53992e8..b3e4821c2e39e 100644
--- a/docs/reference/api/schemas.md
+++ b/docs/reference/api/schemas.md
@@ -5104,6 +5104,7 @@ Git clone makes use of this by parsing the URL from: 'Username for "https://gith
| `read` |
| `read_personal` |
| `ssh` |
+| `unassign` |
| `update` |
| `update_personal` |
| `use` |
diff --git a/enterprise/coderd/roles.go b/enterprise/coderd/roles.go
index d5af54a35b03b..30432af76c7eb 100644
--- a/enterprise/coderd/roles.go
+++ b/enterprise/coderd/roles.go
@@ -127,8 +127,7 @@ func (api *API) putOrgRoles(rw http.ResponseWriter, r *http.Request) {
},
},
ExcludeOrgRoles: false,
- // Linter requires all fields to be set. This field is not actually required.
- OrganizationID: organization.ID,
+ OrganizationID: organization.ID,
})
// If it is a 404 (not found) error, ignore it.
if err != nil && !httpapi.Is404Error(err) {
diff --git a/site/src/api/rbacresourcesGenerated.ts b/site/src/api/rbacresourcesGenerated.ts
index 483508bc11554..bfd1a46861090 100644
--- a/site/src/api/rbacresourcesGenerated.ts
+++ b/site/src/api/rbacresourcesGenerated.ts
@@ -15,18 +15,17 @@ export const RBACResourceActions: Partial<
update: "update an api key, eg expires",
},
assign_org_role: {
- assign: "ability to assign org scoped roles",
- create: "ability to create/delete custom roles within an organization",
- delete: "ability to delete org scoped roles",
- read: "view what roles are assignable",
- update: "ability to edit custom roles within an organization",
+ assign: "assign org scoped roles",
+ create: "create/delete custom roles within an organization",
+ delete: "delete roles within an organization",
+ read: "view what roles are assignable within an organization",
+ unassign: "unassign org scoped roles",
+ update: "edit custom roles within an organization",
},
assign_role: {
- assign: "ability to assign roles",
- create: "ability to create/delete/edit custom roles",
- delete: "ability to unassign roles",
+ assign: "assign user roles",
read: "view what roles are assignable",
- update: "ability to edit custom roles",
+ unassign: "unassign user roles",
},
audit_log: {
create: "create new audit log entries",
diff --git a/site/src/api/typesGenerated.ts b/site/src/api/typesGenerated.ts
index 1a011b57b4c39..8c350d8f5bc31 100644
--- a/site/src/api/typesGenerated.ts
+++ b/site/src/api/typesGenerated.ts
@@ -1856,6 +1856,7 @@ export type RBACAction =
| "read"
| "read_personal"
| "ssh"
+ | "unassign"
| "update"
| "update_personal"
| "use"
@@ -1871,6 +1872,7 @@ export const RBACActions: RBACAction[] = [
"read",
"read_personal",
"ssh",
+ "unassign",
"update",
"update_personal",
"use",
From 0ea06012fcb375cd1c6d1d8fdb34685880571b0d Mon Sep 17 00:00:00 2001
From: Marcin Tojek
Date: Thu, 27 Feb 2025 20:30:11 +0100
Subject: [PATCH 24/44] fix: handle undefined job while updating build progress
(#16732)
Fixes: https://github.com/coder/coder/issues/15444
---
site/src/pages/WorkspacePage/WorkspaceBuildProgress.tsx | 1 +
1 file changed, 1 insertion(+)
diff --git a/site/src/pages/WorkspacePage/WorkspaceBuildProgress.tsx b/site/src/pages/WorkspacePage/WorkspaceBuildProgress.tsx
index 88f006681495e..52f3e725c6003 100644
--- a/site/src/pages/WorkspacePage/WorkspaceBuildProgress.tsx
+++ b/site/src/pages/WorkspacePage/WorkspaceBuildProgress.tsx
@@ -81,6 +81,7 @@ export const WorkspaceBuildProgress: FC = ({
useEffect(() => {
const updateProgress = () => {
if (
+ job === undefined ||
job.status !== "running" ||
transitionStats.P50 === undefined ||
transitionStats.P95 === undefined ||
From 7e339021c13aa7788edb2c4519e37d14467d68b6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E3=82=B1=E3=82=A4=E3=83=A9?=
Date: Thu, 27 Feb 2025 12:55:30 -0700
Subject: [PATCH 25/44] chore: use org-scoped roles for organization groups and
members e2e tests (#16691)
---
site/e2e/api.ts | 32 ++++++++++++++++++++--
site/e2e/constants.ts | 7 +++++
site/e2e/helpers.ts | 29 +++++++++++++++++++-
site/e2e/tests/organizationGroups.spec.ts | 15 ++++++++--
site/e2e/tests/organizationMembers.spec.ts | 20 ++++++--------
5 files changed, 85 insertions(+), 18 deletions(-)
diff --git a/site/e2e/api.ts b/site/e2e/api.ts
index 902485b7b15b6..0dc9e46831708 100644
--- a/site/e2e/api.ts
+++ b/site/e2e/api.ts
@@ -3,8 +3,8 @@ import { expect } from "@playwright/test";
import { API, type DeploymentConfig } from "api/api";
import type { SerpentOption } from "api/typesGenerated";
import { formatDuration, intervalToDuration } from "date-fns";
-import { coderPort } from "./constants";
-import { findSessionToken, randomName } from "./helpers";
+import { coderPort, defaultPassword } from "./constants";
+import { type LoginOptions, findSessionToken, randomName } from "./helpers";
let currentOrgId: string;
@@ -29,14 +29,40 @@ export const createUser = async (...orgIds: string[]) => {
email: `${name}@coder.com`,
username: name,
name: name,
- password: "s3cure&password!",
+ password: defaultPassword,
login_type: "password",
organization_ids: orgIds,
user_status: null,
});
+
return user;
};
+export const createOrganizationMember = async (
+ orgRoles: Record,
+): Promise => {
+ const name = randomName();
+ const user = await API.createUser({
+ email: `${name}@coder.com`,
+ username: name,
+ name: name,
+ password: defaultPassword,
+ login_type: "password",
+ organization_ids: Object.keys(orgRoles),
+ user_status: null,
+ });
+
+ for (const [org, roles] of Object.entries(orgRoles)) {
+ API.updateOrganizationMemberRoles(org, user.id, roles);
+ }
+
+ return {
+ username: user.username,
+ email: user.email,
+ password: defaultPassword,
+ };
+};
+
export const createGroup = async (orgId: string) => {
const name = randomName();
const group = await API.createGroup(orgId, {
diff --git a/site/e2e/constants.ts b/site/e2e/constants.ts
index 4fcada0e6d15b..4d2d9099692d5 100644
--- a/site/e2e/constants.ts
+++ b/site/e2e/constants.ts
@@ -15,6 +15,7 @@ export const coderdPProfPort = 6062;
// The name of the organization that should be used by default when needed.
export const defaultOrganizationName = "coder";
+export const defaultOrganizationId = "00000000-0000-0000-0000-000000000000";
export const defaultPassword = "SomeSecurePassword!";
// Credentials for users
@@ -30,6 +31,12 @@ export const users = {
email: "templateadmin@coder.com",
roles: ["Template Admin"],
},
+ userAdmin: {
+ username: "user-admin",
+ password: defaultPassword,
+ email: "useradmin@coder.com",
+ roles: ["User Admin"],
+ },
auditor: {
username: "auditor",
password: defaultPassword,
diff --git a/site/e2e/helpers.ts b/site/e2e/helpers.ts
index 5692909355fca..24b46d47a151b 100644
--- a/site/e2e/helpers.ts
+++ b/site/e2e/helpers.ts
@@ -61,7 +61,7 @@ export function requireTerraformProvisioner() {
test.skip(!requireTerraformTests);
}
-type LoginOptions = {
+export type LoginOptions = {
username: string;
email: string;
password: string;
@@ -1127,3 +1127,30 @@ export async function createOrganization(page: Page): Promise<{
return { name, displayName, description };
}
+
+/**
+ * @param organization organization name
+ * @param user user email or username
+ */
+export async function addUserToOrganization(
+ page: Page,
+ organization: string,
+ user: string,
+ roles: string[] = [],
+): Promise {
+ await page.goto(`/organizations/${organization}`, {
+ waitUntil: "domcontentloaded",
+ });
+
+ await page.getByPlaceholder("User email or username").fill(user);
+ await page.getByRole("option", { name: user }).click();
+ await page.getByRole("button", { name: "Add user" }).click();
+ const addedRow = page.locator("tr", { hasText: user });
+ await expect(addedRow).toBeVisible();
+
+ await addedRow.getByLabel("Edit user roles").click();
+ for (const role of roles) {
+ await page.getByText(role).click();
+ }
+ await page.mouse.click(10, 10); // close the popover by clicking outside of it
+}
diff --git a/site/e2e/tests/organizationGroups.spec.ts b/site/e2e/tests/organizationGroups.spec.ts
index dff12ab91c453..6e8aa74a4bf8b 100644
--- a/site/e2e/tests/organizationGroups.spec.ts
+++ b/site/e2e/tests/organizationGroups.spec.ts
@@ -2,10 +2,11 @@ import { expect, test } from "@playwright/test";
import {
createGroup,
createOrganization,
+ createOrganizationMember,
createUser,
setupApiCalls,
} from "../api";
-import { defaultOrganizationName } from "../constants";
+import { defaultOrganizationId, defaultOrganizationName } from "../constants";
import { expectUrl } from "../expectUrl";
import { login, randomName, requiresLicense } from "../helpers";
import { beforeCoderTest } from "../hooks";
@@ -32,6 +33,11 @@ test("create group", async ({ page }) => {
// Create a new organization
const org = await createOrganization();
+ const orgUserAdmin = await createOrganizationMember({
+ [org.id]: ["organization-user-admin"],
+ });
+
+ await login(page, orgUserAdmin);
await page.goto(`/organizations/${org.name}`);
// Navigate to groups page
@@ -64,8 +70,7 @@ test("create group", async ({ page }) => {
await expect(addedRow).toBeVisible();
// Ensure we can't add a user who isn't in the org
- const otherOrg = await createOrganization();
- const personToReject = await createUser(otherOrg.id);
+ const personToReject = await createUser(defaultOrganizationId);
await page
.getByPlaceholder("User email or username")
.fill(personToReject.email);
@@ -93,8 +98,12 @@ test("change quota settings", async ({ page }) => {
// Create a new organization and group
const org = await createOrganization();
const group = await createGroup(org.id);
+ const orgUserAdmin = await createOrganizationMember({
+ [org.id]: ["organization-user-admin"],
+ });
// Go to settings
+ await login(page, orgUserAdmin);
await page.goto(`/organizations/${org.name}/groups/${group.name}`);
await page.getByRole("button", { name: "Settings", exact: true }).click();
expectUrl(page).toHavePathName(
diff --git a/site/e2e/tests/organizationMembers.spec.ts b/site/e2e/tests/organizationMembers.spec.ts
index 9edb2eb922ab8..51c3491ae3d62 100644
--- a/site/e2e/tests/organizationMembers.spec.ts
+++ b/site/e2e/tests/organizationMembers.spec.ts
@@ -1,6 +1,7 @@
import { expect, test } from "@playwright/test";
import { setupApiCalls } from "../api";
import {
+ addUserToOrganization,
createOrganization,
createUser,
login,
@@ -18,7 +19,7 @@ test("add and remove organization member", async ({ page }) => {
requiresLicense();
// Create a new organization
- const { displayName } = await createOrganization(page);
+ const { name: orgName, displayName } = await createOrganization(page);
// Navigate to members page
await page.getByRole("link", { name: "Members" }).click();
@@ -26,17 +27,14 @@ test("add and remove organization member", async ({ page }) => {
// Add a user to the org
const personToAdd = await createUser(page);
- await page.getByPlaceholder("User email or username").fill(personToAdd.email);
- await page.getByRole("option", { name: personToAdd.email }).click();
- await page.getByRole("button", { name: "Add user" }).click();
- const addedRow = page.locator("tr", { hasText: personToAdd.email });
- await expect(addedRow).toBeVisible();
+ // This must be done as an admin, because you can't assign a role that has more
+ // permissions than you, even if you have the ability to assign roles.
+ await addUserToOrganization(page, orgName, personToAdd.email, [
+ "Organization User Admin",
+ "Organization Template Admin",
+ ]);
- // Give them a role
- await addedRow.getByLabel("Edit user roles").click();
- await page.getByText("Organization User Admin").click();
- await page.getByText("Organization Template Admin").click();
- await page.mouse.click(10, 10); // close the popover by clicking outside of it
+ const addedRow = page.locator("tr", { hasText: personToAdd.email });
await expect(addedRow.getByText("Organization User Admin")).toBeVisible();
await expect(addedRow.getByText("+1 more")).toBeVisible();
From b23e05b1fe746ae2e65967651bb6a1631504847b Mon Sep 17 00:00:00 2001
From: Dean Sheather
Date: Fri, 28 Feb 2025 15:20:00 +1100
Subject: [PATCH 26/44] fix(vpn): fail early if wintun.dll is not present
(#16707)
Prevents the VPN startup from hanging for 5 minutes due to a startup
backoff if `wintun.dll` cannot be loaded.
Because the `wintun` package doesn't expose an easy `Load() error`
method, the only way to force it to load (without unwanted side
effects) is through `wintun.Version()`, which doesn't return an
error.
So, we call that function so the `wintun` package loads the DLL and
configures the logging properly, then we try to load the DLL ourselves.
`LoadLibraryEx` will not load the library multiple times and returns a
reference to the existing library.
Closes https://github.com/coder/coder-desktop-windows/issues/24
---
vpn/tun_windows.go | 34 +++++++++++++++++++++++++++++++---
1 file changed, 31 insertions(+), 3 deletions(-)
diff --git a/vpn/tun_windows.go b/vpn/tun_windows.go
index a70cb8f28d60d..52778a8a9d08b 100644
--- a/vpn/tun_windows.go
+++ b/vpn/tun_windows.go
@@ -25,7 +25,12 @@ import (
"github.com/coder/retry"
)
-const tunName = "Coder"
+const (
+ tunName = "Coder"
+ tunGUID = "{0ed1515d-04a4-4c46-abae-11ad07cf0e6d}"
+
+ wintunDLL = "wintun.dll"
+)
func GetNetworkingStack(t *Tunnel, _ *StartRequest, logger slog.Logger) (NetworkStack, error) {
// Initialize COM process-wide so Tailscale can make calls to the windows
@@ -44,12 +49,35 @@ func GetNetworkingStack(t *Tunnel, _ *StartRequest, logger slog.Logger) (Network
// Set the name and GUID for the TUN interface.
tun.WintunTunnelType = tunName
- guid, err := windows.GUIDFromString("{0ed1515d-04a4-4c46-abae-11ad07cf0e6d}")
+ guid, err := windows.GUIDFromString(tunGUID)
if err != nil {
- panic(err)
+ return NetworkStack{}, xerrors.Errorf("could not parse GUID %q: %w", tunGUID, err)
}
tun.WintunStaticRequestedGUID = &guid
+ // Ensure wintun.dll is available, and fail early if it's not to avoid
+ // hanging for 5 minutes in tstunNewWithWindowsRetries.
+ //
+ // First, we call wintun.Version() to make the wintun package attempt to
+ // load wintun.dll. This allows the wintun package to set the logging
+ // callback in the DLL before we load it ourselves.
+ _ = wintun.Version()
+
+ // Then, we try to load wintun.dll ourselves so we get a better error
+ // message if there was a problem. This call matches the wintun package, so
+ // we're loading it in the same way.
+ //
+ // Note: this leaks the handle to wintun.dll, but since it's already loaded
+ // it wouldn't be freed anyways.
+ const (
+ LOAD_LIBRARY_SEARCH_APPLICATION_DIR = 0x00000200
+ LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x00000800
+ )
+ _, err = windows.LoadLibraryEx(wintunDLL, 0, LOAD_LIBRARY_SEARCH_APPLICATION_DIR|LOAD_LIBRARY_SEARCH_SYSTEM32)
+ if err != nil {
+ return NetworkStack{}, xerrors.Errorf("could not load %q, it should be in the same directory as the executable (in Coder Desktop, this should have been installed automatically): %w", wintunDLL, err)
+ }
+
tunDev, tunName, err := tstunNewWithWindowsRetries(tailnet.Logger(logger.Named("net.tun.device")), tunName)
if err != nil {
return NetworkStack{}, xerrors.Errorf("create tun device: %w", err)
From 3997eeee26d2c18123edba0043bf398759922d0c Mon Sep 17 00:00:00 2001
From: Dean Sheather
Date: Fri, 28 Feb 2025 15:35:56 +1100
Subject: [PATCH 27/44] chore: update tailscale (#16737)
---
go.mod | 2 +-
go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/go.mod b/go.mod
index 5e730b4f2a704..4b38c65265f4d 100644
--- a/go.mod
+++ b/go.mod
@@ -36,7 +36,7 @@ replace github.com/tcnksm/go-httpstat => github.com/coder/go-httpstat v0.0.0-202
// There are a few minor changes we make to Tailscale that we're slowly upstreaming. Compare here:
// https://github.com/tailscale/tailscale/compare/main...coder:tailscale:main
-replace tailscale.com => github.com/coder/tailscale v1.1.1-0.20250129014916-8086c871eae6
+replace tailscale.com => github.com/coder/tailscale v1.1.1-0.20250227024825-c9983534152a
// This is replaced to include
// 1. a fix for a data race: c.f. https://github.com/tailscale/wireguard-go/pull/25
diff --git a/go.sum b/go.sum
index c94a9be8df40a..6496dfc84118d 100644
--- a/go.sum
+++ b/go.sum
@@ -236,8 +236,8 @@ github.com/coder/serpent v0.10.0 h1:ofVk9FJXSek+SmL3yVE3GoArP83M+1tX+H7S4t8BSuM=
github.com/coder/serpent v0.10.0/go.mod h1:cZFW6/fP+kE9nd/oRkEHJpG6sXCtQ+AX7WMMEHv0Y3Q=
github.com/coder/ssh v0.0.0-20231128192721-70855dedb788 h1:YoUSJ19E8AtuUFVYBpXuOD6a/zVP3rcxezNsoDseTUw=
github.com/coder/ssh v0.0.0-20231128192721-70855dedb788/go.mod h1:aGQbuCLyhRLMzZF067xc84Lh7JDs1FKwCmF1Crl9dxQ=
-github.com/coder/tailscale v1.1.1-0.20250129014916-8086c871eae6 h1:prDIwUcsSEKbs1Rc5FfdvtSfz2XGpW3FnJtWR+Mc7MY=
-github.com/coder/tailscale v1.1.1-0.20250129014916-8086c871eae6/go.mod h1:1ggFFdHTRjPRu9Yc1yA7nVHBYB50w9Ce7VIXNqcW6Ko=
+github.com/coder/tailscale v1.1.1-0.20250227024825-c9983534152a h1:18TQ03KlYrkW8hOohTQaDnlmkY1H9pDPGbZwOnUUmm8=
+github.com/coder/tailscale v1.1.1-0.20250227024825-c9983534152a/go.mod h1:1ggFFdHTRjPRu9Yc1yA7nVHBYB50w9Ce7VIXNqcW6Ko=
github.com/coder/terraform-config-inspect v0.0.0-20250107175719-6d06d90c630e h1:JNLPDi2P73laR1oAclY6jWzAbucf70ASAvf5mh2cME0=
github.com/coder/terraform-config-inspect v0.0.0-20250107175719-6d06d90c630e/go.mod h1:Gz/z9Hbn+4KSp8A2FBtNszfLSdT2Tn/uAKGuVqqWmDI=
github.com/coder/terraform-provider-coder/v2 v2.1.3 h1:zB7ObGsiOGBHcJUUMmcSauEPlTWRIYmMYieF05LxHSc=
From 64fec8bf0b602c7b7069ae435c79ac5ccfbfe58b Mon Sep 17 00:00:00 2001
From: Dean Sheather
Date: Fri, 28 Feb 2025 16:03:08 +1100
Subject: [PATCH 28/44] feat: include winres metadata in Windows binaries
(#16706)
Adds information like product/file version, description, product name
and copyright to compiled Windows binaries in dogfood and release
builds. Also adds an icon to the executable.
This is necessary for Coder Desktop to be able to check the version on
binaries.
### Before:


### After:



Closes https://github.com/coder/coder/issues/16693
---
.github/workflows/ci.yaml | 53 +++++++++++++-
.github/workflows/release.yaml | 28 ++++----
buildinfo/resources/.gitignore | 1 +
buildinfo/resources/resources.go | 8 +++
cmd/coder/main.go | 1 +
enterprise/cmd/coder/main.go | 1 +
scripts/build_go.sh | 114 +++++++++++++++++++++++++++++--
7 files changed, 185 insertions(+), 21 deletions(-)
create mode 100644 buildinfo/resources/.gitignore
create mode 100644 buildinfo/resources/resources.go
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 6cd3238cad2bf..7b47532ed46e1 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -1021,7 +1021,10 @@ jobs:
if: github.ref == 'refs/heads/main' && needs.changes.outputs.docs-only == 'false' && !github.event.pull_request.head.repo.fork
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-22.04' }}
permissions:
- packages: write # Needed to push images to ghcr.io
+ # Necessary to push docker images to ghcr.io.
+ packages: write
+ # Necessary for GCP authentication (https://github.com/google-github-actions/setup-gcloud#usage)
+ id-token: write
env:
DOCKER_CLI_EXPERIMENTAL: "enabled"
outputs:
@@ -1050,12 +1053,44 @@ jobs:
- name: Setup Go
uses: ./.github/actions/setup-go
+ # Necessary for signing Windows binaries.
+ - name: Setup Java
+ uses: actions/setup-java@3a4f6e1af504cf6a31855fa899c6aa5355ba6c12 # v4.7.0
+ with:
+ distribution: "zulu"
+ java-version: "11.0"
+
+ - name: Install go-winres
+ run: go install github.com/tc-hib/go-winres@d743268d7ea168077ddd443c4240562d4f5e8c3e # v0.3.3
+
- name: Install nfpm
run: go install github.com/goreleaser/nfpm/v2/cmd/nfpm@v2.35.1
- name: Install zstd
run: sudo apt-get install -y zstd
+ - name: Setup Windows EV Signing Certificate
+ run: |
+ set -euo pipefail
+ touch /tmp/ev_cert.pem
+ chmod 600 /tmp/ev_cert.pem
+ echo "$EV_SIGNING_CERT" > /tmp/ev_cert.pem
+ wget https://github.com/ebourg/jsign/releases/download/6.0/jsign-6.0.jar -O /tmp/jsign-6.0.jar
+ env:
+ EV_SIGNING_CERT: ${{ secrets.EV_SIGNING_CERT }}
+
+ # Setup GCloud for signing Windows binaries.
+ - name: Authenticate to Google Cloud
+ id: gcloud_auth
+ uses: google-github-actions/auth@71f986410dfbc7added4569d411d040a91dc6935 # v2.1.8
+ with:
+ workload_identity_provider: ${{ secrets.GCP_CODE_SIGNING_WORKLOAD_ID_PROVIDER }}
+ service_account: ${{ secrets.GCP_CODE_SIGNING_SERVICE_ACCOUNT }}
+ token_format: "access_token"
+
+ - name: Setup GCloud SDK
+ uses: google-github-actions/setup-gcloud@77e7a554d41e2ee56fc945c52dfd3f33d12def9a # v2.1.4
+
- name: Download dylibs
uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
@@ -1082,6 +1117,18 @@ jobs:
build/coder_linux_{amd64,arm64,armv7} \
build/coder_"$version"_windows_amd64.zip \
build/coder_"$version"_linux_amd64.{tar.gz,deb}
+ env:
+ # The Windows slim binary must be signed for Coder Desktop to accept
+ # it. The darwin executables don't need to be signed, but the dylibs
+ # do (see above).
+ CODER_SIGN_WINDOWS: "1"
+ CODER_WINDOWS_RESOURCES: "1"
+ EV_KEY: ${{ secrets.EV_KEY }}
+ EV_KEYSTORE: ${{ secrets.EV_KEYSTORE }}
+ EV_TSA_URL: ${{ secrets.EV_TSA_URL }}
+ EV_CERTIFICATE_PATH: /tmp/ev_cert.pem
+ GCLOUD_ACCESS_TOKEN: ${{ steps.gcloud_auth.outputs.access_token }}
+ JSIGN_PATH: /tmp/jsign-6.0.jar
- name: Build Linux Docker images
id: build-docker
@@ -1183,10 +1230,10 @@ jobs:
uses: google-github-actions/setup-gcloud@77e7a554d41e2ee56fc945c52dfd3f33d12def9a # v2.1.4
- name: Set up Flux CLI
- uses: fluxcd/flux2/action@af67405ee43a6cd66e0b73f4b3802e8583f9d961 # v2.5.0
+ uses: fluxcd/flux2/action@8d5f40dca5aa5d3c0fc3414457dda15a0ac92fa4 # v2.5.1
with:
# Keep this and the github action up to date with the version of flux installed in dogfood cluster
- version: "2.2.1"
+ version: "2.5.1"
- name: Get Cluster Credentials
uses: google-github-actions/get-gke-credentials@7a108e64ed8546fe38316b4086e91da13f4785e1 # v2.3.1
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index 89b4e4e84a401..614b3542d5a80 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -223,21 +223,12 @@ jobs:
distribution: "zulu"
java-version: "11.0"
+ - name: Install go-winres
+ run: go install github.com/tc-hib/go-winres@d743268d7ea168077ddd443c4240562d4f5e8c3e # v0.3.3
+
- name: Install nsis and zstd
run: sudo apt-get install -y nsis zstd
- - name: Download dylibs
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
- with:
- name: dylibs
- path: ./build
-
- - name: Insert dylibs
- run: |
- mv ./build/*amd64.dylib ./site/out/bin/coder-vpn-darwin-amd64.dylib
- mv ./build/*arm64.dylib ./site/out/bin/coder-vpn-darwin-arm64.dylib
- mv ./build/*arm64.h ./site/out/bin/coder-vpn-darwin-dylib.h
-
- name: Install nfpm
run: |
set -euo pipefail
@@ -294,6 +285,18 @@ jobs:
- name: Setup GCloud SDK
uses: google-github-actions/setup-gcloud@77e7a554d41e2ee56fc945c52dfd3f33d12def9a # v2.1.4
+ - name: Download dylibs
+ uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
+ with:
+ name: dylibs
+ path: ./build
+
+ - name: Insert dylibs
+ run: |
+ mv ./build/*amd64.dylib ./site/out/bin/coder-vpn-darwin-amd64.dylib
+ mv ./build/*arm64.dylib ./site/out/bin/coder-vpn-darwin-arm64.dylib
+ mv ./build/*arm64.h ./site/out/bin/coder-vpn-darwin-dylib.h
+
- name: Build binaries
run: |
set -euo pipefail
@@ -310,6 +313,7 @@ jobs:
env:
CODER_SIGN_WINDOWS: "1"
CODER_SIGN_DARWIN: "1"
+ CODER_WINDOWS_RESOURCES: "1"
AC_CERTIFICATE_FILE: /tmp/apple_cert.p12
AC_CERTIFICATE_PASSWORD_FILE: /tmp/apple_cert_password.txt
AC_APIKEY_ISSUER_ID: ${{ secrets.AC_APIKEY_ISSUER_ID }}
diff --git a/buildinfo/resources/.gitignore b/buildinfo/resources/.gitignore
new file mode 100644
index 0000000000000..40679b193bdf9
--- /dev/null
+++ b/buildinfo/resources/.gitignore
@@ -0,0 +1 @@
+*.syso
diff --git a/buildinfo/resources/resources.go b/buildinfo/resources/resources.go
new file mode 100644
index 0000000000000..cd1e3e70af2b7
--- /dev/null
+++ b/buildinfo/resources/resources.go
@@ -0,0 +1,8 @@
+// This package is used for embedding .syso resource files into the binary
+// during build and does not contain any code. During build, .syso files will be
+// dropped in this directory and then removed after the build completes.
+//
+// This package must be imported by all binaries for this to work.
+//
+// See build_go.sh for more details.
+package resources
diff --git a/cmd/coder/main.go b/cmd/coder/main.go
index 1c22d578d7160..27918798b3a12 100644
--- a/cmd/coder/main.go
+++ b/cmd/coder/main.go
@@ -8,6 +8,7 @@ import (
tea "github.com/charmbracelet/bubbletea"
"github.com/coder/coder/v2/agent/agentexec"
+ _ "github.com/coder/coder/v2/buildinfo/resources"
"github.com/coder/coder/v2/cli"
)
diff --git a/enterprise/cmd/coder/main.go b/enterprise/cmd/coder/main.go
index 803903f390e5a..217cca324b762 100644
--- a/enterprise/cmd/coder/main.go
+++ b/enterprise/cmd/coder/main.go
@@ -8,6 +8,7 @@ import (
tea "github.com/charmbracelet/bubbletea"
"github.com/coder/coder/v2/agent/agentexec"
+ _ "github.com/coder/coder/v2/buildinfo/resources"
entcli "github.com/coder/coder/v2/enterprise/cli"
)
diff --git a/scripts/build_go.sh b/scripts/build_go.sh
index 91fc3a1e4b3e3..3e23e15d8b962 100755
--- a/scripts/build_go.sh
+++ b/scripts/build_go.sh
@@ -36,17 +36,19 @@ source "$(dirname "${BASH_SOURCE[0]}")/lib.sh"
version=""
os="${GOOS:-linux}"
arch="${GOARCH:-amd64}"
+output_path=""
slim="${CODER_SLIM_BUILD:-0}"
+agpl="${CODER_BUILD_AGPL:-0}"
sign_darwin="${CODER_SIGN_DARWIN:-0}"
sign_windows="${CODER_SIGN_WINDOWS:-0}"
-bin_ident="com.coder.cli"
-output_path=""
-agpl="${CODER_BUILD_AGPL:-0}"
boringcrypto=${CODER_BUILD_BORINGCRYPTO:-0}
-debug=0
dylib=0
+windows_resources="${CODER_WINDOWS_RESOURCES:-0}"
+debug=0
+
+bin_ident="com.coder.cli"
-args="$(getopt -o "" -l version:,os:,arch:,output:,slim,agpl,sign-darwin,boringcrypto,dylib,debug -- "$@")"
+args="$(getopt -o "" -l version:,os:,arch:,output:,slim,agpl,sign-darwin,sign-windows,boringcrypto,dylib,windows-resources,debug -- "$@")"
eval set -- "$args"
while true; do
case "$1" in
@@ -79,6 +81,10 @@ while true; do
sign_darwin=1
shift
;;
+ --sign-windows)
+ sign_windows=1
+ shift
+ ;;
--boringcrypto)
boringcrypto=1
shift
@@ -87,6 +93,10 @@ while true; do
dylib=1
shift
;;
+ --windows-resources)
+ windows_resources=1
+ shift
+ ;;
--debug)
debug=1
shift
@@ -115,11 +125,13 @@ if [[ "$sign_darwin" == 1 ]]; then
dependencies rcodesign
requiredenvs AC_CERTIFICATE_FILE AC_CERTIFICATE_PASSWORD_FILE
fi
-
if [[ "$sign_windows" == 1 ]]; then
dependencies java
requiredenvs JSIGN_PATH EV_KEYSTORE EV_KEY EV_CERTIFICATE_PATH EV_TSA_URL GCLOUD_ACCESS_TOKEN
fi
+if [[ "$windows_resources" == 1 ]]; then
+ dependencies go-winres
+fi
ldflags=(
-X "'github.com/coder/coder/v2/buildinfo.tag=$version'"
@@ -204,10 +216,100 @@ if [[ "$boringcrypto" == 1 ]]; then
goexp="boringcrypto"
fi
+# On Windows, we use go-winres to embed the resources into the binary.
+if [[ "$windows_resources" == 1 ]] && [[ "$os" == "windows" ]]; then
+ # Convert the version to a format that Windows understands.
+ # Remove any trailing data after a "+" or "-".
+ version_windows=$version
+ version_windows="${version_windows%+*}"
+ version_windows="${version_windows%-*}"
+ # If there wasn't any extra data, add a .0 to the version. Otherwise, add
+ # a .1 to the version to signify that this is not a release build so it can
+ # be distinguished from a release build.
+ non_release_build=0
+ if [[ "$version_windows" == "$version" ]]; then
+ version_windows+=".0"
+ else
+ version_windows+=".1"
+ non_release_build=1
+ fi
+
+ if [[ ! "$version_windows" =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-1]$ ]]; then
+ error "Computed invalid windows version format: $version_windows"
+ fi
+
+ # File description changes based on slimness, AGPL status, and architecture.
+ file_description="Coder"
+ if [[ "$agpl" == 1 ]]; then
+ file_description+=" AGPL"
+ fi
+ if [[ "$slim" == 1 ]]; then
+ file_description+=" CLI"
+ fi
+ if [[ "$non_release_build" == 1 ]]; then
+ file_description+=" (development build)"
+ fi
+
+ # Because this writes to a file with the OS and arch in the filename, we
+ # don't support concurrent builds for the same OS and arch (regardless of
+ # slimness or AGPL status).
+ #
+ # This is fine since we only embed resources during dogfood and release
+ # builds, which use make (which will build all slim targets in parallel,
+ # then all non-slim targets in parallel).
+ expected_rsrc_file="./buildinfo/resources/resources_windows_${arch}.syso"
+ if [[ -f "$expected_rsrc_file" ]]; then
+ rm "$expected_rsrc_file"
+ fi
+ touch "$expected_rsrc_file"
+
+ pushd ./buildinfo/resources
+ GOARCH="$arch" go-winres simply \
+ --arch "$arch" \
+ --out "resources" \
+ --product-version "$version_windows" \
+ --file-version "$version_windows" \
+ --manifest "cli" \
+ --file-description "$file_description" \
+ --product-name "Coder" \
+ --copyright "Copyright $(date +%Y) Coder Technologies Inc." \
+ --original-filename "coder.exe" \
+ --icon ../../scripts/win-installer/coder.ico
+ popd
+
+ if [[ ! -f "$expected_rsrc_file" ]]; then
+ error "Failed to generate $expected_rsrc_file"
+ fi
+fi
+
+set +e
GOEXPERIMENT="$goexp" CGO_ENABLED="$cgo" GOOS="$os" GOARCH="$arch" GOARM="$arm_version" \
go build \
"${build_args[@]}" \
"$cmd_path" 1>&2
+exit_code=$?
+set -e
+
+# Clean up the resources file if it was generated.
+if [[ "$windows_resources" == 1 ]] && [[ "$os" == "windows" ]]; then
+ rm "$expected_rsrc_file"
+fi
+
+if [[ "$exit_code" != 0 ]]; then
+ exit "$exit_code"
+fi
+
+# If we did embed resources, verify that they were included.
+if [[ "$windows_resources" == 1 ]] && [[ "$os" == "windows" ]]; then
+ winres_dir=$(mktemp -d)
+ if ! go-winres extract --dir "$winres_dir" "$output_path" 1>&2; then
+ rm -rf "$winres_dir"
+ error "Compiled binary does not contain embedded resources"
+ fi
+ # If go-winres didn't return an error, it means it did find embedded
+ # resources.
+ rm -rf "$winres_dir"
+fi
if [[ "$sign_darwin" == 1 ]] && [[ "$os" == "darwin" ]]; then
execrelative ./sign_darwin.sh "$output_path" "$bin_ident" 1>&2
From ec44f06f5c460553fe1d9cc338666c3264e909e0 Mon Sep 17 00:00:00 2001
From: Cian Johnston
Date: Fri, 28 Feb 2025 09:38:45 +0000
Subject: [PATCH 29/44] feat(cli): allow SSH command to connect to running
container (#16726)
Fixes https://github.com/coder/coder/issues/16709 and
https://github.com/coder/coder/issues/16420
Adds the capability to `coder ssh` into a running container if `CODER_AGENT_DEVCONTAINERS_ENABLE=true`.
Notes:
* SFTP is currently not supported
* Haven't tested X11 container forwarding
* Haven't tested agent forwarding
---
agent/agent.go | 12 ++--
agent/agent_test.go | 2 +-
agent/agentssh/agentssh.go | 70 +++++++++++++++++----
agent/reconnectingpty/server.go | 4 +-
cli/agent.go | 44 +++++++-------
cli/exp_rpty_test.go | 4 +-
cli/ssh.go | 56 +++++++++++++++++
cli/ssh_test.go | 104 ++++++++++++++++++++++++++++++++
8 files changed, 253 insertions(+), 43 deletions(-)
diff --git a/agent/agent.go b/agent/agent.go
index 504fff2386826..614ae0fdd0e65 100644
--- a/agent/agent.go
+++ b/agent/agent.go
@@ -91,8 +91,8 @@ type Options struct {
Execer agentexec.Execer
ContainerLister agentcontainers.Lister
- ExperimentalContainersEnabled bool
- ExperimentalConnectionReports bool
+ ExperimentalConnectionReports bool
+ ExperimentalDevcontainersEnabled bool
}
type Client interface {
@@ -156,7 +156,7 @@ func New(options Options) Agent {
options.Execer = agentexec.DefaultExecer
}
if options.ContainerLister == nil {
- options.ContainerLister = agentcontainers.NewDocker(options.Execer)
+ options.ContainerLister = agentcontainers.NoopLister{}
}
hardCtx, hardCancel := context.WithCancel(context.Background())
@@ -195,7 +195,7 @@ func New(options Options) Agent {
execer: options.Execer,
lister: options.ContainerLister,
- experimentalDevcontainersEnabled: options.ExperimentalContainersEnabled,
+ experimentalDevcontainersEnabled: options.ExperimentalDevcontainersEnabled,
experimentalConnectionReports: options.ExperimentalConnectionReports,
}
// Initially, we have a closed channel, reflecting the fact that we are not initially connected.
@@ -307,6 +307,8 @@ func (a *agent) init() {
return a.reportConnection(id, connectionType, ip)
},
+
+ ExperimentalDevContainersEnabled: a.experimentalDevcontainersEnabled,
})
if err != nil {
panic(err)
@@ -335,7 +337,7 @@ func (a *agent) init() {
a.metrics.connectionsTotal, a.metrics.reconnectingPTYErrors,
a.reconnectingPTYTimeout,
func(s *reconnectingpty.Server) {
- s.ExperimentalContainersEnabled = a.experimentalDevcontainersEnabled
+ s.ExperimentalDevcontainersEnabled = a.experimentalDevcontainersEnabled
},
)
go a.runLoop()
diff --git a/agent/agent_test.go b/agent/agent_test.go
index 7ccce20ae776e..6e27f525f8cb4 100644
--- a/agent/agent_test.go
+++ b/agent/agent_test.go
@@ -1841,7 +1841,7 @@ func TestAgent_ReconnectingPTYContainer(t *testing.T) {
// nolint: dogsled
conn, _, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0, func(_ *agenttest.Client, o *agent.Options) {
- o.ExperimentalContainersEnabled = true
+ o.ExperimentalDevcontainersEnabled = true
})
ac, err := conn.ReconnectingPTY(ctx, uuid.New(), 80, 80, "/bin/sh", func(arp *workspacesdk.AgentReconnectingPTYInit) {
arp.Container = ct.Container.ID
diff --git a/agent/agentssh/agentssh.go b/agent/agentssh/agentssh.go
index 4a5d3215db911..b1a1f32baf032 100644
--- a/agent/agentssh/agentssh.go
+++ b/agent/agentssh/agentssh.go
@@ -29,6 +29,7 @@ import (
"cdr.dev/slog"
+ "github.com/coder/coder/v2/agent/agentcontainers"
"github.com/coder/coder/v2/agent/agentexec"
"github.com/coder/coder/v2/agent/agentrsa"
"github.com/coder/coder/v2/agent/usershell"
@@ -60,6 +61,14 @@ const (
// MagicSessionTypeEnvironmentVariable is used to track the purpose behind an SSH connection.
// This is stripped from any commands being executed, and is counted towards connection stats.
MagicSessionTypeEnvironmentVariable = "CODER_SSH_SESSION_TYPE"
+ // ContainerEnvironmentVariable is used to specify the target container for an SSH connection.
+ // This is stripped from any commands being executed.
+ // Only available if CODER_AGENT_DEVCONTAINERS_ENABLE=true.
+ ContainerEnvironmentVariable = "CODER_CONTAINER"
+ // ContainerUserEnvironmentVariable is used to specify the container user for
+ // an SSH connection.
+ // Only available if CODER_AGENT_DEVCONTAINERS_ENABLE=true.
+ ContainerUserEnvironmentVariable = "CODER_CONTAINER_USER"
)
// MagicSessionType enums.
@@ -104,6 +113,9 @@ type Config struct {
BlockFileTransfer bool
// ReportConnection.
ReportConnection reportConnectionFunc
+ // Experimental: allow connecting to running containers if
+ // CODER_AGENT_DEVCONTAINERS_ENABLE=true.
+ ExperimentalDevContainersEnabled bool
}
type Server struct {
@@ -324,6 +336,22 @@ func (s *sessionCloseTracker) Close() error {
return s.Session.Close()
}
+func extractContainerInfo(env []string) (container, containerUser string, filteredEnv []string) {
+ for _, kv := range env {
+ if strings.HasPrefix(kv, ContainerEnvironmentVariable+"=") {
+ container = strings.TrimPrefix(kv, ContainerEnvironmentVariable+"=")
+ }
+
+ if strings.HasPrefix(kv, ContainerUserEnvironmentVariable+"=") {
+ containerUser = strings.TrimPrefix(kv, ContainerUserEnvironmentVariable+"=")
+ }
+ }
+
+ return container, containerUser, slices.DeleteFunc(env, func(kv string) bool {
+ return strings.HasPrefix(kv, ContainerEnvironmentVariable+"=") || strings.HasPrefix(kv, ContainerUserEnvironmentVariable+"=")
+ })
+}
+
func (s *Server) sessionHandler(session ssh.Session) {
ctx := session.Context()
id := uuid.New()
@@ -353,6 +381,7 @@ func (s *Server) sessionHandler(session ssh.Session) {
defer s.trackSession(session, false)
reportSession := true
+
switch magicType {
case MagicSessionTypeVSCode:
s.connCountVSCode.Add(1)
@@ -395,9 +424,22 @@ func (s *Server) sessionHandler(session ssh.Session) {
return
}
+ container, containerUser, env := extractContainerInfo(env)
+ if container != "" {
+ s.logger.Debug(ctx, "container info",
+ slog.F("container", container),
+ slog.F("container_user", containerUser),
+ )
+ }
+
switch ss := session.Subsystem(); ss {
case "":
case "sftp":
+ if s.config.ExperimentalDevContainersEnabled && container != "" {
+ closeCause("sftp not yet supported with containers")
+ _ = session.Exit(1)
+ return
+ }
err := s.sftpHandler(logger, session)
if err != nil {
closeCause(err.Error())
@@ -422,7 +464,7 @@ func (s *Server) sessionHandler(session ssh.Session) {
env = append(env, fmt.Sprintf("DISPLAY=localhost:%d.%d", display, x11.ScreenNumber))
}
- err := s.sessionStart(logger, session, env, magicType)
+ err := s.sessionStart(logger, session, env, magicType, container, containerUser)
var exitError *exec.ExitError
if xerrors.As(err, &exitError) {
code := exitError.ExitCode()
@@ -495,18 +537,27 @@ func (s *Server) fileTransferBlocked(session ssh.Session) bool {
return false
}
-func (s *Server) sessionStart(logger slog.Logger, session ssh.Session, env []string, magicType MagicSessionType) (retErr error) {
+func (s *Server) sessionStart(logger slog.Logger, session ssh.Session, env []string, magicType MagicSessionType, container, containerUser string) (retErr error) {
ctx := session.Context()
magicTypeLabel := magicTypeMetricLabel(magicType)
sshPty, windowSize, isPty := session.Pty()
+ ptyLabel := "no"
+ if isPty {
+ ptyLabel = "yes"
+ }
- cmd, err := s.CreateCommand(ctx, session.RawCommand(), env, nil)
- if err != nil {
- ptyLabel := "no"
- if isPty {
- ptyLabel = "yes"
+ var ei usershell.EnvInfoer
+ var err error
+ if s.config.ExperimentalDevContainersEnabled && container != "" {
+ ei, err = agentcontainers.EnvInfo(ctx, s.Execer, container, containerUser)
+ if err != nil {
+ s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, ptyLabel, "container_env_info").Add(1)
+ return err
}
+ }
+ cmd, err := s.CreateCommand(ctx, session.RawCommand(), env, ei)
+ if err != nil {
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, ptyLabel, "create_command").Add(1)
return err
}
@@ -514,11 +565,6 @@ func (s *Server) sessionStart(logger slog.Logger, session ssh.Session, env []str
if ssh.AgentRequested(session) {
l, err := ssh.NewAgentListener()
if err != nil {
- ptyLabel := "no"
- if isPty {
- ptyLabel = "yes"
- }
-
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, ptyLabel, "listener").Add(1)
return xerrors.Errorf("new agent listener: %w", err)
}
diff --git a/agent/reconnectingpty/server.go b/agent/reconnectingpty/server.go
index 7ad7db976c8b0..33ed76a73c60e 100644
--- a/agent/reconnectingpty/server.go
+++ b/agent/reconnectingpty/server.go
@@ -32,7 +32,7 @@ type Server struct {
reconnectingPTYs sync.Map
timeout time.Duration
- ExperimentalContainersEnabled bool
+ ExperimentalDevcontainersEnabled bool
}
// NewServer returns a new ReconnectingPTY server
@@ -187,7 +187,7 @@ func (s *Server) handleConn(ctx context.Context, logger slog.Logger, conn net.Co
}()
var ei usershell.EnvInfoer
- if s.ExperimentalContainersEnabled && msg.Container != "" {
+ if s.ExperimentalDevcontainersEnabled && msg.Container != "" {
dei, err := agentcontainers.EnvInfo(ctx, s.commandCreator.Execer, msg.Container, msg.ContainerUser)
if err != nil {
return xerrors.Errorf("get container env info: %w", err)
diff --git a/cli/agent.go b/cli/agent.go
index 638f7083805ab..5466ba9a5bc67 100644
--- a/cli/agent.go
+++ b/cli/agent.go
@@ -38,24 +38,24 @@ import (
func (r *RootCmd) workspaceAgent() *serpent.Command {
var (
- auth string
- logDir string
- scriptDataDir string
- pprofAddress string
- noReap bool
- sshMaxTimeout time.Duration
- tailnetListenPort int64
- prometheusAddress string
- debugAddress string
- slogHumanPath string
- slogJSONPath string
- slogStackdriverPath string
- blockFileTransfer bool
- agentHeaderCommand string
- agentHeader []string
- devcontainersEnabled bool
-
- experimentalConnectionReports bool
+ auth string
+ logDir string
+ scriptDataDir string
+ pprofAddress string
+ noReap bool
+ sshMaxTimeout time.Duration
+ tailnetListenPort int64
+ prometheusAddress string
+ debugAddress string
+ slogHumanPath string
+ slogJSONPath string
+ slogStackdriverPath string
+ blockFileTransfer bool
+ agentHeaderCommand string
+ agentHeader []string
+
+ experimentalConnectionReports bool
+ experimentalDevcontainersEnabled bool
)
cmd := &serpent.Command{
Use: "agent",
@@ -319,7 +319,7 @@ func (r *RootCmd) workspaceAgent() *serpent.Command {
}
var containerLister agentcontainers.Lister
- if !devcontainersEnabled {
+ if !experimentalDevcontainersEnabled {
logger.Info(ctx, "agent devcontainer detection not enabled")
containerLister = &agentcontainers.NoopLister{}
} else {
@@ -358,8 +358,8 @@ func (r *RootCmd) workspaceAgent() *serpent.Command {
Execer: execer,
ContainerLister: containerLister,
- ExperimentalContainersEnabled: devcontainersEnabled,
- ExperimentalConnectionReports: experimentalConnectionReports,
+ ExperimentalDevcontainersEnabled: experimentalDevcontainersEnabled,
+ ExperimentalConnectionReports: experimentalConnectionReports,
})
promHandler := agent.PrometheusMetricsHandler(prometheusRegistry, logger)
@@ -487,7 +487,7 @@ func (r *RootCmd) workspaceAgent() *serpent.Command {
Default: "false",
Env: "CODER_AGENT_DEVCONTAINERS_ENABLE",
Description: "Allow the agent to automatically detect running devcontainers.",
- Value: serpent.BoolOf(&devcontainersEnabled),
+ Value: serpent.BoolOf(&experimentalDevcontainersEnabled),
},
{
Flag: "experimental-connection-reports-enable",
diff --git a/cli/exp_rpty_test.go b/cli/exp_rpty_test.go
index 782a7b5c08d48..bfede8213d4c9 100644
--- a/cli/exp_rpty_test.go
+++ b/cli/exp_rpty_test.go
@@ -9,6 +9,7 @@ import (
"github.com/ory/dockertest/v3/docker"
"github.com/coder/coder/v2/agent"
+ "github.com/coder/coder/v2/agent/agentcontainers"
"github.com/coder/coder/v2/agent/agenttest"
"github.com/coder/coder/v2/cli/clitest"
"github.com/coder/coder/v2/coderd/coderdtest"
@@ -88,7 +89,8 @@ func TestExpRpty(t *testing.T) {
})
_ = agenttest.New(t, client.URL, agentToken, func(o *agent.Options) {
- o.ExperimentalContainersEnabled = true
+ o.ExperimentalDevcontainersEnabled = true
+ o.ContainerLister = agentcontainers.NewDocker(o.Execer)
})
_ = coderdtest.NewWorkspaceAgentWaiter(t, client, workspace.ID).Wait()
diff --git a/cli/ssh.go b/cli/ssh.go
index 884c5500d703c..da84a7886b048 100644
--- a/cli/ssh.go
+++ b/cli/ssh.go
@@ -34,6 +34,7 @@ import (
"cdr.dev/slog"
"cdr.dev/slog/sloggers/sloghuman"
+ "github.com/coder/coder/v2/agent/agentssh"
"github.com/coder/coder/v2/cli/cliui"
"github.com/coder/coder/v2/cli/cliutil"
"github.com/coder/coder/v2/coderd/autobuild/notify"
@@ -76,6 +77,9 @@ func (r *RootCmd) ssh() *serpent.Command {
appearanceConfig codersdk.AppearanceConfig
networkInfoDir string
networkInfoInterval time.Duration
+
+ containerName string
+ containerUser string
)
client := new(codersdk.Client)
cmd := &serpent.Command{
@@ -282,6 +286,34 @@ func (r *RootCmd) ssh() *serpent.Command {
}
conn.AwaitReachable(ctx)
+ if containerName != "" {
+ cts, err := client.WorkspaceAgentListContainers(ctx, workspaceAgent.ID, nil)
+ if err != nil {
+ return xerrors.Errorf("list containers: %w", err)
+ }
+ if len(cts.Containers) == 0 {
+ cliui.Info(inv.Stderr, "No containers found!")
+ cliui.Info(inv.Stderr, "Tip: Agent container integration is experimental and not enabled by default.")
+ cliui.Info(inv.Stderr, " To enable it, set CODER_AGENT_DEVCONTAINERS_ENABLE=true in your template.")
+ return nil
+ }
+ var found bool
+ for _, c := range cts.Containers {
+ if c.FriendlyName == containerName || c.ID == containerName {
+ found = true
+ break
+ }
+ }
+ if !found {
+ availableContainers := make([]string, len(cts.Containers))
+ for i, c := range cts.Containers {
+ availableContainers[i] = c.FriendlyName
+ }
+ cliui.Errorf(inv.Stderr, "Container not found: %q\nAvailable containers: %v", containerName, availableContainers)
+ return nil
+ }
+ }
+
stopPolling := tryPollWorkspaceAutostop(ctx, client, workspace)
defer stopPolling()
@@ -454,6 +486,17 @@ func (r *RootCmd) ssh() *serpent.Command {
}
}
+ if containerName != "" {
+ for k, v := range map[string]string{
+ agentssh.ContainerEnvironmentVariable: containerName,
+ agentssh.ContainerUserEnvironmentVariable: containerUser,
+ } {
+ if err := sshSession.Setenv(k, v); err != nil {
+ return xerrors.Errorf("setenv: %w", err)
+ }
+ }
+ }
+
err = sshSession.RequestPty("xterm-256color", 128, 128, gossh.TerminalModes{})
if err != nil {
return xerrors.Errorf("request pty: %w", err)
@@ -594,6 +637,19 @@ func (r *RootCmd) ssh() *serpent.Command {
Default: "5s",
Value: serpent.DurationOf(&networkInfoInterval),
},
+ {
+ Flag: "container",
+ FlagShorthand: "c",
+ Description: "Specifies a container inside the workspace to connect to.",
+ Value: serpent.StringOf(&containerName),
+ Hidden: true, // Hidden until this feature is at least in beta.
+ },
+ {
+ Flag: "container-user",
+ Description: "When connecting to a container, specifies the user to connect as.",
+ Value: serpent.StringOf(&containerUser),
+ Hidden: true, // Hidden until this feature is at least in beta.
+ },
sshDisableAutostartOption(serpent.BoolOf(&disableAutostart)),
}
return cmd
diff --git a/cli/ssh_test.go b/cli/ssh_test.go
index d20278bbf7ced..8a8d2d6ef3f6f 100644
--- a/cli/ssh_test.go
+++ b/cli/ssh_test.go
@@ -24,6 +24,8 @@ import (
"time"
"github.com/google/uuid"
+ "github.com/ory/dockertest/v3"
+ "github.com/ory/dockertest/v3/docker"
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -33,6 +35,7 @@ import (
"golang.org/x/xerrors"
"github.com/coder/coder/v2/agent"
+ "github.com/coder/coder/v2/agent/agentcontainers"
"github.com/coder/coder/v2/agent/agentssh"
"github.com/coder/coder/v2/agent/agenttest"
agentproto "github.com/coder/coder/v2/agent/proto"
@@ -1924,6 +1927,107 @@ Expire-Date: 0
<-cmdDone
}
+func TestSSH_Container(t *testing.T) {
+ t.Parallel()
+ if runtime.GOOS != "linux" {
+ t.Skip("Skipping test on non-Linux platform")
+ }
+
+ t.Run("OK", func(t *testing.T) {
+ t.Parallel()
+
+ client, workspace, agentToken := setupWorkspaceForAgent(t)
+ ctx := testutil.Context(t, testutil.WaitLong)
+ pool, err := dockertest.NewPool("")
+ require.NoError(t, err, "Could not connect to docker")
+ ct, err := pool.RunWithOptions(&dockertest.RunOptions{
+ Repository: "busybox",
+ Tag: "latest",
+ Cmd: []string{"sleep", "infnity"},
+ }, func(config *docker.HostConfig) {
+ config.AutoRemove = true
+ config.RestartPolicy = docker.RestartPolicy{Name: "no"}
+ })
+ require.NoError(t, err, "Could not start container")
+ // Wait for container to start
+ require.Eventually(t, func() bool {
+ ct, ok := pool.ContainerByName(ct.Container.Name)
+ return ok && ct.Container.State.Running
+ }, testutil.WaitShort, testutil.IntervalSlow, "Container did not start in time")
+ t.Cleanup(func() {
+ err := pool.Purge(ct)
+ require.NoError(t, err, "Could not stop container")
+ })
+
+ _ = agenttest.New(t, client.URL, agentToken, func(o *agent.Options) {
+ o.ExperimentalDevcontainersEnabled = true
+ o.ContainerLister = agentcontainers.NewDocker(o.Execer)
+ })
+ _ = coderdtest.NewWorkspaceAgentWaiter(t, client, workspace.ID).Wait()
+
+ inv, root := clitest.New(t, "ssh", workspace.Name, "-c", ct.Container.ID)
+ clitest.SetupConfig(t, client, root)
+ ptty := ptytest.New(t).Attach(inv)
+
+ cmdDone := tGo(t, func() {
+ err := inv.WithContext(ctx).Run()
+ assert.NoError(t, err)
+ })
+
+ ptty.ExpectMatch(" #")
+ ptty.WriteLine("hostname")
+ ptty.ExpectMatch(ct.Container.Config.Hostname)
+ ptty.WriteLine("exit")
+ <-cmdDone
+ })
+
+ t.Run("NotFound", func(t *testing.T) {
+ t.Parallel()
+
+ ctx := testutil.Context(t, testutil.WaitShort)
+ client, workspace, agentToken := setupWorkspaceForAgent(t)
+ _ = agenttest.New(t, client.URL, agentToken, func(o *agent.Options) {
+ o.ExperimentalDevcontainersEnabled = true
+ o.ContainerLister = agentcontainers.NewDocker(o.Execer)
+ })
+ _ = coderdtest.NewWorkspaceAgentWaiter(t, client, workspace.ID).Wait()
+
+ inv, root := clitest.New(t, "ssh", workspace.Name, "-c", uuid.NewString())
+ clitest.SetupConfig(t, client, root)
+ ptty := ptytest.New(t).Attach(inv)
+
+ cmdDone := tGo(t, func() {
+ err := inv.WithContext(ctx).Run()
+ assert.NoError(t, err)
+ })
+
+ ptty.ExpectMatch("Container not found:")
+ <-cmdDone
+ })
+
+ t.Run("NotEnabled", func(t *testing.T) {
+ t.Parallel()
+
+ ctx := testutil.Context(t, testutil.WaitShort)
+ client, workspace, agentToken := setupWorkspaceForAgent(t)
+ _ = agenttest.New(t, client.URL, agentToken)
+ _ = coderdtest.NewWorkspaceAgentWaiter(t, client, workspace.ID).Wait()
+
+ inv, root := clitest.New(t, "ssh", workspace.Name, "-c", uuid.NewString())
+ clitest.SetupConfig(t, client, root)
+ ptty := ptytest.New(t).Attach(inv)
+
+ cmdDone := tGo(t, func() {
+ err := inv.WithContext(ctx).Run()
+ assert.NoError(t, err)
+ })
+
+ ptty.ExpectMatch("No containers found!")
+ ptty.ExpectMatch("Tip: Agent container integration is experimental and not enabled by default.")
+ <-cmdDone
+ })
+}
+
// tGoContext runs fn in a goroutine passing a context that will be
// canceled on test completion and wait until fn has finished executing.
// Done and cancel are returned for optionally waiting until completion
From 6889ad2e5e540c2e6d434e825146b85a129a135e Mon Sep 17 00:00:00 2001
From: Cian Johnston
Date: Fri, 28 Feb 2025 11:05:50 +0000
Subject: [PATCH 30/44] fix(agent/agentcontainers): remove empty warning if no
containers exist (#16748)
Fixes the current annoying response if no containers are running:
```
{"containers":null,"warnings":[""]}
```
---
agent/agentcontainers/containers_dockercli.go | 18 ++++++++++--------
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/agent/agentcontainers/containers_dockercli.go b/agent/agentcontainers/containers_dockercli.go
index 27e5f835d5adb..5218153bde427 100644
--- a/agent/agentcontainers/containers_dockercli.go
+++ b/agent/agentcontainers/containers_dockercli.go
@@ -253,11 +253,16 @@ func (dcl *DockerCLILister) List(ctx context.Context) (codersdk.WorkspaceAgentLi
return codersdk.WorkspaceAgentListContainersResponse{}, xerrors.Errorf("scan docker ps output: %w", err)
}
+ res := codersdk.WorkspaceAgentListContainersResponse{
+ Containers: make([]codersdk.WorkspaceAgentDevcontainer, 0, len(ids)),
+ Warnings: make([]string, 0),
+ }
dockerPsStderr := strings.TrimSpace(stderrBuf.String())
+ if dockerPsStderr != "" {
+ res.Warnings = append(res.Warnings, dockerPsStderr)
+ }
if len(ids) == 0 {
- return codersdk.WorkspaceAgentListContainersResponse{
- Warnings: []string{dockerPsStderr},
- }, nil
+ return res, nil
}
// now we can get the detailed information for each container
@@ -273,13 +278,10 @@ func (dcl *DockerCLILister) List(ctx context.Context) (codersdk.WorkspaceAgentLi
return codersdk.WorkspaceAgentListContainersResponse{}, xerrors.Errorf("run docker inspect: %w", err)
}
- res := codersdk.WorkspaceAgentListContainersResponse{
- Containers: make([]codersdk.WorkspaceAgentDevcontainer, len(ins)),
- }
- for idx, in := range ins {
+ for _, in := range ins {
out, warns := convertDockerInspect(in)
res.Warnings = append(res.Warnings, warns...)
- res.Containers[idx] = out
+ res.Containers = append(res.Containers, out)
}
if dockerPsStderr != "" {
From e27953d2bcb0516ec74178b52eb33d78a9072e8b Mon Sep 17 00:00:00 2001
From: Sas Swart
Date: Fri, 28 Feb 2025 14:41:53 +0200
Subject: [PATCH 31/44] fix(site): add a beta badge for presets (#16751)
closes #16731
This pull request adds a "beta" badge to the presets input field on the
workspace creation page.
---
.../CreateWorkspacePage/CreateWorkspacePageView.tsx | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/site/src/pages/CreateWorkspacePage/CreateWorkspacePageView.tsx b/site/src/pages/CreateWorkspacePage/CreateWorkspacePageView.tsx
index de72a79e456ef..8a1d380a16191 100644
--- a/site/src/pages/CreateWorkspacePage/CreateWorkspacePageView.tsx
+++ b/site/src/pages/CreateWorkspacePage/CreateWorkspacePageView.tsx
@@ -6,6 +6,7 @@ import { Alert } from "components/Alert/Alert";
import { ErrorAlert } from "components/Alert/ErrorAlert";
import { Avatar } from "components/Avatar/Avatar";
import { Button } from "components/Button/Button";
+import { FeatureStageBadge } from "components/FeatureStageBadge/FeatureStageBadge";
import { SelectFilter } from "components/Filter/SelectFilter";
import {
FormFields,
@@ -274,9 +275,12 @@ export const CreateWorkspacePageView: FC = ({
{presets.length > 0 && (
-
- Select a preset to get started
-
+
+
+ Select a preset to get started
+
+
+
Date: Fri, 28 Feb 2025 15:22:36 +0100
Subject: [PATCH 32/44] fix: locate Terraform entrypoint file (#16753)
Fixes: https://github.com/coder/coder/issues/16360
---
.../TemplateVersionEditorPage.test.tsx | 129 +++++++++++++++++-
.../TemplateVersionEditorPage.tsx | 29 +++-
site/src/utils/filetree.test.ts | 2 +-
site/src/utils/filetree.ts | 4 +-
4 files changed, 158 insertions(+), 6 deletions(-)
diff --git a/site/src/pages/TemplateVersionEditorPage/TemplateVersionEditorPage.test.tsx b/site/src/pages/TemplateVersionEditorPage/TemplateVersionEditorPage.test.tsx
index 07b1485eef770..684272503d01a 100644
--- a/site/src/pages/TemplateVersionEditorPage/TemplateVersionEditorPage.test.tsx
+++ b/site/src/pages/TemplateVersionEditorPage/TemplateVersionEditorPage.test.tsx
@@ -22,9 +22,12 @@ import {
waitForLoaderToBeRemoved,
} from "testHelpers/renderHelpers";
import { server } from "testHelpers/server";
+import type { FileTree } from "utils/filetree";
import type { MonacoEditorProps } from "./MonacoEditor";
import { Language } from "./PublishTemplateVersionDialog";
-import TemplateVersionEditorPage from "./TemplateVersionEditorPage";
+import TemplateVersionEditorPage, {
+ findEntrypointFile,
+} from "./TemplateVersionEditorPage";
const { API } = apiModule;
@@ -409,3 +412,127 @@ function renderEditorPage(queryClient: QueryClient) {
,
);
}
+
+describe("Find entrypoint", () => {
+ it("empty tree", () => {
+ const ft: FileTree = {};
+ const mainFile = findEntrypointFile(ft);
+ expect(mainFile).toBeUndefined();
+ });
+ it("flat structure, main.tf in root", () => {
+ const ft: FileTree = {
+ "aaa.tf": "hello",
+ "bbb.tf": "world",
+ "main.tf": "foobar",
+ "nnn.tf": "foobaz",
+ };
+
+ const mainFile = findEntrypointFile(ft);
+ expect(mainFile).toBe("main.tf");
+ });
+ it("flat structure, no main.tf", () => {
+ const ft: FileTree = {
+ "aaa.tf": "hello",
+ "bbb.tf": "world",
+ "ccc.tf": "foobaz",
+ "nnn.tf": "foobaz",
+ };
+
+ const mainFile = findEntrypointFile(ft);
+ expect(mainFile).toBe("nnn.tf");
+ });
+ it("with dirs, single main.tf", () => {
+ const ft: FileTree = {
+ "aaa-dir": {
+ "aaa.tf": "hello",
+ "bbb.tf": "world",
+ },
+ "bbb-dir": {
+ "aaa.tf": "hello",
+ "bbb.tf": "world",
+ },
+ "main.tf": "foobar",
+ "nnn.tf": "foobaz",
+ };
+
+ const mainFile = findEntrypointFile(ft);
+ expect(mainFile).toBe("main.tf");
+ });
+ it("with dirs, multiple main.tf's", () => {
+ const ft: FileTree = {
+ "aaa-dir": {
+ "aaa.tf": "hello",
+ "bbb.tf": "world",
+ "main.tf": "foobar",
+ },
+ "bbb-dir": {
+ "aaa.tf": "hello",
+ "bbb.tf": "world",
+ "main.tf": "foobar",
+ },
+ "ccc-dir": {
+ "aaa.tf": "hello",
+ "bbb.tf": "world",
+ },
+ "main.tf": "foobar",
+ "nnn.tf": "foobaz",
+ "zzz-dir": {
+ "aaa.tf": "hello",
+ "bbb.tf": "world",
+ "main.tf": "foobar",
+ },
+ };
+
+ const mainFile = findEntrypointFile(ft);
+ expect(mainFile).toBe("main.tf");
+ });
+ it("with dirs, multiple main.tf, no main.tf in root", () => {
+ const ft: FileTree = {
+ "aaa-dir": {
+ "aaa.tf": "hello",
+ "bbb.tf": "world",
+ "main.tf": "foobar",
+ },
+ "bbb-dir": {
+ "aaa.tf": "hello",
+ "bbb.tf": "world",
+ "main.tf": "foobar",
+ },
+ "ccc-dir": {
+ "aaa.tf": "hello",
+ "bbb.tf": "world",
+ },
+ "nnn.tf": "foobaz",
+ "zzz-dir": {
+ "aaa.tf": "hello",
+ "bbb.tf": "world",
+ "main.tf": "foobar",
+ },
+ };
+
+ const mainFile = findEntrypointFile(ft);
+ expect(mainFile).toBe("aaa-dir/main.tf");
+ });
+ it("with dirs, multiple main.tf, unordered file tree", () => {
+ const ft: FileTree = {
+ "ccc-dir": {
+ "aaa.tf": "hello",
+ "bbb.tf": "world",
+ "main.tf": "foobar",
+ },
+ "aaa-dir": {
+ "aaa.tf": "hello",
+ "bbb.tf": "world",
+ "main.tf": "foobar",
+ },
+ "zzz-dir": {
+ "aaa.tf": "hello",
+ "bbb.tf": "world",
+ "main.tf": "foobar",
+ },
+ };
+
+ const mainFile = findEntrypointFile(ft);
+ expect(mainFile).toBe("aaa-dir/main.tf");
+ });
+});
diff --git a/site/src/pages/TemplateVersionEditorPage/TemplateVersionEditorPage.tsx b/site/src/pages/TemplateVersionEditorPage/TemplateVersionEditorPage.tsx
index b3090eb6d3f47..0158c872aed50 100644
--- a/site/src/pages/TemplateVersionEditorPage/TemplateVersionEditorPage.tsx
+++ b/site/src/pages/TemplateVersionEditorPage/TemplateVersionEditorPage.tsx
@@ -90,7 +90,7 @@ export const TemplateVersionEditorPage: FC = () => {
// File navigation
// It can be undefined when a selected file is deleted
const activePath: string | undefined =
- searchParams.get("path") ?? findInitialFile(fileTree ?? {});
+ searchParams.get("path") ?? findEntrypointFile(fileTree ?? {});
const onActivePathChange = (path: string | undefined) => {
if (path) {
searchParams.set("path", path);
@@ -357,10 +357,33 @@ const publishVersion = async (options: {
return Promise.all(publishActions);
};
-const findInitialFile = (fileTree: FileTree): string | undefined => {
+const defaultMainTerraformFile = "main.tf";
+
+// findEntrypointFile function locates the entrypoint file to open in the Editor.
+// It browses the filetree following these steps:
+// 1. If "main.tf" exists in root, return it.
+// 2. Traverse through sub-directories.
+// 3. If "main.tf" exists in a sub-directory, skip further browsing, and return the path.
+// 4. If "main.tf" was not found, return the last reviewed ".tf" file.
+export const findEntrypointFile = (fileTree: FileTree): string | undefined => {
let initialFile: string | undefined;
- traverse(fileTree, (content, filename, path) => {
+ if (Object.keys(fileTree).find((key) => key === defaultMainTerraformFile)) {
+ return defaultMainTerraformFile;
+ }
+
+ let skip = false;
+ traverse(fileTree, (_, filename, path) => {
+ if (skip) {
+ return;
+ }
+
+ if (filename === defaultMainTerraformFile) {
+ initialFile = path;
+ skip = true;
+ return;
+ }
+
if (filename.endsWith(".tf")) {
initialFile = path;
}
diff --git a/site/src/utils/filetree.test.ts b/site/src/utils/filetree.test.ts
index 21746baa6a54c..e4aadaabbe424 100644
--- a/site/src/utils/filetree.test.ts
+++ b/site/src/utils/filetree.test.ts
@@ -122,6 +122,6 @@ test("traverse() go trough all the file tree files", () => {
traverse(fileTree, (_content, _filename, fullPath) => {
filePaths.push(fullPath);
});
- const expectedFilePaths = ["main.tf", "images", "images/java.Dockerfile"];
+ const expectedFilePaths = ["images", "images/java.Dockerfile", "main.tf"];
expect(filePaths).toEqual(expectedFilePaths);
});
diff --git a/site/src/utils/filetree.ts b/site/src/utils/filetree.ts
index 757ed133e55f7..2f7d8ea84533b 100644
--- a/site/src/utils/filetree.ts
+++ b/site/src/utils/filetree.ts
@@ -96,7 +96,9 @@ export const traverse = (
) => void,
parent?: string,
) => {
- for (const [filename, content] of Object.entries(fileTree)) {
+ for (const [filename, content] of Object.entries(fileTree).sort(([a], [b]) =>
+ a.localeCompare(b),
+ )) {
const fullPath = parent ? `${parent}/${filename}` : filename;
callback(content, filename, fullPath);
if (typeof content === "object") {
From 4216e283ec953936567fb50fc697cd966ed92808 Mon Sep 17 00:00:00 2001
From: Marcin Tojek
Date: Fri, 28 Feb 2025 17:14:42 +0100
Subject: [PATCH 33/44] fix: editor: fallback to default entrypoint (#16757)
Related:
https://github.com/coder/coder/pull/16753#discussion_r1975558383
---
.../TemplateVersionEditorPage.test.tsx | 29 +++++++++++++++++++
.../TemplateVersionEditorPage.tsx | 18 +++++++++---
2 files changed, 43 insertions(+), 4 deletions(-)
diff --git a/site/src/pages/TemplateVersionEditorPage/TemplateVersionEditorPage.test.tsx b/site/src/pages/TemplateVersionEditorPage/TemplateVersionEditorPage.test.tsx
index 684272503d01a..999df793105a3 100644
--- a/site/src/pages/TemplateVersionEditorPage/TemplateVersionEditorPage.test.tsx
+++ b/site/src/pages/TemplateVersionEditorPage/TemplateVersionEditorPage.test.tsx
@@ -27,6 +27,7 @@ import type { MonacoEditorProps } from "./MonacoEditor";
import { Language } from "./PublishTemplateVersionDialog";
import TemplateVersionEditorPage, {
findEntrypointFile,
+ getActivePath,
} from "./TemplateVersionEditorPage";
const { API } = apiModule;
@@ -413,6 +414,34 @@ function renderEditorPage(queryClient: QueryClient) {
);
}
+describe("Get active path", () => {
+ it("empty path", () => {
+ const ft: FileTree = {
+ "main.tf": "foobar",
+ };
+ const searchParams = new URLSearchParams({ path: "" });
+ const activePath = getActivePath(searchParams, ft);
+ expect(activePath).toBe("main.tf");
+ });
+ it("invalid path", () => {
+ const ft: FileTree = {
+ "main.tf": "foobar",
+ };
+ const searchParams = new URLSearchParams({ path: "foobaz" });
+ const activePath = getActivePath(searchParams, ft);
+ expect(activePath).toBe("main.tf");
+ });
+ it("valid path", () => {
+ const ft: FileTree = {
+ "main.tf": "foobar",
+ "foobar.tf": "foobaz",
+ };
+ const searchParams = new URLSearchParams({ path: "foobar.tf" });
+ const activePath = getActivePath(searchParams, ft);
+ expect(activePath).toBe("foobar.tf");
+ });
+});
+
describe("Find entrypoint", () => {
it("empty tree", () => {
const ft: FileTree = {};
diff --git a/site/src/pages/TemplateVersionEditorPage/TemplateVersionEditorPage.tsx b/site/src/pages/TemplateVersionEditorPage/TemplateVersionEditorPage.tsx
index 0158c872aed50..0339d6df506f6 100644
--- a/site/src/pages/TemplateVersionEditorPage/TemplateVersionEditorPage.tsx
+++ b/site/src/pages/TemplateVersionEditorPage/TemplateVersionEditorPage.tsx
@@ -20,7 +20,7 @@ import { type FC, useEffect, useState } from "react";
import { Helmet } from "react-helmet-async";
import { useMutation, useQuery, useQueryClient } from "react-query";
import { useNavigate, useParams, useSearchParams } from "react-router-dom";
-import { type FileTree, traverse } from "utils/filetree";
+import { type FileTree, existsFile, traverse } from "utils/filetree";
import { pageTitle } from "utils/page";
import { TarReader, TarWriter } from "utils/tar";
import { createTemplateVersionFileTree } from "utils/templateVersion";
@@ -88,9 +88,8 @@ export const TemplateVersionEditorPage: FC = () => {
useState();
// File navigation
- // It can be undefined when a selected file is deleted
- const activePath: string | undefined =
- searchParams.get("path") ?? findEntrypointFile(fileTree ?? {});
+ const activePath = getActivePath(searchParams, fileTree || {});
+
const onActivePathChange = (path: string | undefined) => {
if (path) {
searchParams.set("path", path);
@@ -392,4 +391,15 @@ export const findEntrypointFile = (fileTree: FileTree): string | undefined => {
return initialFile;
};
+export const getActivePath = (
+ searchParams: URLSearchParams,
+ fileTree: FileTree,
+): string | undefined => {
+ const selectedPath = searchParams.get("path");
+ if (selectedPath && existsFile(selectedPath, fileTree)) {
+ return selectedPath;
+ }
+ return findEntrypointFile(fileTree);
+};
+
export default TemplateVersionEditorPage;
From fc2815cfdbe585ac948dab0ddd33fc363635e06e Mon Sep 17 00:00:00 2001
From: Guspan Tanadi <36249910+guspan-tanadi@users.noreply.github.com>
Date: Sun, 2 Mar 2025 22:55:36 +0700
Subject: [PATCH 34/44] docs: fix anchor and repo links (#16555)
---
docs/admin/networking/index.md | 2 +-
docs/admin/networking/port-forwarding.md | 2 +-
docs/admin/templates/extending-templates/icons.md | 8 ++++----
docs/admin/templates/extending-templates/web-ides.md | 2 +-
4 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/docs/admin/networking/index.md b/docs/admin/networking/index.md
index 9858a8bfe4316..132b4775eeec6 100644
--- a/docs/admin/networking/index.md
+++ b/docs/admin/networking/index.md
@@ -76,7 +76,7 @@ as well. There must not be a NAT between users and the coder server.
Template admins can overwrite the site-wide access URL at the template level by
leveraging the `url` argument when
-[defining the Coder provider](https://registry.terraform.io/providers/coder/coder/latest/docs#url):
+[defining the Coder provider](https://registry.terraform.io/providers/coder/coder/latest/docs#url-1):
```terraform
provider "coder" {
diff --git a/docs/admin/networking/port-forwarding.md b/docs/admin/networking/port-forwarding.md
index 34a7133b75855..7cab58ff02eb8 100644
--- a/docs/admin/networking/port-forwarding.md
+++ b/docs/admin/networking/port-forwarding.md
@@ -106,7 +106,7 @@ only supported on Windows and Linux workspace agents).
We allow developers to share ports as URLs, either with other authenticated
coder users or publicly. Using the open ports interface, developers can assign a
sharing levels that match our `coder_app`’s share option in
-[Coder terraform provider](https://registry.terraform.io/providers/coder/coder/latest/docs/resources/app#share).
+[Coder terraform provider](https://registry.terraform.io/providers/coder/coder/latest/docs/resources/app#share-1).
- `owner` (Default): The implicit sharing level for all listening ports, only
visible to the workspace owner
diff --git a/docs/admin/templates/extending-templates/icons.md b/docs/admin/templates/extending-templates/icons.md
index 6f9876210b807..f7e50641997c0 100644
--- a/docs/admin/templates/extending-templates/icons.md
+++ b/docs/admin/templates/extending-templates/icons.md
@@ -12,13 +12,13 @@ come bundled with your Coder deployment.
- [**Terraform**](https://registry.terraform.io/providers/coder/coder/latest/docs):
- - [`coder_app`](https://registry.terraform.io/providers/coder/coder/latest/docs/resources/app#icon)
- - [`coder_parameter`](https://registry.terraform.io/providers/coder/coder/latest/docs/data-sources/parameter#icon)
+ - [`coder_app`](https://registry.terraform.io/providers/coder/coder/latest/docs/resources/app#icon-1)
+ - [`coder_parameter`](https://registry.terraform.io/providers/coder/coder/latest/docs/data-sources/parameter#icon-1)
and
[`option`](https://registry.terraform.io/providers/coder/coder/latest/docs/data-sources/parameter#nested-schema-for-option)
blocks
- - [`coder_script`](https://registry.terraform.io/providers/coder/coder/latest/docs/resources/script#icon)
- - [`coder_metadata`](https://registry.terraform.io/providers/coder/coder/latest/docs/resources/metadata#icon)
+ - [`coder_script`](https://registry.terraform.io/providers/coder/coder/latest/docs/resources/script#icon-1)
+ - [`coder_metadata`](https://registry.terraform.io/providers/coder/coder/latest/docs/resources/metadata#icon-1)
These can all be configured to use an icon by setting the `icon` field.
diff --git a/docs/admin/templates/extending-templates/web-ides.md b/docs/admin/templates/extending-templates/web-ides.md
index 1ded4fbf3482b..d46fcf80010e9 100644
--- a/docs/admin/templates/extending-templates/web-ides.md
+++ b/docs/admin/templates/extending-templates/web-ides.md
@@ -25,7 +25,7 @@ resource "coder_app" "portainer" {
## code-server
-[code-server](https://github.com/coder/coder) is our supported method of running
+[code-server](https://github.com/coder/code-server) is our supported method of running
VS Code in the web browser. A simple way to install code-server in Linux/macOS
workspaces is via the Coder agent in your template:
From ca23abe12c4699687578969aebed2de705d6badb Mon Sep 17 00:00:00 2001
From: Nick Fisher
Date: Sun, 2 Mar 2025 15:54:44 -0500
Subject: [PATCH 35/44] feat(provisioner): add support for
workspace_owner_rbac_roles (#16407)
Part of https://github.com/coder/terraform-provider-coder/pull/330
Adds support for the coder_workspace_owner.rbac_roles attribute
---
.../provisionerdserver/provisionerdserver.go | 14 +
.../provisionerdserver_test.go | 1 +
provisioner/terraform/provision.go | 6 +
provisioner/terraform/provision_test.go | 47 ++
provisionersdk/proto/provisioner.pb.go | 767 ++++++++++--------
provisionersdk/proto/provisioner.proto | 6 +
site/e2e/provisionerGenerated.ts | 21 +
7 files changed, 521 insertions(+), 341 deletions(-)
diff --git a/coderd/provisionerdserver/provisionerdserver.go b/coderd/provisionerdserver/provisionerdserver.go
index f431805a350a1..3c9650ffc82e0 100644
--- a/coderd/provisionerdserver/provisionerdserver.go
+++ b/coderd/provisionerdserver/provisionerdserver.go
@@ -594,6 +594,19 @@ func (s *server) acquireProtoJob(ctx context.Context, job database.ProvisionerJo
})
}
+ roles, err := s.Database.GetAuthorizationUserRoles(ctx, owner.ID)
+ if err != nil {
+ return nil, failJob(fmt.Sprintf("get owner authorization roles: %s", err))
+ }
+ ownerRbacRoles := []*sdkproto.Role{}
+ for _, role := range roles.Roles {
+ if s.OrganizationID == uuid.Nil {
+ ownerRbacRoles = append(ownerRbacRoles, &sdkproto.Role{Name: role, OrgId: ""})
+ continue
+ }
+ ownerRbacRoles = append(ownerRbacRoles, &sdkproto.Role{Name: role, OrgId: s.OrganizationID.String()})
+ }
+
protoJob.Type = &proto.AcquiredJob_WorkspaceBuild_{
WorkspaceBuild: &proto.AcquiredJob_WorkspaceBuild{
WorkspaceBuildId: workspaceBuild.ID.String(),
@@ -621,6 +634,7 @@ func (s *server) acquireProtoJob(ctx context.Context, job database.ProvisionerJo
WorkspaceOwnerSshPrivateKey: ownerSSHPrivateKey,
WorkspaceBuildId: workspaceBuild.ID.String(),
WorkspaceOwnerLoginType: string(owner.LoginType),
+ WorkspaceOwnerRbacRoles: ownerRbacRoles,
},
LogLevel: input.LogLevel,
},
diff --git a/coderd/provisionerdserver/provisionerdserver_test.go b/coderd/provisionerdserver/provisionerdserver_test.go
index cc73089e82b63..4d147a48f61bc 100644
--- a/coderd/provisionerdserver/provisionerdserver_test.go
+++ b/coderd/provisionerdserver/provisionerdserver_test.go
@@ -377,6 +377,7 @@ func TestAcquireJob(t *testing.T) {
WorkspaceOwnerSshPrivateKey: sshKey.PrivateKey,
WorkspaceBuildId: build.ID.String(),
WorkspaceOwnerLoginType: string(user.LoginType),
+ WorkspaceOwnerRbacRoles: []*sdkproto.Role{{Name: "member", OrgId: pd.OrganizationID.String()}},
},
},
})
diff --git a/provisioner/terraform/provision.go b/provisioner/terraform/provision.go
index bbb91a96cb3dd..78068fc43c819 100644
--- a/provisioner/terraform/provision.go
+++ b/provisioner/terraform/provision.go
@@ -242,6 +242,11 @@ func provisionEnv(
return nil, xerrors.Errorf("marshal owner groups: %w", err)
}
+ ownerRbacRoles, err := json.Marshal(metadata.GetWorkspaceOwnerRbacRoles())
+ if err != nil {
+ return nil, xerrors.Errorf("marshal owner rbac roles: %w", err)
+ }
+
env = append(env,
"CODER_AGENT_URL="+metadata.GetCoderUrl(),
"CODER_WORKSPACE_TRANSITION="+strings.ToLower(metadata.GetWorkspaceTransition().String()),
@@ -254,6 +259,7 @@ func provisionEnv(
"CODER_WORKSPACE_OWNER_SSH_PUBLIC_KEY="+metadata.GetWorkspaceOwnerSshPublicKey(),
"CODER_WORKSPACE_OWNER_SSH_PRIVATE_KEY="+metadata.GetWorkspaceOwnerSshPrivateKey(),
"CODER_WORKSPACE_OWNER_LOGIN_TYPE="+metadata.GetWorkspaceOwnerLoginType(),
+ "CODER_WORKSPACE_OWNER_RBAC_ROLES="+string(ownerRbacRoles),
"CODER_WORKSPACE_ID="+metadata.GetWorkspaceId(),
"CODER_WORKSPACE_OWNER_ID="+metadata.GetWorkspaceOwnerId(),
"CODER_WORKSPACE_OWNER_SESSION_TOKEN="+metadata.GetWorkspaceOwnerSessionToken(),
diff --git a/provisioner/terraform/provision_test.go b/provisioner/terraform/provision_test.go
index 50681f276c997..cd09ea2adf018 100644
--- a/provisioner/terraform/provision_test.go
+++ b/provisioner/terraform/provision_test.go
@@ -764,6 +764,53 @@ func TestProvision(t *testing.T) {
}},
},
},
+ {
+ Name: "workspace-owner-rbac-roles",
+ SkipReason: "field will be added in provider version 2.2.0",
+ Files: map[string]string{
+ "main.tf": `terraform {
+ required_providers {
+ coder = {
+ source = "coder/coder"
+ version = "2.2.0"
+ }
+ }
+ }
+
+ resource "null_resource" "example" {}
+ data "coder_workspace_owner" "me" {}
+ resource "coder_metadata" "example" {
+ resource_id = null_resource.example.id
+ item {
+ key = "rbac_roles_name"
+ value = data.coder_workspace_owner.me.rbac_roles[0].name
+ }
+ item {
+ key = "rbac_roles_org_id"
+ value = data.coder_workspace_owner.me.rbac_roles[0].org_id
+ }
+ }
+ `,
+ },
+ Request: &proto.PlanRequest{
+ Metadata: &proto.Metadata{
+ WorkspaceOwnerRbacRoles: []*proto.Role{{Name: "member", OrgId: ""}},
+ },
+ },
+ Response: &proto.PlanComplete{
+ Resources: []*proto.Resource{{
+ Name: "example",
+ Type: "null_resource",
+ Metadata: []*proto.Resource_Metadata{{
+ Key: "rbac_roles_name",
+ Value: "member",
+ }, {
+ Key: "rbac_roles_org_id",
+ Value: "",
+ }},
+ }},
+ },
+ },
}
for _, testCase := range testCases {
diff --git a/provisionersdk/proto/provisioner.pb.go b/provisionersdk/proto/provisioner.pb.go
index df74e01a4050b..e44afce39ea95 100644
--- a/provisionersdk/proto/provisioner.pb.go
+++ b/provisionersdk/proto/provisioner.pb.go
@@ -2097,6 +2097,61 @@ func (x *Module) GetKey() string {
return ""
}
+type Role struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ OrgId string `protobuf:"bytes,2,opt,name=org_id,json=orgId,proto3" json:"org_id,omitempty"`
+}
+
+func (x *Role) Reset() {
+ *x = Role{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Role) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Role) ProtoMessage() {}
+
+func (x *Role) ProtoReflect() protoreflect.Message {
+ mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[23]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Role.ProtoReflect.Descriptor instead.
+func (*Role) Descriptor() ([]byte, []int) {
+ return file_provisionersdk_proto_provisioner_proto_rawDescGZIP(), []int{23}
+}
+
+func (x *Role) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Role) GetOrgId() string {
+ if x != nil {
+ return x.OrgId
+ }
+ return ""
+}
+
// Metadata is information about a workspace used in the execution of a build
type Metadata struct {
state protoimpl.MessageState
@@ -2121,12 +2176,13 @@ type Metadata struct {
WorkspaceOwnerSshPrivateKey string `protobuf:"bytes,16,opt,name=workspace_owner_ssh_private_key,json=workspaceOwnerSshPrivateKey,proto3" json:"workspace_owner_ssh_private_key,omitempty"`
WorkspaceBuildId string `protobuf:"bytes,17,opt,name=workspace_build_id,json=workspaceBuildId,proto3" json:"workspace_build_id,omitempty"`
WorkspaceOwnerLoginType string `protobuf:"bytes,18,opt,name=workspace_owner_login_type,json=workspaceOwnerLoginType,proto3" json:"workspace_owner_login_type,omitempty"`
+ WorkspaceOwnerRbacRoles []*Role `protobuf:"bytes,19,rep,name=workspace_owner_rbac_roles,json=workspaceOwnerRbacRoles,proto3" json:"workspace_owner_rbac_roles,omitempty"`
}
func (x *Metadata) Reset() {
*x = Metadata{}
if protoimpl.UnsafeEnabled {
- mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[23]
+ mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[24]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2139,7 +2195,7 @@ func (x *Metadata) String() string {
func (*Metadata) ProtoMessage() {}
func (x *Metadata) ProtoReflect() protoreflect.Message {
- mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[23]
+ mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[24]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2152,7 +2208,7 @@ func (x *Metadata) ProtoReflect() protoreflect.Message {
// Deprecated: Use Metadata.ProtoReflect.Descriptor instead.
func (*Metadata) Descriptor() ([]byte, []int) {
- return file_provisionersdk_proto_provisioner_proto_rawDescGZIP(), []int{23}
+ return file_provisionersdk_proto_provisioner_proto_rawDescGZIP(), []int{24}
}
func (x *Metadata) GetCoderUrl() string {
@@ -2281,6 +2337,13 @@ func (x *Metadata) GetWorkspaceOwnerLoginType() string {
return ""
}
+func (x *Metadata) GetWorkspaceOwnerRbacRoles() []*Role {
+ if x != nil {
+ return x.WorkspaceOwnerRbacRoles
+ }
+ return nil
+}
+
// Config represents execution configuration shared by all subsequent requests in the Session
type Config struct {
state protoimpl.MessageState
@@ -2297,7 +2360,7 @@ type Config struct {
func (x *Config) Reset() {
*x = Config{}
if protoimpl.UnsafeEnabled {
- mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[24]
+ mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[25]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2310,7 +2373,7 @@ func (x *Config) String() string {
func (*Config) ProtoMessage() {}
func (x *Config) ProtoReflect() protoreflect.Message {
- mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[24]
+ mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[25]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2323,7 +2386,7 @@ func (x *Config) ProtoReflect() protoreflect.Message {
// Deprecated: Use Config.ProtoReflect.Descriptor instead.
func (*Config) Descriptor() ([]byte, []int) {
- return file_provisionersdk_proto_provisioner_proto_rawDescGZIP(), []int{24}
+ return file_provisionersdk_proto_provisioner_proto_rawDescGZIP(), []int{25}
}
func (x *Config) GetTemplateSourceArchive() []byte {
@@ -2357,7 +2420,7 @@ type ParseRequest struct {
func (x *ParseRequest) Reset() {
*x = ParseRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[25]
+ mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[26]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2370,7 +2433,7 @@ func (x *ParseRequest) String() string {
func (*ParseRequest) ProtoMessage() {}
func (x *ParseRequest) ProtoReflect() protoreflect.Message {
- mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[25]
+ mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[26]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2383,7 +2446,7 @@ func (x *ParseRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ParseRequest.ProtoReflect.Descriptor instead.
func (*ParseRequest) Descriptor() ([]byte, []int) {
- return file_provisionersdk_proto_provisioner_proto_rawDescGZIP(), []int{25}
+ return file_provisionersdk_proto_provisioner_proto_rawDescGZIP(), []int{26}
}
// ParseComplete indicates a request to parse completed.
@@ -2401,7 +2464,7 @@ type ParseComplete struct {
func (x *ParseComplete) Reset() {
*x = ParseComplete{}
if protoimpl.UnsafeEnabled {
- mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[26]
+ mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[27]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2414,7 +2477,7 @@ func (x *ParseComplete) String() string {
func (*ParseComplete) ProtoMessage() {}
func (x *ParseComplete) ProtoReflect() protoreflect.Message {
- mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[26]
+ mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[27]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2427,7 +2490,7 @@ func (x *ParseComplete) ProtoReflect() protoreflect.Message {
// Deprecated: Use ParseComplete.ProtoReflect.Descriptor instead.
func (*ParseComplete) Descriptor() ([]byte, []int) {
- return file_provisionersdk_proto_provisioner_proto_rawDescGZIP(), []int{26}
+ return file_provisionersdk_proto_provisioner_proto_rawDescGZIP(), []int{27}
}
func (x *ParseComplete) GetError() string {
@@ -2473,7 +2536,7 @@ type PlanRequest struct {
func (x *PlanRequest) Reset() {
*x = PlanRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[27]
+ mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[28]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2486,7 +2549,7 @@ func (x *PlanRequest) String() string {
func (*PlanRequest) ProtoMessage() {}
func (x *PlanRequest) ProtoReflect() protoreflect.Message {
- mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[27]
+ mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[28]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2499,7 +2562,7 @@ func (x *PlanRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use PlanRequest.ProtoReflect.Descriptor instead.
func (*PlanRequest) Descriptor() ([]byte, []int) {
- return file_provisionersdk_proto_provisioner_proto_rawDescGZIP(), []int{27}
+ return file_provisionersdk_proto_provisioner_proto_rawDescGZIP(), []int{28}
}
func (x *PlanRequest) GetMetadata() *Metadata {
@@ -2548,7 +2611,7 @@ type PlanComplete struct {
func (x *PlanComplete) Reset() {
*x = PlanComplete{}
if protoimpl.UnsafeEnabled {
- mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[28]
+ mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[29]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2561,7 +2624,7 @@ func (x *PlanComplete) String() string {
func (*PlanComplete) ProtoMessage() {}
func (x *PlanComplete) ProtoReflect() protoreflect.Message {
- mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[28]
+ mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[29]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2574,7 +2637,7 @@ func (x *PlanComplete) ProtoReflect() protoreflect.Message {
// Deprecated: Use PlanComplete.ProtoReflect.Descriptor instead.
func (*PlanComplete) Descriptor() ([]byte, []int) {
- return file_provisionersdk_proto_provisioner_proto_rawDescGZIP(), []int{28}
+ return file_provisionersdk_proto_provisioner_proto_rawDescGZIP(), []int{29}
}
func (x *PlanComplete) GetError() string {
@@ -2639,7 +2702,7 @@ type ApplyRequest struct {
func (x *ApplyRequest) Reset() {
*x = ApplyRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[29]
+ mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[30]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2652,7 +2715,7 @@ func (x *ApplyRequest) String() string {
func (*ApplyRequest) ProtoMessage() {}
func (x *ApplyRequest) ProtoReflect() protoreflect.Message {
- mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[29]
+ mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[30]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2665,7 +2728,7 @@ func (x *ApplyRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ApplyRequest.ProtoReflect.Descriptor instead.
func (*ApplyRequest) Descriptor() ([]byte, []int) {
- return file_provisionersdk_proto_provisioner_proto_rawDescGZIP(), []int{29}
+ return file_provisionersdk_proto_provisioner_proto_rawDescGZIP(), []int{30}
}
func (x *ApplyRequest) GetMetadata() *Metadata {
@@ -2692,7 +2755,7 @@ type ApplyComplete struct {
func (x *ApplyComplete) Reset() {
*x = ApplyComplete{}
if protoimpl.UnsafeEnabled {
- mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[30]
+ mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[31]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2705,7 +2768,7 @@ func (x *ApplyComplete) String() string {
func (*ApplyComplete) ProtoMessage() {}
func (x *ApplyComplete) ProtoReflect() protoreflect.Message {
- mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[30]
+ mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[31]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2718,7 +2781,7 @@ func (x *ApplyComplete) ProtoReflect() protoreflect.Message {
// Deprecated: Use ApplyComplete.ProtoReflect.Descriptor instead.
func (*ApplyComplete) Descriptor() ([]byte, []int) {
- return file_provisionersdk_proto_provisioner_proto_rawDescGZIP(), []int{30}
+ return file_provisionersdk_proto_provisioner_proto_rawDescGZIP(), []int{31}
}
func (x *ApplyComplete) GetState() []byte {
@@ -2780,7 +2843,7 @@ type Timing struct {
func (x *Timing) Reset() {
*x = Timing{}
if protoimpl.UnsafeEnabled {
- mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[31]
+ mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[32]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2793,7 +2856,7 @@ func (x *Timing) String() string {
func (*Timing) ProtoMessage() {}
func (x *Timing) ProtoReflect() protoreflect.Message {
- mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[31]
+ mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[32]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2806,7 +2869,7 @@ func (x *Timing) ProtoReflect() protoreflect.Message {
// Deprecated: Use Timing.ProtoReflect.Descriptor instead.
func (*Timing) Descriptor() ([]byte, []int) {
- return file_provisionersdk_proto_provisioner_proto_rawDescGZIP(), []int{31}
+ return file_provisionersdk_proto_provisioner_proto_rawDescGZIP(), []int{32}
}
func (x *Timing) GetStart() *timestamppb.Timestamp {
@@ -2868,7 +2931,7 @@ type CancelRequest struct {
func (x *CancelRequest) Reset() {
*x = CancelRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[32]
+ mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[33]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2881,7 +2944,7 @@ func (x *CancelRequest) String() string {
func (*CancelRequest) ProtoMessage() {}
func (x *CancelRequest) ProtoReflect() protoreflect.Message {
- mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[32]
+ mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[33]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2894,7 +2957,7 @@ func (x *CancelRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use CancelRequest.ProtoReflect.Descriptor instead.
func (*CancelRequest) Descriptor() ([]byte, []int) {
- return file_provisionersdk_proto_provisioner_proto_rawDescGZIP(), []int{32}
+ return file_provisionersdk_proto_provisioner_proto_rawDescGZIP(), []int{33}
}
type Request struct {
@@ -2915,7 +2978,7 @@ type Request struct {
func (x *Request) Reset() {
*x = Request{}
if protoimpl.UnsafeEnabled {
- mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[33]
+ mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[34]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2928,7 +2991,7 @@ func (x *Request) String() string {
func (*Request) ProtoMessage() {}
func (x *Request) ProtoReflect() protoreflect.Message {
- mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[33]
+ mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[34]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2941,7 +3004,7 @@ func (x *Request) ProtoReflect() protoreflect.Message {
// Deprecated: Use Request.ProtoReflect.Descriptor instead.
func (*Request) Descriptor() ([]byte, []int) {
- return file_provisionersdk_proto_provisioner_proto_rawDescGZIP(), []int{33}
+ return file_provisionersdk_proto_provisioner_proto_rawDescGZIP(), []int{34}
}
func (m *Request) GetType() isRequest_Type {
@@ -3037,7 +3100,7 @@ type Response struct {
func (x *Response) Reset() {
*x = Response{}
if protoimpl.UnsafeEnabled {
- mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[34]
+ mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[35]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3050,7 +3113,7 @@ func (x *Response) String() string {
func (*Response) ProtoMessage() {}
func (x *Response) ProtoReflect() protoreflect.Message {
- mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[34]
+ mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[35]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3063,7 +3126,7 @@ func (x *Response) ProtoReflect() protoreflect.Message {
// Deprecated: Use Response.ProtoReflect.Descriptor instead.
func (*Response) Descriptor() ([]byte, []int) {
- return file_provisionersdk_proto_provisioner_proto_rawDescGZIP(), []int{34}
+ return file_provisionersdk_proto_provisioner_proto_rawDescGZIP(), []int{35}
}
func (m *Response) GetType() isResponse_Type {
@@ -3145,7 +3208,7 @@ type Agent_Metadata struct {
func (x *Agent_Metadata) Reset() {
*x = Agent_Metadata{}
if protoimpl.UnsafeEnabled {
- mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[35]
+ mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[36]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3158,7 +3221,7 @@ func (x *Agent_Metadata) String() string {
func (*Agent_Metadata) ProtoMessage() {}
func (x *Agent_Metadata) ProtoReflect() protoreflect.Message {
- mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[35]
+ mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[36]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3230,7 +3293,7 @@ type Resource_Metadata struct {
func (x *Resource_Metadata) Reset() {
*x = Resource_Metadata{}
if protoimpl.UnsafeEnabled {
- mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[37]
+ mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[38]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3243,7 +3306,7 @@ func (x *Resource_Metadata) String() string {
func (*Resource_Metadata) ProtoMessage() {}
func (x *Resource_Metadata) ProtoReflect() protoreflect.Message {
- mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[37]
+ mi := &file_provisionersdk_proto_provisioner_proto_msgTypes[38]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3571,236 +3634,244 @@ var file_provisionersdk_proto_provisioner_proto_rawDesc = []byte{
0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73,
0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69,
0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x03, 0x6b, 0x65, 0x79, 0x22, 0xac, 0x07, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
- 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x55, 0x72, 0x6c, 0x12, 0x53,
- 0x0a, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x6e,
- 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x70,
- 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13,
- 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74,
- 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x77, 0x6f, 0x72,
- 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x6f,
- 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x04, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4f, 0x77,
- 0x6e, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x2c, 0x0a, 0x12, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4f, 0x77, 0x6e,
- 0x65, 0x72, 0x49, 0x64, 0x12, 0x32, 0x0a, 0x15, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x07, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x13, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4f, 0x77,
- 0x6e, 0x65, 0x72, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x23, 0x0a, 0x0d, 0x74, 0x65, 0x6d, 0x70,
- 0x6c, 0x61, 0x74, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0c, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a,
- 0x10, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
- 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74,
- 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x21, 0x77, 0x6f, 0x72, 0x6b,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x6f, 0x69, 0x64, 0x63,
- 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0a, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x1d, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4f, 0x77,
- 0x6e, 0x65, 0x72, 0x4f, 0x69, 0x64, 0x63, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b,
- 0x65, 0x6e, 0x12, 0x41, 0x0a, 0x1d, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f,
- 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x6f,
- 0x6b, 0x65, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x77, 0x6f, 0x72, 0x6b, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e,
- 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74,
- 0x65, 0x5f, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x65, 0x6d, 0x70,
- 0x6c, 0x61, 0x74, 0x65, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0d,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4f,
- 0x77, 0x6e, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x77, 0x6f, 0x72, 0x6b,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x67, 0x72, 0x6f, 0x75,
- 0x70, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x42,
- 0x0a, 0x1e, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6f, 0x77, 0x6e, 0x65,
- 0x72, 0x5f, 0x73, 0x73, 0x68, 0x5f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79,
- 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x53, 0x73, 0x68, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b,
- 0x65, 0x79, 0x12, 0x44, 0x0a, 0x1f, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f,
- 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x73, 0x73, 0x68, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74,
- 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1b, 0x77, 0x6f, 0x72,
- 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x53, 0x73, 0x68, 0x50, 0x72,
- 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x12, 0x77, 0x6f, 0x72, 0x6b,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x11,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x42,
- 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x12, 0x3b, 0x0a, 0x1a, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x6c, 0x6f, 0x67, 0x69, 0x6e, 0x5f,
- 0x74, 0x79, 0x70, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x77, 0x6f, 0x72, 0x6b,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x54,
- 0x79, 0x70, 0x65, 0x22, 0x8a, 0x01, 0x0a, 0x06, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x36,
- 0x0a, 0x17, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x5f, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
- 0x15, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41,
- 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x32, 0x0a, 0x15,
- 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x5f, 0x6c, 0x6f, 0x67, 0x5f,
- 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x70, 0x72, 0x6f,
- 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c,
- 0x22, 0x0e, 0x0a, 0x0c, 0x50, 0x61, 0x72, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x22, 0xa3, 0x02, 0x0a, 0x0d, 0x50, 0x61, 0x72, 0x73, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65,
- 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x4c, 0x0a, 0x12, 0x74, 0x65, 0x6d, 0x70,
- 0x6c, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x02,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e,
- 0x65, 0x72, 0x2e, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x56, 0x61, 0x72, 0x69, 0x61,
- 0x62, 0x6c, 0x65, 0x52, 0x11, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x56, 0x61, 0x72,
- 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x64, 0x6d, 0x65,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x65, 0x61, 0x64, 0x6d, 0x65, 0x12, 0x54,
- 0x0a, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x74, 0x61, 0x67, 0x73,
- 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69,
- 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65,
- 0x74, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x54, 0x61, 0x67, 0x73,
- 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x54, 0x61, 0x67, 0x73, 0x1a, 0x40, 0x0a, 0x12, 0x57, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
- 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05,
- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
- 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb5, 0x02, 0x0a, 0x0b, 0x50, 0x6c, 0x61, 0x6e, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61,
- 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69,
- 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52,
- 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x53, 0x0a, 0x15, 0x72, 0x69, 0x63,
- 0x68, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69,
- 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x52, 0x69, 0x63, 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d,
- 0x65, 0x74, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x13, 0x72, 0x69, 0x63, 0x68, 0x50,
- 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x43,
- 0x0a, 0x0f, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65,
- 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73,
- 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x61,
- 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c,
- 0x75, 0x65, 0x73, 0x12, 0x59, 0x0a, 0x17, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f,
- 0x61, 0x75, 0x74, 0x68, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e,
- 0x65, 0x72, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, 0x50,
- 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x52, 0x15, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61,
- 0x6c, 0x41, 0x75, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x22, 0x85,
- 0x03, 0x0a, 0x0c, 0x50, 0x6c, 0x61, 0x6e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x12,
- 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
- 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x33, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69,
- 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52,
- 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x0a, 0x70, 0x61,
- 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a,
- 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x52, 0x69, 0x63,
- 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61,
- 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x61, 0x0a, 0x17, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e,
+ 0x03, 0x6b, 0x65, 0x79, 0x22, 0x31, 0x0a, 0x04, 0x52, 0x6f, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x15, 0x0a, 0x06, 0x6f, 0x72, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x05, 0x6f, 0x72, 0x67, 0x49, 0x64, 0x22, 0xfc, 0x07, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61,
+ 0x64, 0x61, 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x5f, 0x75, 0x72,
+ 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x55, 0x72,
+ 0x6c, 0x12, 0x53, 0x0a, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x74,
+ 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32,
+ 0x20, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x57, 0x6f,
+ 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x13, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x54, 0x72, 0x61, 0x6e,
+ 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d,
+ 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x27, 0x0a,
+ 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x77, 0x6f,
+ 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x2c, 0x0a, 0x12, 0x77, 0x6f, 0x72,
+ 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18,
+ 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x32, 0x0a, 0x15, 0x77, 0x6f, 0x72, 0x6b, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c,
+ 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x23, 0x0a, 0x0d, 0x74,
+ 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0c, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65,
+ 0x12, 0x29, 0x0a, 0x10, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x74, 0x65, 0x6d, 0x70,
+ 0x6c, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x21, 0x77,
+ 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x6f,
+ 0x69, 0x64, 0x63, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
+ 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x4f, 0x69, 0x64, 0x63, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x41, 0x0a, 0x1d, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x77, 0x6f,
+ 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x53, 0x65, 0x73, 0x73,
+ 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x65, 0x6d, 0x70,
+ 0x6c, 0x61, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74,
+ 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x14, 0x77, 0x6f, 0x72,
+ 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x77,
+ 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x67,
+ 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x77, 0x6f, 0x72,
+ 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70,
+ 0x73, 0x12, 0x42, 0x0a, 0x1e, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6f,
+ 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x73, 0x73, 0x68, 0x5f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f,
+ 0x6b, 0x65, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x77, 0x6f, 0x72, 0x6b, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x53, 0x73, 0x68, 0x50, 0x75, 0x62, 0x6c,
+ 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x44, 0x0a, 0x1f, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x73, 0x73, 0x68, 0x5f, 0x70, 0x72, 0x69,
+ 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1b,
+ 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x53, 0x73,
+ 0x68, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x12, 0x77,
+ 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x69,
+ 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x12, 0x3b, 0x0a, 0x1a, 0x77, 0x6f, 0x72,
+ 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x6c, 0x6f, 0x67,
+ 0x69, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x77,
+ 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x4c, 0x6f, 0x67,
+ 0x69, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x4e, 0x0a, 0x1a, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x72, 0x62, 0x61, 0x63, 0x5f, 0x72,
+ 0x6f, 0x6c, 0x65, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x70, 0x72, 0x6f,
+ 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x52, 0x6f, 0x6c, 0x65, 0x52, 0x17, 0x77,
+ 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x52, 0x62, 0x61,
+ 0x63, 0x52, 0x6f, 0x6c, 0x65, 0x73, 0x22, 0x8a, 0x01, 0x0a, 0x06, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x12, 0x36, 0x0a, 0x17, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0c, 0x52, 0x15, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x53, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x41, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61,
+ 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12,
+ 0x32, 0x0a, 0x15, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x5f, 0x6c,
+ 0x6f, 0x67, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13,
+ 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x4c, 0x6f, 0x67, 0x4c, 0x65,
+ 0x76, 0x65, 0x6c, 0x22, 0x0e, 0x0a, 0x0c, 0x50, 0x61, 0x72, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x22, 0xa3, 0x02, 0x0a, 0x0d, 0x50, 0x61, 0x72, 0x73, 0x65, 0x43, 0x6f, 0x6d,
+ 0x70, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x4c, 0x0a, 0x12, 0x74,
+ 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65,
+ 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73,
+ 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x56, 0x61,
+ 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x11, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65,
+ 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61,
+ 0x64, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x65, 0x61, 0x64, 0x6d,
+ 0x65, 0x12, 0x54, 0x0a, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x74,
+ 0x61, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x70, 0x72, 0x6f, 0x76,
+ 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x43, 0x6f, 0x6d,
+ 0x70, 0x6c, 0x65, 0x74, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x54,
+ 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x54, 0x61, 0x67, 0x73, 0x1a, 0x40, 0x0a, 0x12, 0x57, 0x6f, 0x72, 0x6b, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb5, 0x02, 0x0a, 0x0b, 0x50, 0x6c,
+ 0x61, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x08, 0x6d, 0x65, 0x74,
+ 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x72,
+ 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
+ 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x53, 0x0a, 0x15,
+ 0x72, 0x69, 0x63, 0x68, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x70, 0x72,
+ 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x52, 0x69, 0x63, 0x68, 0x50, 0x61,
+ 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x13, 0x72, 0x69,
+ 0x63, 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x72, 0x6f,
+ 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c,
+ 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x59, 0x0a, 0x17, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e,
0x61, 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72,
- 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73,
+ 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73,
0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x75,
- 0x74, 0x68, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x52, 0x15, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68,
- 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x12, 0x2d, 0x0a, 0x07, 0x74, 0x69, 0x6d,
- 0x69, 0x6e, 0x67, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f,
- 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x54, 0x69, 0x6d, 0x69, 0x6e, 0x67, 0x52,
- 0x07, 0x74, 0x69, 0x6d, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x2d, 0x0a, 0x07, 0x6d, 0x6f, 0x64, 0x75,
- 0x6c, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x76,
- 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x07,
- 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x07, 0x70, 0x72, 0x65, 0x73, 0x65,
- 0x74, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69,
- 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x50, 0x72, 0x65, 0x73, 0x65, 0x74, 0x52, 0x07, 0x70,
- 0x72, 0x65, 0x73, 0x65, 0x74, 0x73, 0x22, 0x41, 0x0a, 0x0c, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61,
- 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69,
- 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52,
- 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xbe, 0x02, 0x0a, 0x0d, 0x41, 0x70,
- 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73,
- 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74,
- 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x33, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x72, 0x6f,
- 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x0a,
- 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x52,
- 0x69, 0x63, 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x0a, 0x70, 0x61,
- 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x61, 0x0a, 0x17, 0x65, 0x78, 0x74, 0x65,
- 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64,
- 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x70, 0x72, 0x6f, 0x76,
- 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c,
- 0x41, 0x75, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x52, 0x15, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x75,
- 0x74, 0x68, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x12, 0x2d, 0x0a, 0x07, 0x74,
- 0x69, 0x6d, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70,
- 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x54, 0x69, 0x6d, 0x69, 0x6e,
- 0x67, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x69, 0x6e, 0x67, 0x73, 0x22, 0xfa, 0x01, 0x0a, 0x06, 0x54,
- 0x69, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
- 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x2c, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
- 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a,
- 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65,
- 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69,
- 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x54, 0x69, 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65,
- 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0x0f, 0x0a, 0x0d, 0x43, 0x61, 0x6e, 0x63, 0x65,
- 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x8c, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e,
- 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x06, 0x63, 0x6f, 0x6e,
- 0x66, 0x69, 0x67, 0x12, 0x31, 0x0a, 0x05, 0x70, 0x61, 0x72, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72,
- 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52,
- 0x05, 0x70, 0x61, 0x72, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x04, 0x70, 0x6c, 0x61, 0x6e, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e,
- 0x65, 0x72, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00,
- 0x52, 0x04, 0x70, 0x6c, 0x61, 0x6e, 0x12, 0x31, 0x0a, 0x05, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x18,
- 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f,
- 0x6e, 0x65, 0x72, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x48, 0x00, 0x52, 0x05, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x12, 0x34, 0x0a, 0x06, 0x63, 0x61, 0x6e,
- 0x63, 0x65, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x76,
- 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x06, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x42,
- 0x06, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xd1, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x03, 0x6c, 0x6f, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e,
- 0x4c, 0x6f, 0x67, 0x48, 0x00, 0x52, 0x03, 0x6c, 0x6f, 0x67, 0x12, 0x32, 0x0a, 0x05, 0x70, 0x61,
- 0x72, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x76,
- 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x43, 0x6f, 0x6d,
- 0x70, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x00, 0x52, 0x05, 0x70, 0x61, 0x72, 0x73, 0x65, 0x12, 0x2f,
- 0x0a, 0x04, 0x70, 0x6c, 0x61, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70,
- 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x43,
- 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x00, 0x52, 0x04, 0x70, 0x6c, 0x61, 0x6e, 0x12,
- 0x32, 0x0a, 0x05, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
- 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x41, 0x70, 0x70,
- 0x6c, 0x79, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x00, 0x52, 0x05, 0x61, 0x70,
- 0x70, 0x6c, 0x79, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x2a, 0x3f, 0x0a, 0x08, 0x4c,
- 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x52, 0x41, 0x43, 0x45,
- 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x44, 0x45, 0x42, 0x55, 0x47, 0x10, 0x01, 0x12, 0x08, 0x0a,
- 0x04, 0x49, 0x4e, 0x46, 0x4f, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x57, 0x41, 0x52, 0x4e, 0x10,
- 0x03, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x04, 0x2a, 0x3b, 0x0a, 0x0f,
- 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x69, 0x6e, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12,
- 0x09, 0x0a, 0x05, 0x4f, 0x57, 0x4e, 0x45, 0x52, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x55,
- 0x54, 0x48, 0x45, 0x4e, 0x54, 0x49, 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a,
- 0x06, 0x50, 0x55, 0x42, 0x4c, 0x49, 0x43, 0x10, 0x02, 0x2a, 0x35, 0x0a, 0x09, 0x41, 0x70, 0x70,
- 0x4f, 0x70, 0x65, 0x6e, 0x49, 0x6e, 0x12, 0x0e, 0x0a, 0x06, 0x57, 0x49, 0x4e, 0x44, 0x4f, 0x57,
- 0x10, 0x00, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x4c, 0x49, 0x4d, 0x5f, 0x57,
- 0x49, 0x4e, 0x44, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x41, 0x42, 0x10, 0x02,
- 0x2a, 0x37, 0x0a, 0x13, 0x57, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x54, 0x72, 0x61,
- 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x54, 0x41, 0x52, 0x54,
- 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x54, 0x4f, 0x50, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07,
- 0x44, 0x45, 0x53, 0x54, 0x52, 0x4f, 0x59, 0x10, 0x02, 0x2a, 0x35, 0x0a, 0x0b, 0x54, 0x69, 0x6d,
- 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x54, 0x41, 0x52,
- 0x54, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54,
- 0x45, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x02,
- 0x32, 0x49, 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x12,
- 0x3a, 0x0a, 0x07, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x2e, 0x70, 0x72, 0x6f,
- 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x15, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x30, 0x5a, 0x2e, 0x67,
- 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2f,
- 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69,
- 0x6f, 0x6e, 0x65, 0x72, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x74, 0x68, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x52, 0x15, 0x65, 0x78, 0x74, 0x65,
+ 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72,
+ 0x73, 0x22, 0x85, 0x03, 0x0a, 0x0c, 0x50, 0x6c, 0x61, 0x6e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65,
+ 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x33, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x72,
+ 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x3a, 0x0a,
+ 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e,
+ 0x52, 0x69, 0x63, 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x0a, 0x70,
+ 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x61, 0x0a, 0x17, 0x65, 0x78, 0x74,
+ 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69,
+ 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x70, 0x72, 0x6f,
+ 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61,
+ 0x6c, 0x41, 0x75, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x52, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x15, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41,
+ 0x75, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x12, 0x2d, 0x0a, 0x07,
+ 0x74, 0x69, 0x6d, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e,
+ 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x54, 0x69, 0x6d, 0x69,
+ 0x6e, 0x67, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x2d, 0x0a, 0x07, 0x6d,
+ 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70,
+ 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x4d, 0x6f, 0x64, 0x75, 0x6c,
+ 0x65, 0x52, 0x07, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x07, 0x70, 0x72,
+ 0x65, 0x73, 0x65, 0x74, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72,
+ 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x50, 0x72, 0x65, 0x73, 0x65, 0x74,
+ 0x52, 0x07, 0x70, 0x72, 0x65, 0x73, 0x65, 0x74, 0x73, 0x22, 0x41, 0x0a, 0x0c, 0x41, 0x70, 0x70,
+ 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x08, 0x6d, 0x65, 0x74,
+ 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x72,
+ 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
+ 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xbe, 0x02, 0x0a,
+ 0x0d, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x14,
+ 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x73,
+ 0x74, 0x61, 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x33, 0x0a, 0x09, 0x72, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e,
+ 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12,
+ 0x3a, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65,
+ 0x72, 0x2e, 0x52, 0x69, 0x63, 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52,
+ 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x61, 0x0a, 0x17, 0x65,
+ 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x70, 0x72, 0x6f,
+ 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x70,
+ 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72,
+ 0x6e, 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x52,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x15, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61,
+ 0x6c, 0x41, 0x75, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x12, 0x2d,
+ 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x54, 0x69,
+ 0x6d, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x69, 0x6e, 0x67, 0x73, 0x22, 0xfa, 0x01,
+ 0x0a, 0x06, 0x54, 0x69, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72,
+ 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
+ 0x61, 0x6d, 0x70, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x2c, 0x0a, 0x03, 0x65, 0x6e,
+ 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
+ 0x61, 0x6d, 0x70, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x06, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x05, 0x73, 0x74,
+ 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x70, 0x72, 0x6f, 0x76,
+ 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x54, 0x69, 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x74,
+ 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0x0f, 0x0a, 0x0d, 0x43, 0x61,
+ 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x8c, 0x02, 0x0a, 0x07,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73,
+ 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x06,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x31, 0x0a, 0x05, 0x70, 0x61, 0x72, 0x73, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f,
+ 0x6e, 0x65, 0x72, 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x48, 0x00, 0x52, 0x05, 0x70, 0x61, 0x72, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x04, 0x70, 0x6c, 0x61,
+ 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73,
+ 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x48, 0x00, 0x52, 0x04, 0x70, 0x6c, 0x61, 0x6e, 0x12, 0x31, 0x0a, 0x05, 0x61, 0x70, 0x70,
+ 0x6c, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69,
+ 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x05, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x12, 0x34, 0x0a, 0x06,
+ 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70,
+ 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65,
+ 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x06, 0x63, 0x61, 0x6e, 0x63,
+ 0x65, 0x6c, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xd1, 0x01, 0x0a, 0x08, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x03, 0x6c, 0x6f, 0x67, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e,
+ 0x65, 0x72, 0x2e, 0x4c, 0x6f, 0x67, 0x48, 0x00, 0x52, 0x03, 0x6c, 0x6f, 0x67, 0x12, 0x32, 0x0a,
+ 0x05, 0x70, 0x61, 0x72, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70,
+ 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65,
+ 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x00, 0x52, 0x05, 0x70, 0x61, 0x72, 0x73,
+ 0x65, 0x12, 0x2f, 0x0a, 0x04, 0x70, 0x6c, 0x61, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x50, 0x6c,
+ 0x61, 0x6e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x00, 0x52, 0x04, 0x70, 0x6c,
+ 0x61, 0x6e, 0x12, 0x32, 0x0a, 0x05, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e,
+ 0x41, 0x70, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x00, 0x52,
+ 0x05, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x2a, 0x3f,
+ 0x0a, 0x08, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x52,
+ 0x41, 0x43, 0x45, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x44, 0x45, 0x42, 0x55, 0x47, 0x10, 0x01,
+ 0x12, 0x08, 0x0a, 0x04, 0x49, 0x4e, 0x46, 0x4f, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x57, 0x41,
+ 0x52, 0x4e, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x04, 0x2a,
+ 0x3b, 0x0a, 0x0f, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x69, 0x6e, 0x67, 0x4c, 0x65, 0x76,
+ 0x65, 0x6c, 0x12, 0x09, 0x0a, 0x05, 0x4f, 0x57, 0x4e, 0x45, 0x52, 0x10, 0x00, 0x12, 0x11, 0x0a,
+ 0x0d, 0x41, 0x55, 0x54, 0x48, 0x45, 0x4e, 0x54, 0x49, 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x01,
+ 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x55, 0x42, 0x4c, 0x49, 0x43, 0x10, 0x02, 0x2a, 0x35, 0x0a, 0x09,
+ 0x41, 0x70, 0x70, 0x4f, 0x70, 0x65, 0x6e, 0x49, 0x6e, 0x12, 0x0e, 0x0a, 0x06, 0x57, 0x49, 0x4e,
+ 0x44, 0x4f, 0x57, 0x10, 0x00, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x4c, 0x49,
+ 0x4d, 0x5f, 0x57, 0x49, 0x4e, 0x44, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x41,
+ 0x42, 0x10, 0x02, 0x2a, 0x37, 0x0a, 0x13, 0x57, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x54,
+ 0x41, 0x52, 0x54, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x54, 0x4f, 0x50, 0x10, 0x01, 0x12,
+ 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x53, 0x54, 0x52, 0x4f, 0x59, 0x10, 0x02, 0x2a, 0x35, 0x0a, 0x0b,
+ 0x54, 0x69, 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x53,
+ 0x54, 0x41, 0x52, 0x54, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x4d, 0x50,
+ 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45,
+ 0x44, 0x10, 0x02, 0x32, 0x49, 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e,
+ 0x65, 0x72, 0x12, 0x3a, 0x0a, 0x07, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x2e,
+ 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65,
+ 0x72, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x30,
+ 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x64,
+ 0x65, 0x72, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, 0x76,
+ 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -3816,7 +3887,7 @@ func file_provisionersdk_proto_provisioner_proto_rawDescGZIP() []byte {
}
var file_provisionersdk_proto_provisioner_proto_enumTypes = make([]protoimpl.EnumInfo, 5)
-var file_provisionersdk_proto_provisioner_proto_msgTypes = make([]protoimpl.MessageInfo, 39)
+var file_provisionersdk_proto_provisioner_proto_msgTypes = make([]protoimpl.MessageInfo, 40)
var file_provisionersdk_proto_provisioner_proto_goTypes = []interface{}{
(LogLevel)(0), // 0: provisioner.LogLevel
(AppSharingLevel)(0), // 1: provisioner.AppSharingLevel
@@ -3846,31 +3917,32 @@ var file_provisionersdk_proto_provisioner_proto_goTypes = []interface{}{
(*Healthcheck)(nil), // 25: provisioner.Healthcheck
(*Resource)(nil), // 26: provisioner.Resource
(*Module)(nil), // 27: provisioner.Module
- (*Metadata)(nil), // 28: provisioner.Metadata
- (*Config)(nil), // 29: provisioner.Config
- (*ParseRequest)(nil), // 30: provisioner.ParseRequest
- (*ParseComplete)(nil), // 31: provisioner.ParseComplete
- (*PlanRequest)(nil), // 32: provisioner.PlanRequest
- (*PlanComplete)(nil), // 33: provisioner.PlanComplete
- (*ApplyRequest)(nil), // 34: provisioner.ApplyRequest
- (*ApplyComplete)(nil), // 35: provisioner.ApplyComplete
- (*Timing)(nil), // 36: provisioner.Timing
- (*CancelRequest)(nil), // 37: provisioner.CancelRequest
- (*Request)(nil), // 38: provisioner.Request
- (*Response)(nil), // 39: provisioner.Response
- (*Agent_Metadata)(nil), // 40: provisioner.Agent.Metadata
- nil, // 41: provisioner.Agent.EnvEntry
- (*Resource_Metadata)(nil), // 42: provisioner.Resource.Metadata
- nil, // 43: provisioner.ParseComplete.WorkspaceTagsEntry
- (*timestamppb.Timestamp)(nil), // 44: google.protobuf.Timestamp
+ (*Role)(nil), // 28: provisioner.Role
+ (*Metadata)(nil), // 29: provisioner.Metadata
+ (*Config)(nil), // 30: provisioner.Config
+ (*ParseRequest)(nil), // 31: provisioner.ParseRequest
+ (*ParseComplete)(nil), // 32: provisioner.ParseComplete
+ (*PlanRequest)(nil), // 33: provisioner.PlanRequest
+ (*PlanComplete)(nil), // 34: provisioner.PlanComplete
+ (*ApplyRequest)(nil), // 35: provisioner.ApplyRequest
+ (*ApplyComplete)(nil), // 36: provisioner.ApplyComplete
+ (*Timing)(nil), // 37: provisioner.Timing
+ (*CancelRequest)(nil), // 38: provisioner.CancelRequest
+ (*Request)(nil), // 39: provisioner.Request
+ (*Response)(nil), // 40: provisioner.Response
+ (*Agent_Metadata)(nil), // 41: provisioner.Agent.Metadata
+ nil, // 42: provisioner.Agent.EnvEntry
+ (*Resource_Metadata)(nil), // 43: provisioner.Resource.Metadata
+ nil, // 44: provisioner.ParseComplete.WorkspaceTagsEntry
+ (*timestamppb.Timestamp)(nil), // 45: google.protobuf.Timestamp
}
var file_provisionersdk_proto_provisioner_proto_depIdxs = []int32{
7, // 0: provisioner.RichParameter.options:type_name -> provisioner.RichParameterOption
11, // 1: provisioner.Preset.parameters:type_name -> provisioner.PresetParameter
0, // 2: provisioner.Log.level:type_name -> provisioner.LogLevel
- 41, // 3: provisioner.Agent.env:type_name -> provisioner.Agent.EnvEntry
+ 42, // 3: provisioner.Agent.env:type_name -> provisioner.Agent.EnvEntry
24, // 4: provisioner.Agent.apps:type_name -> provisioner.App
- 40, // 5: provisioner.Agent.metadata:type_name -> provisioner.Agent.Metadata
+ 41, // 5: provisioner.Agent.metadata:type_name -> provisioner.Agent.Metadata
21, // 6: provisioner.Agent.display_apps:type_name -> provisioner.DisplayApps
23, // 7: provisioner.Agent.scripts:type_name -> provisioner.Script
22, // 8: provisioner.Agent.extra_envs:type_name -> provisioner.Env
@@ -3881,44 +3953,45 @@ var file_provisionersdk_proto_provisioner_proto_depIdxs = []int32{
1, // 13: provisioner.App.sharing_level:type_name -> provisioner.AppSharingLevel
2, // 14: provisioner.App.open_in:type_name -> provisioner.AppOpenIn
17, // 15: provisioner.Resource.agents:type_name -> provisioner.Agent
- 42, // 16: provisioner.Resource.metadata:type_name -> provisioner.Resource.Metadata
+ 43, // 16: provisioner.Resource.metadata:type_name -> provisioner.Resource.Metadata
3, // 17: provisioner.Metadata.workspace_transition:type_name -> provisioner.WorkspaceTransition
- 6, // 18: provisioner.ParseComplete.template_variables:type_name -> provisioner.TemplateVariable
- 43, // 19: provisioner.ParseComplete.workspace_tags:type_name -> provisioner.ParseComplete.WorkspaceTagsEntry
- 28, // 20: provisioner.PlanRequest.metadata:type_name -> provisioner.Metadata
- 9, // 21: provisioner.PlanRequest.rich_parameter_values:type_name -> provisioner.RichParameterValue
- 12, // 22: provisioner.PlanRequest.variable_values:type_name -> provisioner.VariableValue
- 16, // 23: provisioner.PlanRequest.external_auth_providers:type_name -> provisioner.ExternalAuthProvider
- 26, // 24: provisioner.PlanComplete.resources:type_name -> provisioner.Resource
- 8, // 25: provisioner.PlanComplete.parameters:type_name -> provisioner.RichParameter
- 15, // 26: provisioner.PlanComplete.external_auth_providers:type_name -> provisioner.ExternalAuthProviderResource
- 36, // 27: provisioner.PlanComplete.timings:type_name -> provisioner.Timing
- 27, // 28: provisioner.PlanComplete.modules:type_name -> provisioner.Module
- 10, // 29: provisioner.PlanComplete.presets:type_name -> provisioner.Preset
- 28, // 30: provisioner.ApplyRequest.metadata:type_name -> provisioner.Metadata
- 26, // 31: provisioner.ApplyComplete.resources:type_name -> provisioner.Resource
- 8, // 32: provisioner.ApplyComplete.parameters:type_name -> provisioner.RichParameter
- 15, // 33: provisioner.ApplyComplete.external_auth_providers:type_name -> provisioner.ExternalAuthProviderResource
- 36, // 34: provisioner.ApplyComplete.timings:type_name -> provisioner.Timing
- 44, // 35: provisioner.Timing.start:type_name -> google.protobuf.Timestamp
- 44, // 36: provisioner.Timing.end:type_name -> google.protobuf.Timestamp
- 4, // 37: provisioner.Timing.state:type_name -> provisioner.TimingState
- 29, // 38: provisioner.Request.config:type_name -> provisioner.Config
- 30, // 39: provisioner.Request.parse:type_name -> provisioner.ParseRequest
- 32, // 40: provisioner.Request.plan:type_name -> provisioner.PlanRequest
- 34, // 41: provisioner.Request.apply:type_name -> provisioner.ApplyRequest
- 37, // 42: provisioner.Request.cancel:type_name -> provisioner.CancelRequest
- 13, // 43: provisioner.Response.log:type_name -> provisioner.Log
- 31, // 44: provisioner.Response.parse:type_name -> provisioner.ParseComplete
- 33, // 45: provisioner.Response.plan:type_name -> provisioner.PlanComplete
- 35, // 46: provisioner.Response.apply:type_name -> provisioner.ApplyComplete
- 38, // 47: provisioner.Provisioner.Session:input_type -> provisioner.Request
- 39, // 48: provisioner.Provisioner.Session:output_type -> provisioner.Response
- 48, // [48:49] is the sub-list for method output_type
- 47, // [47:48] is the sub-list for method input_type
- 47, // [47:47] is the sub-list for extension type_name
- 47, // [47:47] is the sub-list for extension extendee
- 0, // [0:47] is the sub-list for field type_name
+ 28, // 18: provisioner.Metadata.workspace_owner_rbac_roles:type_name -> provisioner.Role
+ 6, // 19: provisioner.ParseComplete.template_variables:type_name -> provisioner.TemplateVariable
+ 44, // 20: provisioner.ParseComplete.workspace_tags:type_name -> provisioner.ParseComplete.WorkspaceTagsEntry
+ 29, // 21: provisioner.PlanRequest.metadata:type_name -> provisioner.Metadata
+ 9, // 22: provisioner.PlanRequest.rich_parameter_values:type_name -> provisioner.RichParameterValue
+ 12, // 23: provisioner.PlanRequest.variable_values:type_name -> provisioner.VariableValue
+ 16, // 24: provisioner.PlanRequest.external_auth_providers:type_name -> provisioner.ExternalAuthProvider
+ 26, // 25: provisioner.PlanComplete.resources:type_name -> provisioner.Resource
+ 8, // 26: provisioner.PlanComplete.parameters:type_name -> provisioner.RichParameter
+ 15, // 27: provisioner.PlanComplete.external_auth_providers:type_name -> provisioner.ExternalAuthProviderResource
+ 37, // 28: provisioner.PlanComplete.timings:type_name -> provisioner.Timing
+ 27, // 29: provisioner.PlanComplete.modules:type_name -> provisioner.Module
+ 10, // 30: provisioner.PlanComplete.presets:type_name -> provisioner.Preset
+ 29, // 31: provisioner.ApplyRequest.metadata:type_name -> provisioner.Metadata
+ 26, // 32: provisioner.ApplyComplete.resources:type_name -> provisioner.Resource
+ 8, // 33: provisioner.ApplyComplete.parameters:type_name -> provisioner.RichParameter
+ 15, // 34: provisioner.ApplyComplete.external_auth_providers:type_name -> provisioner.ExternalAuthProviderResource
+ 37, // 35: provisioner.ApplyComplete.timings:type_name -> provisioner.Timing
+ 45, // 36: provisioner.Timing.start:type_name -> google.protobuf.Timestamp
+ 45, // 37: provisioner.Timing.end:type_name -> google.protobuf.Timestamp
+ 4, // 38: provisioner.Timing.state:type_name -> provisioner.TimingState
+ 30, // 39: provisioner.Request.config:type_name -> provisioner.Config
+ 31, // 40: provisioner.Request.parse:type_name -> provisioner.ParseRequest
+ 33, // 41: provisioner.Request.plan:type_name -> provisioner.PlanRequest
+ 35, // 42: provisioner.Request.apply:type_name -> provisioner.ApplyRequest
+ 38, // 43: provisioner.Request.cancel:type_name -> provisioner.CancelRequest
+ 13, // 44: provisioner.Response.log:type_name -> provisioner.Log
+ 32, // 45: provisioner.Response.parse:type_name -> provisioner.ParseComplete
+ 34, // 46: provisioner.Response.plan:type_name -> provisioner.PlanComplete
+ 36, // 47: provisioner.Response.apply:type_name -> provisioner.ApplyComplete
+ 39, // 48: provisioner.Provisioner.Session:input_type -> provisioner.Request
+ 40, // 49: provisioner.Provisioner.Session:output_type -> provisioner.Response
+ 49, // [49:50] is the sub-list for method output_type
+ 48, // [48:49] is the sub-list for method input_type
+ 48, // [48:48] is the sub-list for extension type_name
+ 48, // [48:48] is the sub-list for extension extendee
+ 0, // [0:48] is the sub-list for field type_name
}
func init() { file_provisionersdk_proto_provisioner_proto_init() }
@@ -4204,7 +4277,7 @@ func file_provisionersdk_proto_provisioner_proto_init() {
}
}
file_provisionersdk_proto_provisioner_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Metadata); i {
+ switch v := v.(*Role); i {
case 0:
return &v.state
case 1:
@@ -4216,7 +4289,7 @@ func file_provisionersdk_proto_provisioner_proto_init() {
}
}
file_provisionersdk_proto_provisioner_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Config); i {
+ switch v := v.(*Metadata); i {
case 0:
return &v.state
case 1:
@@ -4228,7 +4301,7 @@ func file_provisionersdk_proto_provisioner_proto_init() {
}
}
file_provisionersdk_proto_provisioner_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ParseRequest); i {
+ switch v := v.(*Config); i {
case 0:
return &v.state
case 1:
@@ -4240,7 +4313,7 @@ func file_provisionersdk_proto_provisioner_proto_init() {
}
}
file_provisionersdk_proto_provisioner_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ParseComplete); i {
+ switch v := v.(*ParseRequest); i {
case 0:
return &v.state
case 1:
@@ -4252,7 +4325,7 @@ func file_provisionersdk_proto_provisioner_proto_init() {
}
}
file_provisionersdk_proto_provisioner_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PlanRequest); i {
+ switch v := v.(*ParseComplete); i {
case 0:
return &v.state
case 1:
@@ -4264,7 +4337,7 @@ func file_provisionersdk_proto_provisioner_proto_init() {
}
}
file_provisionersdk_proto_provisioner_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PlanComplete); i {
+ switch v := v.(*PlanRequest); i {
case 0:
return &v.state
case 1:
@@ -4276,7 +4349,7 @@ func file_provisionersdk_proto_provisioner_proto_init() {
}
}
file_provisionersdk_proto_provisioner_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ApplyRequest); i {
+ switch v := v.(*PlanComplete); i {
case 0:
return &v.state
case 1:
@@ -4288,7 +4361,7 @@ func file_provisionersdk_proto_provisioner_proto_init() {
}
}
file_provisionersdk_proto_provisioner_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ApplyComplete); i {
+ switch v := v.(*ApplyRequest); i {
case 0:
return &v.state
case 1:
@@ -4300,7 +4373,7 @@ func file_provisionersdk_proto_provisioner_proto_init() {
}
}
file_provisionersdk_proto_provisioner_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Timing); i {
+ switch v := v.(*ApplyComplete); i {
case 0:
return &v.state
case 1:
@@ -4312,7 +4385,7 @@ func file_provisionersdk_proto_provisioner_proto_init() {
}
}
file_provisionersdk_proto_provisioner_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CancelRequest); i {
+ switch v := v.(*Timing); i {
case 0:
return &v.state
case 1:
@@ -4324,7 +4397,7 @@ func file_provisionersdk_proto_provisioner_proto_init() {
}
}
file_provisionersdk_proto_provisioner_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Request); i {
+ switch v := v.(*CancelRequest); i {
case 0:
return &v.state
case 1:
@@ -4336,7 +4409,7 @@ func file_provisionersdk_proto_provisioner_proto_init() {
}
}
file_provisionersdk_proto_provisioner_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Response); i {
+ switch v := v.(*Request); i {
case 0:
return &v.state
case 1:
@@ -4348,6 +4421,18 @@ func file_provisionersdk_proto_provisioner_proto_init() {
}
}
file_provisionersdk_proto_provisioner_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Response); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_provisionersdk_proto_provisioner_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Agent_Metadata); i {
case 0:
return &v.state
@@ -4359,7 +4444,7 @@ func file_provisionersdk_proto_provisioner_proto_init() {
return nil
}
}
- file_provisionersdk_proto_provisioner_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
+ file_provisionersdk_proto_provisioner_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Resource_Metadata); i {
case 0:
return &v.state
@@ -4377,14 +4462,14 @@ func file_provisionersdk_proto_provisioner_proto_init() {
(*Agent_Token)(nil),
(*Agent_InstanceId)(nil),
}
- file_provisionersdk_proto_provisioner_proto_msgTypes[33].OneofWrappers = []interface{}{
+ file_provisionersdk_proto_provisioner_proto_msgTypes[34].OneofWrappers = []interface{}{
(*Request_Config)(nil),
(*Request_Parse)(nil),
(*Request_Plan)(nil),
(*Request_Apply)(nil),
(*Request_Cancel)(nil),
}
- file_provisionersdk_proto_provisioner_proto_msgTypes[34].OneofWrappers = []interface{}{
+ file_provisionersdk_proto_provisioner_proto_msgTypes[35].OneofWrappers = []interface{}{
(*Response_Log)(nil),
(*Response_Parse)(nil),
(*Response_Plan)(nil),
@@ -4396,7 +4481,7 @@ func file_provisionersdk_proto_provisioner_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_provisionersdk_proto_provisioner_proto_rawDesc,
NumEnums: 5,
- NumMessages: 39,
+ NumMessages: 40,
NumExtensions: 0,
NumServices: 1,
},
diff --git a/provisionersdk/proto/provisioner.proto b/provisionersdk/proto/provisioner.proto
index 55d98e51fca7e..9573b84876116 100644
--- a/provisionersdk/proto/provisioner.proto
+++ b/provisionersdk/proto/provisioner.proto
@@ -255,6 +255,11 @@ enum WorkspaceTransition {
DESTROY = 2;
}
+message Role {
+ string name = 1;
+ string org_id = 2;
+}
+
// Metadata is information about a workspace used in the execution of a build
message Metadata {
string coder_url = 1;
@@ -275,6 +280,7 @@ message Metadata {
string workspace_owner_ssh_private_key = 16;
string workspace_build_id = 17;
string workspace_owner_login_type = 18;
+ repeated Role workspace_owner_rbac_roles = 19;
}
// Config represents execution configuration shared by all subsequent requests in the Session
diff --git a/site/e2e/provisionerGenerated.ts b/site/e2e/provisionerGenerated.ts
index 6943c54a30dae..737c291e8bfe1 100644
--- a/site/e2e/provisionerGenerated.ts
+++ b/site/e2e/provisionerGenerated.ts
@@ -269,6 +269,11 @@ export interface Module {
key: string;
}
+export interface Role {
+ name: string;
+ orgId: string;
+}
+
/** Metadata is information about a workspace used in the execution of a build */
export interface Metadata {
coderUrl: string;
@@ -289,6 +294,7 @@ export interface Metadata {
workspaceOwnerSshPrivateKey: string;
workspaceBuildId: string;
workspaceOwnerLoginType: string;
+ workspaceOwnerRbacRoles: Role[];
}
/** Config represents execution configuration shared by all subsequent requests in the Session */
@@ -905,6 +911,18 @@ export const Module = {
},
};
+export const Role = {
+ encode(message: Role, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer {
+ if (message.name !== "") {
+ writer.uint32(10).string(message.name);
+ }
+ if (message.orgId !== "") {
+ writer.uint32(18).string(message.orgId);
+ }
+ return writer;
+ },
+};
+
export const Metadata = {
encode(message: Metadata, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer {
if (message.coderUrl !== "") {
@@ -961,6 +979,9 @@ export const Metadata = {
if (message.workspaceOwnerLoginType !== "") {
writer.uint32(146).string(message.workspaceOwnerLoginType);
}
+ for (const v of message.workspaceOwnerRbacRoles) {
+ Role.encode(v!, writer.uint32(154).fork()).ldelim();
+ }
return writer;
},
};
From d0e20606924077497f8b1b327b04d601fa20f57e Mon Sep 17 00:00:00 2001
From: Thomas Kosiewski
Date: Mon, 3 Mar 2025 04:47:42 +0100
Subject: [PATCH 36/44] feat(agent): add second SSH listener on port 22
(#16627)
Fixes: https://github.com/coder/internal/issues/377
Added an additional SSH listener on port 22, so the agent now listens on both port 1 and port 22.
---
Change-Id: Ifd986b260f8ac317e37d65111cd4e0bd1dc38af8
Signed-off-by: Thomas Kosiewski
---
agent/agent.go | 25 ++--
agent/agent_test.go | 199 ++++++++++++++++----------
agent/usershell/usershell_darwin.go | 2 +-
codersdk/workspacesdk/agentconn.go | 18 ++-
codersdk/workspacesdk/workspacesdk.go | 1 +
tailnet/conn.go | 3 +-
6 files changed, 153 insertions(+), 95 deletions(-)
diff --git a/agent/agent.go b/agent/agent.go
index 614ae0fdd0e65..40e5de7356d9c 100644
--- a/agent/agent.go
+++ b/agent/agent.go
@@ -1362,19 +1362,22 @@ func (a *agent) createTailnet(
return nil, xerrors.Errorf("update host signer: %w", err)
}
- sshListener, err := network.Listen("tcp", ":"+strconv.Itoa(workspacesdk.AgentSSHPort))
- if err != nil {
- return nil, xerrors.Errorf("listen on the ssh port: %w", err)
- }
- defer func() {
+ for _, port := range []int{workspacesdk.AgentSSHPort, workspacesdk.AgentStandardSSHPort} {
+ sshListener, err := network.Listen("tcp", ":"+strconv.Itoa(port))
if err != nil {
- _ = sshListener.Close()
+ return nil, xerrors.Errorf("listen on the ssh port (%v): %w", port, err)
+ }
+ // nolint:revive // We do want to run the deferred functions when createTailnet returns.
+ defer func() {
+ if err != nil {
+ _ = sshListener.Close()
+ }
+ }()
+ if err = a.trackGoroutine(func() {
+ _ = a.sshServer.Serve(sshListener)
+ }); err != nil {
+ return nil, err
}
- }()
- if err = a.trackGoroutine(func() {
- _ = a.sshServer.Serve(sshListener)
- }); err != nil {
- return nil, err
}
reconnectingPTYListener, err := network.Listen("tcp", ":"+strconv.Itoa(workspacesdk.AgentReconnectingPTYPort))
diff --git a/agent/agent_test.go b/agent/agent_test.go
index 6e27f525f8cb4..8466c4e0961b4 100644
--- a/agent/agent_test.go
+++ b/agent/agent_test.go
@@ -65,38 +65,48 @@ func TestMain(m *testing.M) {
goleak.VerifyTestMain(m, testutil.GoleakOptions...)
}
+var sshPorts = []uint16{workspacesdk.AgentSSHPort, workspacesdk.AgentStandardSSHPort}
+
// NOTE: These tests only work when your default shell is bash for some reason.
func TestAgent_Stats_SSH(t *testing.T) {
t.Parallel()
- ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
- defer cancel()
- //nolint:dogsled
- conn, _, stats, _, _ := setupAgent(t, agentsdk.Manifest{}, 0)
+ for _, port := range sshPorts {
+ port := port
+ t.Run(fmt.Sprintf("(:%d)", port), func(t *testing.T) {
+ t.Parallel()
- sshClient, err := conn.SSHClient(ctx)
- require.NoError(t, err)
- defer sshClient.Close()
- session, err := sshClient.NewSession()
- require.NoError(t, err)
- defer session.Close()
- stdin, err := session.StdinPipe()
- require.NoError(t, err)
- err = session.Shell()
- require.NoError(t, err)
+ ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
+ defer cancel()
- var s *proto.Stats
- require.Eventuallyf(t, func() bool {
- var ok bool
- s, ok = <-stats
- return ok && s.ConnectionCount > 0 && s.RxBytes > 0 && s.TxBytes > 0 && s.SessionCountSsh == 1
- }, testutil.WaitLong, testutil.IntervalFast,
- "never saw stats: %+v", s,
- )
- _ = stdin.Close()
- err = session.Wait()
- require.NoError(t, err)
+ //nolint:dogsled
+ conn, _, stats, _, _ := setupAgent(t, agentsdk.Manifest{}, 0)
+
+ sshClient, err := conn.SSHClientOnPort(ctx, port)
+ require.NoError(t, err)
+ defer sshClient.Close()
+ session, err := sshClient.NewSession()
+ require.NoError(t, err)
+ defer session.Close()
+ stdin, err := session.StdinPipe()
+ require.NoError(t, err)
+ err = session.Shell()
+ require.NoError(t, err)
+
+ var s *proto.Stats
+ require.Eventuallyf(t, func() bool {
+ var ok bool
+ s, ok = <-stats
+ return ok && s.ConnectionCount > 0 && s.RxBytes > 0 && s.TxBytes > 0 && s.SessionCountSsh == 1
+ }, testutil.WaitLong, testutil.IntervalFast,
+ "never saw stats: %+v", s,
+ )
+ _ = stdin.Close()
+ err = session.Wait()
+ require.NoError(t, err)
+ })
+ }
}
func TestAgent_Stats_ReconnectingPTY(t *testing.T) {
@@ -278,15 +288,23 @@ func TestAgent_Stats_Magic(t *testing.T) {
func TestAgent_SessionExec(t *testing.T) {
t.Parallel()
- session := setupSSHSession(t, agentsdk.Manifest{}, codersdk.ServiceBannerConfig{}, nil)
- command := "echo test"
- if runtime.GOOS == "windows" {
- command = "cmd.exe /c echo test"
+ for _, port := range sshPorts {
+ port := port
+ t.Run(fmt.Sprintf("(:%d)", port), func(t *testing.T) {
+ t.Parallel()
+
+ session := setupSSHSessionOnPort(t, agentsdk.Manifest{}, codersdk.ServiceBannerConfig{}, nil, port)
+
+ command := "echo test"
+ if runtime.GOOS == "windows" {
+ command = "cmd.exe /c echo test"
+ }
+ output, err := session.Output(command)
+ require.NoError(t, err)
+ require.Equal(t, "test", strings.TrimSpace(string(output)))
+ })
}
- output, err := session.Output(command)
- require.NoError(t, err)
- require.Equal(t, "test", strings.TrimSpace(string(output)))
}
//nolint:tparallel // Sub tests need to run sequentially.
@@ -396,25 +414,33 @@ func TestAgent_SessionTTYShell(t *testing.T) {
// it seems like it could be either.
t.Skip("ConPTY appears to be inconsistent on Windows.")
}
- session := setupSSHSession(t, agentsdk.Manifest{}, codersdk.ServiceBannerConfig{}, nil)
- command := "sh"
- if runtime.GOOS == "windows" {
- command = "cmd.exe"
+
+ for _, port := range sshPorts {
+ port := port
+ t.Run(fmt.Sprintf("(%d)", port), func(t *testing.T) {
+ t.Parallel()
+
+ session := setupSSHSessionOnPort(t, agentsdk.Manifest{}, codersdk.ServiceBannerConfig{}, nil, port)
+ command := "sh"
+ if runtime.GOOS == "windows" {
+ command = "cmd.exe"
+ }
+ err := session.RequestPty("xterm", 128, 128, ssh.TerminalModes{})
+ require.NoError(t, err)
+ ptty := ptytest.New(t)
+ session.Stdout = ptty.Output()
+ session.Stderr = ptty.Output()
+ session.Stdin = ptty.Input()
+ err = session.Start(command)
+ require.NoError(t, err)
+ _ = ptty.Peek(ctx, 1) // wait for the prompt
+ ptty.WriteLine("echo test")
+ ptty.ExpectMatch("test")
+ ptty.WriteLine("exit")
+ err = session.Wait()
+ require.NoError(t, err)
+ })
}
- err := session.RequestPty("xterm", 128, 128, ssh.TerminalModes{})
- require.NoError(t, err)
- ptty := ptytest.New(t)
- session.Stdout = ptty.Output()
- session.Stderr = ptty.Output()
- session.Stdin = ptty.Input()
- err = session.Start(command)
- require.NoError(t, err)
- _ = ptty.Peek(ctx, 1) // wait for the prompt
- ptty.WriteLine("echo test")
- ptty.ExpectMatch("test")
- ptty.WriteLine("exit")
- err = session.Wait()
- require.NoError(t, err)
}
func TestAgent_SessionTTYExitCode(t *testing.T) {
@@ -608,37 +634,41 @@ func TestAgent_Session_TTY_MOTD_Update(t *testing.T) {
//nolint:dogsled // Allow the blank identifiers.
conn, client, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0, setSBInterval)
- sshClient, err := conn.SSHClient(ctx)
- require.NoError(t, err)
- t.Cleanup(func() {
- _ = sshClient.Close()
- })
-
//nolint:paralleltest // These tests need to swap the banner func.
- for i, test := range tests {
- test := test
- t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
- // Set new banner func and wait for the agent to call it to update the
- // banner.
- ready := make(chan struct{}, 2)
- client.SetAnnouncementBannersFunc(func() ([]codersdk.BannerConfig, error) {
- select {
- case ready <- struct{}{}:
- default:
- }
- return []codersdk.BannerConfig{test.banner}, nil
- })
- <-ready
- <-ready // Wait for two updates to ensure the value has propagated.
-
- session, err := sshClient.NewSession()
- require.NoError(t, err)
- t.Cleanup(func() {
- _ = session.Close()
- })
+ for _, port := range sshPorts {
+ port := port
- testSessionOutput(t, session, test.expected, test.unexpected, nil)
+ sshClient, err := conn.SSHClientOnPort(ctx, port)
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ _ = sshClient.Close()
})
+
+ for i, test := range tests {
+ test := test
+ t.Run(fmt.Sprintf("(:%d)/%d", port, i), func(t *testing.T) {
+ // Set new banner func and wait for the agent to call it to update the
+ // banner.
+ ready := make(chan struct{}, 2)
+ client.SetAnnouncementBannersFunc(func() ([]codersdk.BannerConfig, error) {
+ select {
+ case ready <- struct{}{}:
+ default:
+ }
+ return []codersdk.BannerConfig{test.banner}, nil
+ })
+ <-ready
+ <-ready // Wait for two updates to ensure the value has propagated.
+
+ session, err := sshClient.NewSession()
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ _ = session.Close()
+ })
+
+ testSessionOutput(t, session, test.expected, test.unexpected, nil)
+ })
+ }
}
}
@@ -2424,6 +2454,17 @@ func setupSSHSession(
banner codersdk.BannerConfig,
prepareFS func(fs afero.Fs),
opts ...func(*agenttest.Client, *agent.Options),
+) *ssh.Session {
+ return setupSSHSessionOnPort(t, manifest, banner, prepareFS, workspacesdk.AgentSSHPort, opts...)
+}
+
+func setupSSHSessionOnPort(
+ t *testing.T,
+ manifest agentsdk.Manifest,
+ banner codersdk.BannerConfig,
+ prepareFS func(fs afero.Fs),
+ port uint16,
+ opts ...func(*agenttest.Client, *agent.Options),
) *ssh.Session {
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
defer cancel()
@@ -2437,7 +2478,7 @@ func setupSSHSession(
if prepareFS != nil {
prepareFS(fs)
}
- sshClient, err := conn.SSHClient(ctx)
+ sshClient, err := conn.SSHClientOnPort(ctx, port)
require.NoError(t, err)
t.Cleanup(func() {
_ = sshClient.Close()
diff --git a/agent/usershell/usershell_darwin.go b/agent/usershell/usershell_darwin.go
index 5f221bc43ed39..acc990db83383 100644
--- a/agent/usershell/usershell_darwin.go
+++ b/agent/usershell/usershell_darwin.go
@@ -18,7 +18,7 @@ func Get(username string) (string, error) {
return "", xerrors.Errorf("username is nonlocal path: %s", username)
}
//nolint: gosec // input checked above
- out, _ := exec.Command("dscl", ".", "-read", filepath.Join("/Users", username), "UserShell").Output()
+ out, _ := exec.Command("dscl", ".", "-read", filepath.Join("/Users", username), "UserShell").Output() //nolint:gocritic
s, ok := strings.CutPrefix(string(out), "UserShell: ")
if ok {
return strings.TrimSpace(s), nil
diff --git a/codersdk/workspacesdk/agentconn.go b/codersdk/workspacesdk/agentconn.go
index 6fa06c0ab5bd6..ef0c292e010e9 100644
--- a/codersdk/workspacesdk/agentconn.go
+++ b/codersdk/workspacesdk/agentconn.go
@@ -165,6 +165,12 @@ func (c *AgentConn) ReconnectingPTY(ctx context.Context, id uuid.UUID, height, w
// SSH pipes the SSH protocol over the returned net.Conn.
// This connects to the built-in SSH server in the workspace agent.
func (c *AgentConn) SSH(ctx context.Context) (*gonet.TCPConn, error) {
+ return c.SSHOnPort(ctx, AgentSSHPort)
+}
+
+// SSHOnPort pipes the SSH protocol over the returned net.Conn.
+// This connects to the built-in SSH server in the workspace agent on the specified port.
+func (c *AgentConn) SSHOnPort(ctx context.Context, port uint16) (*gonet.TCPConn, error) {
ctx, span := tracing.StartSpan(ctx)
defer span.End()
@@ -172,17 +178,23 @@ func (c *AgentConn) SSH(ctx context.Context) (*gonet.TCPConn, error) {
return nil, xerrors.Errorf("workspace agent not reachable in time: %v", ctx.Err())
}
- c.Conn.SendConnectedTelemetry(c.agentAddress(), tailnet.TelemetryApplicationSSH)
- return c.Conn.DialContextTCP(ctx, netip.AddrPortFrom(c.agentAddress(), AgentSSHPort))
+ c.SendConnectedTelemetry(c.agentAddress(), tailnet.TelemetryApplicationSSH)
+ return c.DialContextTCP(ctx, netip.AddrPortFrom(c.agentAddress(), port))
}
// SSHClient calls SSH to create a client that uses a weak cipher
// to improve throughput.
func (c *AgentConn) SSHClient(ctx context.Context) (*ssh.Client, error) {
+ return c.SSHClientOnPort(ctx, AgentSSHPort)
+}
+
+// SSHClientOnPort calls SSH to create a client on a specific port
+// that uses a weak cipher to improve throughput.
+func (c *AgentConn) SSHClientOnPort(ctx context.Context, port uint16) (*ssh.Client, error) {
ctx, span := tracing.StartSpan(ctx)
defer span.End()
- netConn, err := c.SSH(ctx)
+ netConn, err := c.SSHOnPort(ctx, port)
if err != nil {
return nil, xerrors.Errorf("ssh: %w", err)
}
diff --git a/codersdk/workspacesdk/workspacesdk.go b/codersdk/workspacesdk/workspacesdk.go
index 9f50622635568..08aabe9d5f699 100644
--- a/codersdk/workspacesdk/workspacesdk.go
+++ b/codersdk/workspacesdk/workspacesdk.go
@@ -31,6 +31,7 @@ var ErrSkipClose = xerrors.New("skip tailnet close")
const (
AgentSSHPort = tailnet.WorkspaceAgentSSHPort
+ AgentStandardSSHPort = tailnet.WorkspaceAgentStandardSSHPort
AgentReconnectingPTYPort = tailnet.WorkspaceAgentReconnectingPTYPort
AgentSpeedtestPort = tailnet.WorkspaceAgentSpeedtestPort
// AgentHTTPAPIServerPort serves a HTTP server with endpoints for e.g.
diff --git a/tailnet/conn.go b/tailnet/conn.go
index 6487dff4e8550..8f7f8ef7287a2 100644
--- a/tailnet/conn.go
+++ b/tailnet/conn.go
@@ -52,6 +52,7 @@ const (
WorkspaceAgentSSHPort = 1
WorkspaceAgentReconnectingPTYPort = 2
WorkspaceAgentSpeedtestPort = 3
+ WorkspaceAgentStandardSSHPort = 22
)
// EnvMagicsockDebugLogging enables super-verbose logging for the magicsock
@@ -745,7 +746,7 @@ func (c *Conn) forwardTCP(src, dst netip.AddrPort) (handler func(net.Conn), opts
return nil, nil, false
}
// See: https://github.com/tailscale/tailscale/blob/c7cea825aea39a00aca71ea02bab7266afc03e7c/wgengine/netstack/netstack.go#L888
- if dst.Port() == WorkspaceAgentSSHPort || dst.Port() == 22 {
+ if dst.Port() == WorkspaceAgentSSHPort || dst.Port() == WorkspaceAgentStandardSSHPort {
opt := tcpip.KeepaliveIdleOption(72 * time.Hour)
opts = append(opts, &opt)
}
From c074f77a4f75704d872afcee0e99a12efc924e35 Mon Sep 17 00:00:00 2001
From: Vincent Vielle
Date: Mon, 3 Mar 2025 10:12:48 +0100
Subject: [PATCH 37/44] feat: add notifications inbox db (#16599)
This PR is linked [to the following
issue](https://github.com/coder/internal/issues/334).
The objective is to create the DB layer and migration for the new `Coder
Inbox`.
---
coderd/apidoc/docs.go | 2 +
coderd/apidoc/swagger.json | 2 +
coderd/database/dbauthz/dbauthz.go | 33 +++
coderd/database/dbauthz/dbauthz_test.go | 135 ++++++++++
coderd/database/dbgen/dbgen.go | 16 ++
coderd/database/dbmem/dbmem.go | 130 ++++++++++
coderd/database/dbmetrics/querymetrics.go | 42 ++++
coderd/database/dbmock/dbmock.go | 89 +++++++
coderd/database/dump.sql | 32 +++
coderd/database/foreign_key_constraint.go | 2 +
.../000297_notifications_inbox.down.sql | 3 +
.../000297_notifications_inbox.up.sql | 17 ++
.../000297_notifications_inbox.up.sql | 25 ++
coderd/database/modelmethods.go | 6 +
coderd/database/models.go | 74 ++++++
coderd/database/querier.go | 18 ++
coderd/database/queries.sql.go | 237 ++++++++++++++++++
.../database/queries/notificationsinbox.sql | 59 +++++
coderd/database/unique_constraint.go | 1 +
coderd/rbac/object_gen.go | 10 +
coderd/rbac/policy/policy.go | 7 +
coderd/rbac/roles_test.go | 11 +
codersdk/rbacresources_gen.go | 2 +
docs/reference/api/members.md | 5 +
docs/reference/api/schemas.md | 1 +
site/src/api/rbacresourcesGenerated.ts | 5 +
site/src/api/typesGenerated.ts | 2 +
27 files changed, 966 insertions(+)
create mode 100644 coderd/database/migrations/000297_notifications_inbox.down.sql
create mode 100644 coderd/database/migrations/000297_notifications_inbox.up.sql
create mode 100644 coderd/database/migrations/testdata/fixtures/000297_notifications_inbox.up.sql
create mode 100644 coderd/database/queries/notificationsinbox.sql
diff --git a/coderd/apidoc/docs.go b/coderd/apidoc/docs.go
index 125cf4faa5ba1..2612083ba74dc 100644
--- a/coderd/apidoc/docs.go
+++ b/coderd/apidoc/docs.go
@@ -13740,6 +13740,7 @@ const docTemplate = `{
"group",
"group_member",
"idpsync_settings",
+ "inbox_notification",
"license",
"notification_message",
"notification_preference",
@@ -13775,6 +13776,7 @@ const docTemplate = `{
"ResourceGroup",
"ResourceGroupMember",
"ResourceIdpsyncSettings",
+ "ResourceInboxNotification",
"ResourceLicense",
"ResourceNotificationMessage",
"ResourceNotificationPreference",
diff --git a/coderd/apidoc/swagger.json b/coderd/apidoc/swagger.json
index 104d6fd70e077..27fea243afdd9 100644
--- a/coderd/apidoc/swagger.json
+++ b/coderd/apidoc/swagger.json
@@ -12429,6 +12429,7 @@
"group",
"group_member",
"idpsync_settings",
+ "inbox_notification",
"license",
"notification_message",
"notification_preference",
@@ -12464,6 +12465,7 @@
"ResourceGroup",
"ResourceGroupMember",
"ResourceIdpsyncSettings",
+ "ResourceInboxNotification",
"ResourceLicense",
"ResourceNotificationMessage",
"ResourceNotificationPreference",
diff --git a/coderd/database/dbauthz/dbauthz.go b/coderd/database/dbauthz/dbauthz.go
index 877727069ab76..a39ba8d4172f0 100644
--- a/coderd/database/dbauthz/dbauthz.go
+++ b/coderd/database/dbauthz/dbauthz.go
@@ -281,6 +281,7 @@ var (
DisplayName: "Notifier",
Site: rbac.Permissions(map[string][]policy.Action{
rbac.ResourceNotificationMessage.Type: {policy.ActionCreate, policy.ActionRead, policy.ActionUpdate, policy.ActionDelete},
+ rbac.ResourceInboxNotification.Type: {policy.ActionCreate},
}),
Org: map[string][]rbac.Permission{},
User: []rbac.Permission{},
@@ -1126,6 +1127,14 @@ func (q *querier) CleanTailnetTunnels(ctx context.Context) error {
return q.db.CleanTailnetTunnels(ctx)
}
+func (q *querier) CountUnreadInboxNotificationsByUserID(ctx context.Context, userID uuid.UUID) (int64, error) {
+ if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceInboxNotification.WithOwner(userID.String())); err != nil {
+ return 0, err
+ }
+ return q.db.CountUnreadInboxNotificationsByUserID(ctx, userID)
+}
+
+// TODO: Handle org scoped lookups
func (q *querier) CustomRoles(ctx context.Context, arg database.CustomRolesParams) ([]database.CustomRole, error) {
roleObject := rbac.ResourceAssignRole
if arg.OrganizationID != uuid.Nil {
@@ -1689,6 +1698,10 @@ func (q *querier) GetFileTemplates(ctx context.Context, fileID uuid.UUID) ([]dat
return q.db.GetFileTemplates(ctx, fileID)
}
+func (q *querier) GetFilteredInboxNotificationsByUserID(ctx context.Context, arg database.GetFilteredInboxNotificationsByUserIDParams) ([]database.InboxNotification, error) {
+ return fetchWithPostFilter(q.auth, policy.ActionRead, q.db.GetFilteredInboxNotificationsByUserID)(ctx, arg)
+}
+
func (q *querier) GetGitSSHKey(ctx context.Context, userID uuid.UUID) (database.GitSSHKey, error) {
return fetchWithAction(q.log, q.auth, policy.ActionReadPersonal, q.db.GetGitSSHKey)(ctx, userID)
}
@@ -1748,6 +1761,14 @@ func (q *querier) GetHungProvisionerJobs(ctx context.Context, hungSince time.Tim
return q.db.GetHungProvisionerJobs(ctx, hungSince)
}
+func (q *querier) GetInboxNotificationByID(ctx context.Context, id uuid.UUID) (database.InboxNotification, error) {
+ return fetchWithAction(q.log, q.auth, policy.ActionRead, q.db.GetInboxNotificationByID)(ctx, id)
+}
+
+func (q *querier) GetInboxNotificationsByUserID(ctx context.Context, userID database.GetInboxNotificationsByUserIDParams) ([]database.InboxNotification, error) {
+ return fetchWithPostFilter(q.auth, policy.ActionRead, q.db.GetInboxNotificationsByUserID)(ctx, userID)
+}
+
func (q *querier) GetJFrogXrayScanByWorkspaceAndAgentID(ctx context.Context, arg database.GetJFrogXrayScanByWorkspaceAndAgentIDParams) (database.JfrogXrayScan, error) {
if _, err := fetch(q.log, q.auth, q.db.GetWorkspaceByID)(ctx, arg.WorkspaceID); err != nil {
return database.JfrogXrayScan{}, err
@@ -3079,6 +3100,10 @@ func (q *querier) InsertGroupMember(ctx context.Context, arg database.InsertGrou
return update(q.log, q.auth, fetch, q.db.InsertGroupMember)(ctx, arg)
}
+func (q *querier) InsertInboxNotification(ctx context.Context, arg database.InsertInboxNotificationParams) (database.InboxNotification, error) {
+ return insert(q.log, q.auth, rbac.ResourceInboxNotification.WithOwner(arg.UserID.String()), q.db.InsertInboxNotification)(ctx, arg)
+}
+
func (q *querier) InsertLicense(ctx context.Context, arg database.InsertLicenseParams) (database.License, error) {
if err := q.authorizeContext(ctx, policy.ActionCreate, rbac.ResourceLicense); err != nil {
return database.License{}, err
@@ -3666,6 +3691,14 @@ func (q *querier) UpdateInactiveUsersToDormant(ctx context.Context, lastSeenAfte
return q.db.UpdateInactiveUsersToDormant(ctx, lastSeenAfter)
}
+func (q *querier) UpdateInboxNotificationReadStatus(ctx context.Context, args database.UpdateInboxNotificationReadStatusParams) error {
+ fetchFunc := func(ctx context.Context, args database.UpdateInboxNotificationReadStatusParams) (database.InboxNotification, error) {
+ return q.db.GetInboxNotificationByID(ctx, args.ID)
+ }
+
+ return update(q.log, q.auth, fetchFunc, q.db.UpdateInboxNotificationReadStatus)(ctx, args)
+}
+
func (q *querier) UpdateMemberRoles(ctx context.Context, arg database.UpdateMemberRolesParams) (database.OrganizationMember, error) {
// Authorized fetch will check that the actor has read access to the org member since the org member is returned.
member, err := database.ExpectOne(q.OrganizationMembers(ctx, database.OrganizationMembersParams{
diff --git a/coderd/database/dbauthz/dbauthz_test.go b/coderd/database/dbauthz/dbauthz_test.go
index 1f2ae5eca62c4..12d6d8804e3e4 100644
--- a/coderd/database/dbauthz/dbauthz_test.go
+++ b/coderd/database/dbauthz/dbauthz_test.go
@@ -4466,6 +4466,141 @@ func (s *MethodTestSuite) TestNotifications() {
Disableds: []bool{true, false},
}).Asserts(rbac.ResourceNotificationPreference.WithOwner(user.ID.String()), policy.ActionUpdate)
}))
+
+ s.Run("GetInboxNotificationsByUserID", s.Subtest(func(db database.Store, check *expects) {
+ u := dbgen.User(s.T(), db, database.User{})
+
+ notifID := uuid.New()
+
+ notif := dbgen.NotificationInbox(s.T(), db, database.InsertInboxNotificationParams{
+ ID: notifID,
+ UserID: u.ID,
+ TemplateID: notifications.TemplateWorkspaceAutoUpdated,
+ Title: "test title",
+ Content: "test content notification",
+ Icon: "https://coder.com/favicon.ico",
+ Actions: json.RawMessage("{}"),
+ })
+
+ check.Args(database.GetInboxNotificationsByUserIDParams{
+ UserID: u.ID,
+ ReadStatus: database.InboxNotificationReadStatusAll,
+ }).Asserts(rbac.ResourceInboxNotification.WithID(notifID).WithOwner(u.ID.String()), policy.ActionRead).Returns([]database.InboxNotification{notif})
+ }))
+
+ s.Run("GetFilteredInboxNotificationsByUserID", s.Subtest(func(db database.Store, check *expects) {
+ u := dbgen.User(s.T(), db, database.User{})
+
+ notifID := uuid.New()
+
+ targets := []uuid.UUID{u.ID, notifications.TemplateWorkspaceAutoUpdated}
+
+ notif := dbgen.NotificationInbox(s.T(), db, database.InsertInboxNotificationParams{
+ ID: notifID,
+ UserID: u.ID,
+ TemplateID: notifications.TemplateWorkspaceAutoUpdated,
+ Targets: targets,
+ Title: "test title",
+ Content: "test content notification",
+ Icon: "https://coder.com/favicon.ico",
+ Actions: json.RawMessage("{}"),
+ })
+
+ check.Args(database.GetFilteredInboxNotificationsByUserIDParams{
+ UserID: u.ID,
+ Templates: []uuid.UUID{notifications.TemplateWorkspaceAutoUpdated},
+ Targets: []uuid.UUID{u.ID},
+ ReadStatus: database.InboxNotificationReadStatusAll,
+ }).Asserts(rbac.ResourceInboxNotification.WithID(notifID).WithOwner(u.ID.String()), policy.ActionRead).Returns([]database.InboxNotification{notif})
+ }))
+
+ s.Run("GetInboxNotificationByID", s.Subtest(func(db database.Store, check *expects) {
+ u := dbgen.User(s.T(), db, database.User{})
+
+ notifID := uuid.New()
+
+ targets := []uuid.UUID{u.ID, notifications.TemplateWorkspaceAutoUpdated}
+
+ notif := dbgen.NotificationInbox(s.T(), db, database.InsertInboxNotificationParams{
+ ID: notifID,
+ UserID: u.ID,
+ TemplateID: notifications.TemplateWorkspaceAutoUpdated,
+ Targets: targets,
+ Title: "test title",
+ Content: "test content notification",
+ Icon: "https://coder.com/favicon.ico",
+ Actions: json.RawMessage("{}"),
+ })
+
+ check.Args(notifID).Asserts(rbac.ResourceInboxNotification.WithID(notifID).WithOwner(u.ID.String()), policy.ActionRead).Returns(notif)
+ }))
+
+ s.Run("CountUnreadInboxNotificationsByUserID", s.Subtest(func(db database.Store, check *expects) {
+ u := dbgen.User(s.T(), db, database.User{})
+
+ notifID := uuid.New()
+
+ targets := []uuid.UUID{u.ID, notifications.TemplateWorkspaceAutoUpdated}
+
+ _ = dbgen.NotificationInbox(s.T(), db, database.InsertInboxNotificationParams{
+ ID: notifID,
+ UserID: u.ID,
+ TemplateID: notifications.TemplateWorkspaceAutoUpdated,
+ Targets: targets,
+ Title: "test title",
+ Content: "test content notification",
+ Icon: "https://coder.com/favicon.ico",
+ Actions: json.RawMessage("{}"),
+ })
+
+ check.Args(u.ID).Asserts(rbac.ResourceInboxNotification.WithOwner(u.ID.String()), policy.ActionRead).Returns(int64(1))
+ }))
+
+ s.Run("InsertInboxNotification", s.Subtest(func(db database.Store, check *expects) {
+ u := dbgen.User(s.T(), db, database.User{})
+
+ notifID := uuid.New()
+
+ targets := []uuid.UUID{u.ID, notifications.TemplateWorkspaceAutoUpdated}
+
+ check.Args(database.InsertInboxNotificationParams{
+ ID: notifID,
+ UserID: u.ID,
+ TemplateID: notifications.TemplateWorkspaceAutoUpdated,
+ Targets: targets,
+ Title: "test title",
+ Content: "test content notification",
+ Icon: "https://coder.com/favicon.ico",
+ Actions: json.RawMessage("{}"),
+ }).Asserts(rbac.ResourceInboxNotification.WithOwner(u.ID.String()), policy.ActionCreate)
+ }))
+
+ s.Run("UpdateInboxNotificationReadStatus", s.Subtest(func(db database.Store, check *expects) {
+ u := dbgen.User(s.T(), db, database.User{})
+
+ notifID := uuid.New()
+
+ targets := []uuid.UUID{u.ID, notifications.TemplateWorkspaceAutoUpdated}
+ readAt := dbtestutil.NowInDefaultTimezone()
+
+ notif := dbgen.NotificationInbox(s.T(), db, database.InsertInboxNotificationParams{
+ ID: notifID,
+ UserID: u.ID,
+ TemplateID: notifications.TemplateWorkspaceAutoUpdated,
+ Targets: targets,
+ Title: "test title",
+ Content: "test content notification",
+ Icon: "https://coder.com/favicon.ico",
+ Actions: json.RawMessage("{}"),
+ })
+
+ notif.ReadAt = sql.NullTime{Time: readAt, Valid: true}
+
+ check.Args(database.UpdateInboxNotificationReadStatusParams{
+ ID: notifID,
+ ReadAt: sql.NullTime{Time: readAt, Valid: true},
+ }).Asserts(rbac.ResourceInboxNotification.WithID(notifID).WithOwner(u.ID.String()), policy.ActionUpdate)
+ }))
}
func (s *MethodTestSuite) TestOAuth2ProviderApps() {
diff --git a/coderd/database/dbgen/dbgen.go b/coderd/database/dbgen/dbgen.go
index 9c4ebbe8bb8ca..3810fcb5052cf 100644
--- a/coderd/database/dbgen/dbgen.go
+++ b/coderd/database/dbgen/dbgen.go
@@ -450,6 +450,29 @@ func OrganizationMember(t testing.TB, db database.Store, orig database.Organizat
 	return mem
 }
+func NotificationInbox(t testing.TB, db database.Store, orig database.InsertInboxNotificationParams) database.InboxNotification {
+	t.Helper()
+	// actions is declared jsonb NOT NULL in the schema, so default to an
+	// empty JSON object when the caller does not provide one.
+	actions := orig.Actions
+	if len(actions) == 0 {
+		actions = []byte("{}")
+	}
+	notification, err := db.InsertInboxNotification(genCtx, database.InsertInboxNotificationParams{
+		ID:         takeFirst(orig.ID, uuid.New()),
+		UserID:     takeFirst(orig.UserID, uuid.New()),
+		TemplateID: takeFirst(orig.TemplateID, uuid.New()),
+		Targets:    takeFirstSlice(orig.Targets, []uuid.UUID{}),
+		Title:      takeFirst(orig.Title, testutil.GetRandomName(t)),
+		Content:    takeFirst(orig.Content, testutil.GetRandomName(t)),
+		Icon:       takeFirst(orig.Icon, ""),
+		Actions:    actions,
+		CreatedAt:  takeFirst(orig.CreatedAt, dbtime.Now()),
+	})
+	require.NoError(t, err, "insert notification")
+	return notification
+}
+
func Group(t testing.TB, db database.Store, orig database.Group) database.Group {
t.Helper()
diff --git a/coderd/database/dbmem/dbmem.go b/coderd/database/dbmem/dbmem.go
index 6fbafa562d087..65d24bb3434c2 100644
--- a/coderd/database/dbmem/dbmem.go
+++ b/coderd/database/dbmem/dbmem.go
@@ -67,6 +67,7 @@ func New() database.Store {
gitSSHKey: make([]database.GitSSHKey, 0),
notificationMessages: make([]database.NotificationMessage, 0),
notificationPreferences: make([]database.NotificationPreference, 0),
+ InboxNotification: make([]database.InboxNotification, 0),
parameterSchemas: make([]database.ParameterSchema, 0),
provisionerDaemons: make([]database.ProvisionerDaemon, 0),
provisionerKeys: make([]database.ProvisionerKey, 0),
@@ -206,6 +207,7 @@ type data struct {
notificationMessages []database.NotificationMessage
notificationPreferences []database.NotificationPreference
notificationReportGeneratorLogs []database.NotificationReportGeneratorLog
+ InboxNotification []database.InboxNotification
oauth2ProviderApps []database.OAuth2ProviderApp
oauth2ProviderAppSecrets []database.OAuth2ProviderAppSecret
oauth2ProviderAppCodes []database.OAuth2ProviderAppCode
@@ -1606,6 +1608,26 @@ func (*FakeQuerier) CleanTailnetTunnels(context.Context) error {
return ErrUnimplemented
}
+func (q *FakeQuerier) CountUnreadInboxNotificationsByUserID(_ context.Context, userID uuid.UUID) (int64, error) {
+ q.mutex.RLock()
+ defer q.mutex.RUnlock()
+
+ var count int64
+ for _, notification := range q.InboxNotification {
+ if notification.UserID != userID {
+ continue
+ }
+
+ if notification.ReadAt.Valid {
+ continue
+ }
+
+ count++
+ }
+
+ return count, nil
+}
+
func (q *FakeQuerier) CustomRoles(_ context.Context, arg database.CustomRolesParams) ([]database.CustomRole, error) {
q.mutex.Lock()
defer q.mutex.Unlock()
@@ -3130,6 +3152,67 @@ func (q *FakeQuerier) GetFileTemplates(_ context.Context, id uuid.UUID) ([]datab
 	return rows, nil
 }
+func (q *FakeQuerier) GetFilteredInboxNotificationsByUserID(_ context.Context, arg database.GetFilteredInboxNotificationsByUserIDParams) ([]database.InboxNotification, error) {
+	q.mutex.RLock()
+	defer q.mutex.RUnlock()
+
+	notifications := make([]database.InboxNotification, 0)
+	for _, notification := range q.InboxNotification {
+		if notification.UserID != arg.UserID {
+			continue
+		}
+
+		// An empty template filter matches every template.
+		if len(arg.Templates) > 0 {
+			templateFound := false
+			for _, template := range arg.Templates {
+				if notification.TemplateID == template {
+					templateFound = true
+					break
+				}
+			}
+			if !templateFound {
+				continue
+			}
+		}
+
+		// Every requested target must be present on the notification;
+		// an empty target filter matches every notification.
+		targetsMatch := true
+		for _, target := range arg.Targets {
+			targetFound := false
+			for _, insertedTarget := range notification.Targets {
+				if insertedTarget == target {
+					targetFound = true
+					break
+				}
+			}
+			if !targetFound {
+				targetsMatch = false
+				break
+			}
+		}
+		if !targetsMatch {
+			continue
+		}
+
+		switch arg.ReadStatus {
+		case database.InboxNotificationReadStatusRead:
+			if !notification.ReadAt.Valid {
+				continue
+			}
+		case database.InboxNotificationReadStatusUnread:
+			if notification.ReadAt.Valid {
+				continue
+			}
+		}
+
+		notifications = append(notifications, notification)
+	}
+
+	return notifications, nil
+}
+
func (q *FakeQuerier) GetGitSSHKey(_ context.Context, userID uuid.UUID) (database.GitSSHKey, error) {
q.mutex.RLock()
defer q.mutex.RUnlock()
@@ -3328,6 +3389,46 @@ func (q *FakeQuerier) GetHungProvisionerJobs(_ context.Context, hungSince time.T
 	return hungJobs, nil
 }
+func (q *FakeQuerier) GetInboxNotificationByID(_ context.Context, id uuid.UUID) (database.InboxNotification, error) {
+	q.mutex.RLock()
+	defer q.mutex.RUnlock()
+
+	for _, notification := range q.InboxNotification {
+		if notification.ID == id {
+			return notification, nil
+		}
+	}
+
+	return database.InboxNotification{}, sql.ErrNoRows
+}
+
+func (q *FakeQuerier) GetInboxNotificationsByUserID(_ context.Context, params database.GetInboxNotificationsByUserIDParams) ([]database.InboxNotification, error) {
+	q.mutex.RLock()
+	defer q.mutex.RUnlock()
+
+	notifications := make([]database.InboxNotification, 0)
+	for _, notification := range q.InboxNotification {
+		if notification.UserID != params.UserID {
+			continue
+		}
+		// Honor the read-status filter; "all" (the default case)
+		// matches both read and unread notifications.
+		switch params.ReadStatus {
+		case database.InboxNotificationReadStatusRead:
+			if !notification.ReadAt.Valid {
+				continue
+			}
+		case database.InboxNotificationReadStatusUnread:
+			if notification.ReadAt.Valid {
+				continue
+			}
+		}
+		notifications = append(notifications, notification)
+	}
+
+	return notifications, nil
+}
+
func (q *FakeQuerier) GetJFrogXrayScanByWorkspaceAndAgentID(_ context.Context, arg database.GetJFrogXrayScanByWorkspaceAndAgentIDParams) (database.JfrogXrayScan, error) {
err := validateDatabaseType(arg)
if err != nil {
@@ -7965,6 +8053,30 @@ func (q *FakeQuerier) InsertGroupMember(_ context.Context, arg database.InsertGr
return nil
}
+func (q *FakeQuerier) InsertInboxNotification(_ context.Context, arg database.InsertInboxNotificationParams) (database.InboxNotification, error) {
+ if err := validateDatabaseType(arg); err != nil {
+ return database.InboxNotification{}, err
+ }
+
+ q.mutex.Lock()
+ defer q.mutex.Unlock()
+
+ notification := database.InboxNotification{
+ ID: arg.ID,
+ UserID: arg.UserID,
+ TemplateID: arg.TemplateID,
+ Targets: arg.Targets,
+ Title: arg.Title,
+ Content: arg.Content,
+ Icon: arg.Icon,
+ Actions: arg.Actions,
+ CreatedAt: time.Now(),
+ }
+
+ q.InboxNotification = append(q.InboxNotification, notification)
+ return notification, nil
+}
+
func (q *FakeQuerier) InsertLicense(
_ context.Context, arg database.InsertLicenseParams,
) (database.License, error) {
@@ -9679,6 +9791,26 @@ func (q *FakeQuerier) UpdateInactiveUsersToDormant(_ context.Context, params dat
 	return updated, nil
 }
+func (q *FakeQuerier) UpdateInboxNotificationReadStatus(_ context.Context, arg database.UpdateInboxNotificationReadStatusParams) error {
+	err := validateDatabaseType(arg)
+	if err != nil {
+		return err
+	}
+
+	q.mutex.Lock()
+	defer q.mutex.Unlock()
+
+	for i := range q.InboxNotification {
+		if q.InboxNotification[i].ID == arg.ID {
+			q.InboxNotification[i].ReadAt = arg.ReadAt
+			// IDs are unique (primary key), so stop at the first match.
+			break
+		}
+	}
+
+	return nil
+}
+
func (q *FakeQuerier) UpdateMemberRoles(_ context.Context, arg database.UpdateMemberRolesParams) (database.OrganizationMember, error) {
if err := validateDatabaseType(arg); err != nil {
return database.OrganizationMember{}, err
diff --git a/coderd/database/dbmetrics/querymetrics.go b/coderd/database/dbmetrics/querymetrics.go
index 31fbcced1b7f2..d05ec5f5acdf9 100644
--- a/coderd/database/dbmetrics/querymetrics.go
+++ b/coderd/database/dbmetrics/querymetrics.go
@@ -178,6 +178,13 @@ func (m queryMetricsStore) CleanTailnetTunnels(ctx context.Context) error {
return r0
}
+func (m queryMetricsStore) CountUnreadInboxNotificationsByUserID(ctx context.Context, userID uuid.UUID) (int64, error) {
+ start := time.Now()
+ r0, r1 := m.s.CountUnreadInboxNotificationsByUserID(ctx, userID)
+ m.queryLatencies.WithLabelValues("CountUnreadInboxNotificationsByUserID").Observe(time.Since(start).Seconds())
+ return r0, r1
+}
+
func (m queryMetricsStore) CustomRoles(ctx context.Context, arg database.CustomRolesParams) ([]database.CustomRole, error) {
start := time.Now()
r0, r1 := m.s.CustomRoles(ctx, arg)
@@ -710,6 +717,13 @@ func (m queryMetricsStore) GetFileTemplates(ctx context.Context, fileID uuid.UUI
return rows, err
}
+func (m queryMetricsStore) GetFilteredInboxNotificationsByUserID(ctx context.Context, arg database.GetFilteredInboxNotificationsByUserIDParams) ([]database.InboxNotification, error) {
+ start := time.Now()
+ r0, r1 := m.s.GetFilteredInboxNotificationsByUserID(ctx, arg)
+ m.queryLatencies.WithLabelValues("GetFilteredInboxNotificationsByUserID").Observe(time.Since(start).Seconds())
+ return r0, r1
+}
+
func (m queryMetricsStore) GetGitSSHKey(ctx context.Context, userID uuid.UUID) (database.GitSSHKey, error) {
start := time.Now()
key, err := m.s.GetGitSSHKey(ctx, userID)
@@ -773,6 +787,20 @@ func (m queryMetricsStore) GetHungProvisionerJobs(ctx context.Context, hungSince
return jobs, err
}
+func (m queryMetricsStore) GetInboxNotificationByID(ctx context.Context, id uuid.UUID) (database.InboxNotification, error) {
+ start := time.Now()
+ r0, r1 := m.s.GetInboxNotificationByID(ctx, id)
+ m.queryLatencies.WithLabelValues("GetInboxNotificationByID").Observe(time.Since(start).Seconds())
+ return r0, r1
+}
+
+func (m queryMetricsStore) GetInboxNotificationsByUserID(ctx context.Context, userID database.GetInboxNotificationsByUserIDParams) ([]database.InboxNotification, error) {
+ start := time.Now()
+ r0, r1 := m.s.GetInboxNotificationsByUserID(ctx, userID)
+ m.queryLatencies.WithLabelValues("GetInboxNotificationsByUserID").Observe(time.Since(start).Seconds())
+ return r0, r1
+}
+
func (m queryMetricsStore) GetJFrogXrayScanByWorkspaceAndAgentID(ctx context.Context, arg database.GetJFrogXrayScanByWorkspaceAndAgentIDParams) (database.JfrogXrayScan, error) {
start := time.Now()
r0, r1 := m.s.GetJFrogXrayScanByWorkspaceAndAgentID(ctx, arg)
@@ -1879,6 +1907,13 @@ func (m queryMetricsStore) InsertGroupMember(ctx context.Context, arg database.I
return err
}
+func (m queryMetricsStore) InsertInboxNotification(ctx context.Context, arg database.InsertInboxNotificationParams) (database.InboxNotification, error) {
+ start := time.Now()
+ r0, r1 := m.s.InsertInboxNotification(ctx, arg)
+ m.queryLatencies.WithLabelValues("InsertInboxNotification").Observe(time.Since(start).Seconds())
+ return r0, r1
+}
+
func (m queryMetricsStore) InsertLicense(ctx context.Context, arg database.InsertLicenseParams) (database.License, error) {
start := time.Now()
license, err := m.s.InsertLicense(ctx, arg)
@@ -2334,6 +2369,13 @@ func (m queryMetricsStore) UpdateInactiveUsersToDormant(ctx context.Context, las
return r0, r1
}
+func (m queryMetricsStore) UpdateInboxNotificationReadStatus(ctx context.Context, arg database.UpdateInboxNotificationReadStatusParams) error {
+ start := time.Now()
+ r0 := m.s.UpdateInboxNotificationReadStatus(ctx, arg)
+ m.queryLatencies.WithLabelValues("UpdateInboxNotificationReadStatus").Observe(time.Since(start).Seconds())
+ return r0
+}
+
func (m queryMetricsStore) UpdateMemberRoles(ctx context.Context, arg database.UpdateMemberRolesParams) (database.OrganizationMember, error) {
start := time.Now()
member, err := m.s.UpdateMemberRoles(ctx, arg)
diff --git a/coderd/database/dbmock/dbmock.go b/coderd/database/dbmock/dbmock.go
index f92bbf13246d7..39f148d90e20e 100644
--- a/coderd/database/dbmock/dbmock.go
+++ b/coderd/database/dbmock/dbmock.go
@@ -232,6 +232,21 @@ func (mr *MockStoreMockRecorder) CleanTailnetTunnels(ctx any) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanTailnetTunnels", reflect.TypeOf((*MockStore)(nil).CleanTailnetTunnels), ctx)
}
+// CountUnreadInboxNotificationsByUserID mocks base method.
+func (m *MockStore) CountUnreadInboxNotificationsByUserID(ctx context.Context, userID uuid.UUID) (int64, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CountUnreadInboxNotificationsByUserID", ctx, userID)
+ ret0, _ := ret[0].(int64)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CountUnreadInboxNotificationsByUserID indicates an expected call of CountUnreadInboxNotificationsByUserID.
+func (mr *MockStoreMockRecorder) CountUnreadInboxNotificationsByUserID(ctx, userID any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CountUnreadInboxNotificationsByUserID", reflect.TypeOf((*MockStore)(nil).CountUnreadInboxNotificationsByUserID), ctx, userID)
+}
+
// CustomRoles mocks base method.
func (m *MockStore) CustomRoles(ctx context.Context, arg database.CustomRolesParams) ([]database.CustomRole, error) {
m.ctrl.T.Helper()
@@ -1417,6 +1432,21 @@ func (mr *MockStoreMockRecorder) GetFileTemplates(ctx, fileID any) *gomock.Call
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFileTemplates", reflect.TypeOf((*MockStore)(nil).GetFileTemplates), ctx, fileID)
}
+// GetFilteredInboxNotificationsByUserID mocks base method.
+func (m *MockStore) GetFilteredInboxNotificationsByUserID(ctx context.Context, arg database.GetFilteredInboxNotificationsByUserIDParams) ([]database.InboxNotification, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetFilteredInboxNotificationsByUserID", ctx, arg)
+ ret0, _ := ret[0].([]database.InboxNotification)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetFilteredInboxNotificationsByUserID indicates an expected call of GetFilteredInboxNotificationsByUserID.
+func (mr *MockStoreMockRecorder) GetFilteredInboxNotificationsByUserID(ctx, arg any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFilteredInboxNotificationsByUserID", reflect.TypeOf((*MockStore)(nil).GetFilteredInboxNotificationsByUserID), ctx, arg)
+}
+
// GetGitSSHKey mocks base method.
func (m *MockStore) GetGitSSHKey(ctx context.Context, userID uuid.UUID) (database.GitSSHKey, error) {
m.ctrl.T.Helper()
@@ -1552,6 +1582,36 @@ func (mr *MockStoreMockRecorder) GetHungProvisionerJobs(ctx, updatedAt any) *gom
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHungProvisionerJobs", reflect.TypeOf((*MockStore)(nil).GetHungProvisionerJobs), ctx, updatedAt)
}
+// GetInboxNotificationByID mocks base method.
+func (m *MockStore) GetInboxNotificationByID(ctx context.Context, id uuid.UUID) (database.InboxNotification, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetInboxNotificationByID", ctx, id)
+ ret0, _ := ret[0].(database.InboxNotification)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetInboxNotificationByID indicates an expected call of GetInboxNotificationByID.
+func (mr *MockStoreMockRecorder) GetInboxNotificationByID(ctx, id any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInboxNotificationByID", reflect.TypeOf((*MockStore)(nil).GetInboxNotificationByID), ctx, id)
+}
+
+// GetInboxNotificationsByUserID mocks base method.
+func (m *MockStore) GetInboxNotificationsByUserID(ctx context.Context, arg database.GetInboxNotificationsByUserIDParams) ([]database.InboxNotification, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetInboxNotificationsByUserID", ctx, arg)
+ ret0, _ := ret[0].([]database.InboxNotification)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetInboxNotificationsByUserID indicates an expected call of GetInboxNotificationsByUserID.
+func (mr *MockStoreMockRecorder) GetInboxNotificationsByUserID(ctx, arg any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInboxNotificationsByUserID", reflect.TypeOf((*MockStore)(nil).GetInboxNotificationsByUserID), ctx, arg)
+}
+
// GetJFrogXrayScanByWorkspaceAndAgentID mocks base method.
func (m *MockStore) GetJFrogXrayScanByWorkspaceAndAgentID(ctx context.Context, arg database.GetJFrogXrayScanByWorkspaceAndAgentIDParams) (database.JfrogXrayScan, error) {
m.ctrl.T.Helper()
@@ -3962,6 +4022,21 @@ func (mr *MockStoreMockRecorder) InsertGroupMember(ctx, arg any) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertGroupMember", reflect.TypeOf((*MockStore)(nil).InsertGroupMember), ctx, arg)
}
+// InsertInboxNotification mocks base method.
+func (m *MockStore) InsertInboxNotification(ctx context.Context, arg database.InsertInboxNotificationParams) (database.InboxNotification, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "InsertInboxNotification", ctx, arg)
+ ret0, _ := ret[0].(database.InboxNotification)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// InsertInboxNotification indicates an expected call of InsertInboxNotification.
+func (mr *MockStoreMockRecorder) InsertInboxNotification(ctx, arg any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertInboxNotification", reflect.TypeOf((*MockStore)(nil).InsertInboxNotification), ctx, arg)
+}
+
// InsertLicense mocks base method.
func (m *MockStore) InsertLicense(ctx context.Context, arg database.InsertLicenseParams) (database.License, error) {
m.ctrl.T.Helper()
@@ -4951,6 +5026,20 @@ func (mr *MockStoreMockRecorder) UpdateInactiveUsersToDormant(ctx, arg any) *gom
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateInactiveUsersToDormant", reflect.TypeOf((*MockStore)(nil).UpdateInactiveUsersToDormant), ctx, arg)
}
+// UpdateInboxNotificationReadStatus mocks base method.
+func (m *MockStore) UpdateInboxNotificationReadStatus(ctx context.Context, arg database.UpdateInboxNotificationReadStatusParams) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateInboxNotificationReadStatus", ctx, arg)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// UpdateInboxNotificationReadStatus indicates an expected call of UpdateInboxNotificationReadStatus.
+func (mr *MockStoreMockRecorder) UpdateInboxNotificationReadStatus(ctx, arg any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateInboxNotificationReadStatus", reflect.TypeOf((*MockStore)(nil).UpdateInboxNotificationReadStatus), ctx, arg)
+}
+
// UpdateMemberRoles mocks base method.
func (m *MockStore) UpdateMemberRoles(ctx context.Context, arg database.UpdateMemberRolesParams) (database.OrganizationMember, error) {
m.ctrl.T.Helper()
diff --git a/coderd/database/dump.sql b/coderd/database/dump.sql
index e05d3a06d31f5..c35a30ae2d866 100644
--- a/coderd/database/dump.sql
+++ b/coderd/database/dump.sql
@@ -66,6 +66,12 @@ CREATE TYPE group_source AS ENUM (
'oidc'
);
+CREATE TYPE inbox_notification_read_status AS ENUM (
+ 'all',
+ 'unread',
+ 'read'
+);
+
CREATE TYPE log_level AS ENUM (
'trace',
'debug',
@@ -899,6 +905,19 @@ CREATE VIEW group_members_expanded AS
COMMENT ON VIEW group_members_expanded IS 'Joins group members with user information, organization ID, group name. Includes both regular group members and organization members (as part of the "Everyone" group).';
+CREATE TABLE inbox_notifications (
+ id uuid NOT NULL,
+ user_id uuid NOT NULL,
+ template_id uuid NOT NULL,
+ targets uuid[],
+ title text NOT NULL,
+ content text NOT NULL,
+ icon text NOT NULL,
+ actions jsonb NOT NULL,
+ read_at timestamp with time zone,
+ created_at timestamp with time zone DEFAULT now() NOT NULL
+);
+
CREATE TABLE jfrog_xray_scans (
agent_id uuid NOT NULL,
workspace_id uuid NOT NULL,
@@ -2048,6 +2067,9 @@ ALTER TABLE ONLY groups
ALTER TABLE ONLY groups
ADD CONSTRAINT groups_pkey PRIMARY KEY (id);
+ALTER TABLE ONLY inbox_notifications
+ ADD CONSTRAINT inbox_notifications_pkey PRIMARY KEY (id);
+
ALTER TABLE ONLY jfrog_xray_scans
ADD CONSTRAINT jfrog_xray_scans_pkey PRIMARY KEY (agent_id, workspace_id);
@@ -2278,6 +2300,10 @@ CREATE INDEX idx_custom_roles_id ON custom_roles USING btree (id);
CREATE UNIQUE INDEX idx_custom_roles_name_lower ON custom_roles USING btree (lower(name));
+CREATE INDEX idx_inbox_notifications_user_id_read_at ON inbox_notifications USING btree (user_id, read_at);
+
+CREATE INDEX idx_inbox_notifications_user_id_template_id_targets ON inbox_notifications USING btree (user_id, template_id, targets);
+
CREATE INDEX idx_notification_messages_status ON notification_messages USING btree (status);
CREATE INDEX idx_organization_member_organization_id_uuid ON organization_members USING btree (organization_id);
@@ -2474,6 +2500,12 @@ ALTER TABLE ONLY group_members
ALTER TABLE ONLY groups
ADD CONSTRAINT groups_organization_id_fkey FOREIGN KEY (organization_id) REFERENCES organizations(id) ON DELETE CASCADE;
+ALTER TABLE ONLY inbox_notifications
+ ADD CONSTRAINT inbox_notifications_template_id_fkey FOREIGN KEY (template_id) REFERENCES notification_templates(id) ON DELETE CASCADE;
+
+ALTER TABLE ONLY inbox_notifications
+ ADD CONSTRAINT inbox_notifications_user_id_fkey FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE;
+
ALTER TABLE ONLY jfrog_xray_scans
ADD CONSTRAINT jfrog_xray_scans_agent_id_fkey FOREIGN KEY (agent_id) REFERENCES workspace_agents(id) ON DELETE CASCADE;
diff --git a/coderd/database/foreign_key_constraint.go b/coderd/database/foreign_key_constraint.go
index 66c379a749e01..525d240f25267 100644
--- a/coderd/database/foreign_key_constraint.go
+++ b/coderd/database/foreign_key_constraint.go
@@ -14,6 +14,8 @@ const (
ForeignKeyGroupMembersGroupID ForeignKeyConstraint = "group_members_group_id_fkey" // ALTER TABLE ONLY group_members ADD CONSTRAINT group_members_group_id_fkey FOREIGN KEY (group_id) REFERENCES groups(id) ON DELETE CASCADE;
ForeignKeyGroupMembersUserID ForeignKeyConstraint = "group_members_user_id_fkey" // ALTER TABLE ONLY group_members ADD CONSTRAINT group_members_user_id_fkey FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE;
ForeignKeyGroupsOrganizationID ForeignKeyConstraint = "groups_organization_id_fkey" // ALTER TABLE ONLY groups ADD CONSTRAINT groups_organization_id_fkey FOREIGN KEY (organization_id) REFERENCES organizations(id) ON DELETE CASCADE;
+ ForeignKeyInboxNotificationsTemplateID ForeignKeyConstraint = "inbox_notifications_template_id_fkey" // ALTER TABLE ONLY inbox_notifications ADD CONSTRAINT inbox_notifications_template_id_fkey FOREIGN KEY (template_id) REFERENCES notification_templates(id) ON DELETE CASCADE;
+ ForeignKeyInboxNotificationsUserID ForeignKeyConstraint = "inbox_notifications_user_id_fkey" // ALTER TABLE ONLY inbox_notifications ADD CONSTRAINT inbox_notifications_user_id_fkey FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE;
ForeignKeyJfrogXrayScansAgentID ForeignKeyConstraint = "jfrog_xray_scans_agent_id_fkey" // ALTER TABLE ONLY jfrog_xray_scans ADD CONSTRAINT jfrog_xray_scans_agent_id_fkey FOREIGN KEY (agent_id) REFERENCES workspace_agents(id) ON DELETE CASCADE;
ForeignKeyJfrogXrayScansWorkspaceID ForeignKeyConstraint = "jfrog_xray_scans_workspace_id_fkey" // ALTER TABLE ONLY jfrog_xray_scans ADD CONSTRAINT jfrog_xray_scans_workspace_id_fkey FOREIGN KEY (workspace_id) REFERENCES workspaces(id) ON DELETE CASCADE;
ForeignKeyNotificationMessagesNotificationTemplateID ForeignKeyConstraint = "notification_messages_notification_template_id_fkey" // ALTER TABLE ONLY notification_messages ADD CONSTRAINT notification_messages_notification_template_id_fkey FOREIGN KEY (notification_template_id) REFERENCES notification_templates(id) ON DELETE CASCADE;
diff --git a/coderd/database/migrations/000297_notifications_inbox.down.sql b/coderd/database/migrations/000297_notifications_inbox.down.sql
new file mode 100644
index 0000000000000..9d39b226c8a2c
--- /dev/null
+++ b/coderd/database/migrations/000297_notifications_inbox.down.sql
@@ -0,0 +1,3 @@
+DROP TABLE IF EXISTS inbox_notifications;
+
+DROP TYPE IF EXISTS inbox_notification_read_status;
diff --git a/coderd/database/migrations/000297_notifications_inbox.up.sql b/coderd/database/migrations/000297_notifications_inbox.up.sql
new file mode 100644
index 0000000000000..c3754c53674df
--- /dev/null
+++ b/coderd/database/migrations/000297_notifications_inbox.up.sql
@@ -0,0 +1,17 @@
+CREATE TYPE inbox_notification_read_status AS ENUM ('all', 'unread', 'read');
+
+CREATE TABLE inbox_notifications (
+ id UUID PRIMARY KEY,
+ user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
+ template_id UUID NOT NULL REFERENCES notification_templates(id) ON DELETE CASCADE,
+ targets UUID[],
+ title TEXT NOT NULL,
+ content TEXT NOT NULL,
+ icon TEXT NOT NULL,
+ actions JSONB NOT NULL,
+ read_at TIMESTAMP WITH TIME ZONE,
+ created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
+);
+
+CREATE INDEX idx_inbox_notifications_user_id_read_at ON inbox_notifications(user_id, read_at);
+CREATE INDEX idx_inbox_notifications_user_id_template_id_targets ON inbox_notifications(user_id, template_id, targets);
diff --git a/coderd/database/migrations/testdata/fixtures/000297_notifications_inbox.up.sql b/coderd/database/migrations/testdata/fixtures/000297_notifications_inbox.up.sql
new file mode 100644
index 0000000000000..fb4cecf096eae
--- /dev/null
+++ b/coderd/database/migrations/testdata/fixtures/000297_notifications_inbox.up.sql
@@ -0,0 +1,25 @@
+INSERT INTO
+ inbox_notifications (
+ id,
+ user_id,
+ template_id,
+ targets,
+ title,
+ content,
+ icon,
+ actions,
+ read_at,
+ created_at
+ )
+ VALUES (
+ '68b396aa-7f53-4bf1-b8d8-4cbf5fa244e5', -- uuid
+ '5755e622-fadd-44ca-98da-5df070491844', -- uuid
+ 'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11', -- uuid
+ ARRAY[]::UUID[], -- uuid[]
+ 'Test Notification',
+ 'This is a test notification',
+ 'https://test.coder.com/favicon.ico',
+ '{}',
+ '2025-01-01 00:00:00',
+ '2025-01-01 00:00:00'
+ );
diff --git a/coderd/database/modelmethods.go b/coderd/database/modelmethods.go
index 803cfbf01ced2..d9013b1f08c0c 100644
--- a/coderd/database/modelmethods.go
+++ b/coderd/database/modelmethods.go
@@ -168,6 +168,12 @@ func (TemplateVersion) RBACObject(template Template) rbac.Object {
return template.RBACObject()
}
+func (i InboxNotification) RBACObject() rbac.Object {
+ return rbac.ResourceInboxNotification.
+ WithID(i.ID).
+ WithOwner(i.UserID.String())
+}
+
// RBACObjectNoTemplate is for orphaned template versions.
func (v TemplateVersion) RBACObjectNoTemplate() rbac.Object {
return rbac.ResourceTemplate.InOrg(v.OrganizationID)
diff --git a/coderd/database/models.go b/coderd/database/models.go
index 4e3353f844a02..3e0f59e6e9391 100644
--- a/coderd/database/models.go
+++ b/coderd/database/models.go
@@ -543,6 +543,67 @@ func AllGroupSourceValues() []GroupSource {
}
}
+type InboxNotificationReadStatus string
+
+const (
+ InboxNotificationReadStatusAll InboxNotificationReadStatus = "all"
+ InboxNotificationReadStatusUnread InboxNotificationReadStatus = "unread"
+ InboxNotificationReadStatusRead InboxNotificationReadStatus = "read"
+)
+
+func (e *InboxNotificationReadStatus) Scan(src interface{}) error {
+ switch s := src.(type) {
+ case []byte:
+ *e = InboxNotificationReadStatus(s)
+ case string:
+ *e = InboxNotificationReadStatus(s)
+ default:
+ return fmt.Errorf("unsupported scan type for InboxNotificationReadStatus: %T", src)
+ }
+ return nil
+}
+
+type NullInboxNotificationReadStatus struct {
+ InboxNotificationReadStatus InboxNotificationReadStatus `json:"inbox_notification_read_status"`
+ Valid bool `json:"valid"` // Valid is true if InboxNotificationReadStatus is not NULL
+}
+
+// Scan implements the Scanner interface.
+func (ns *NullInboxNotificationReadStatus) Scan(value interface{}) error {
+ if value == nil {
+ ns.InboxNotificationReadStatus, ns.Valid = "", false
+ return nil
+ }
+ ns.Valid = true
+ return ns.InboxNotificationReadStatus.Scan(value)
+}
+
+// Value implements the driver Valuer interface.
+func (ns NullInboxNotificationReadStatus) Value() (driver.Value, error) {
+ if !ns.Valid {
+ return nil, nil
+ }
+ return string(ns.InboxNotificationReadStatus), nil
+}
+
+func (e InboxNotificationReadStatus) Valid() bool {
+ switch e {
+ case InboxNotificationReadStatusAll,
+ InboxNotificationReadStatusUnread,
+ InboxNotificationReadStatusRead:
+ return true
+ }
+ return false
+}
+
+func AllInboxNotificationReadStatusValues() []InboxNotificationReadStatus {
+ return []InboxNotificationReadStatus{
+ InboxNotificationReadStatusAll,
+ InboxNotificationReadStatusUnread,
+ InboxNotificationReadStatusRead,
+ }
+}
+
type LogLevel string
const (
@@ -2557,6 +2618,19 @@ type GroupMemberTable struct {
GroupID uuid.UUID `db:"group_id" json:"group_id"`
}
+type InboxNotification struct {
+ ID uuid.UUID `db:"id" json:"id"`
+ UserID uuid.UUID `db:"user_id" json:"user_id"`
+ TemplateID uuid.UUID `db:"template_id" json:"template_id"`
+ Targets []uuid.UUID `db:"targets" json:"targets"`
+ Title string `db:"title" json:"title"`
+ Content string `db:"content" json:"content"`
+ Icon string `db:"icon" json:"icon"`
+ Actions json.RawMessage `db:"actions" json:"actions"`
+ ReadAt sql.NullTime `db:"read_at" json:"read_at"`
+ CreatedAt time.Time `db:"created_at" json:"created_at"`
+}
+
type JfrogXrayScan struct {
AgentID uuid.UUID `db:"agent_id" json:"agent_id"`
WorkspaceID uuid.UUID `db:"workspace_id" json:"workspace_id"`
diff --git a/coderd/database/querier.go b/coderd/database/querier.go
index 527ee955819d8..6bae27ec1f3d4 100644
--- a/coderd/database/querier.go
+++ b/coderd/database/querier.go
@@ -63,6 +63,7 @@ type sqlcQuerier interface {
CleanTailnetCoordinators(ctx context.Context) error
CleanTailnetLostPeers(ctx context.Context) error
CleanTailnetTunnels(ctx context.Context) error
+ CountUnreadInboxNotificationsByUserID(ctx context.Context, userID uuid.UUID) (int64, error)
CustomRoles(ctx context.Context, arg CustomRolesParams) ([]CustomRole, error)
DeleteAPIKeyByID(ctx context.Context, id string) error
DeleteAPIKeysByUserID(ctx context.Context, userID uuid.UUID) error
@@ -158,6 +159,14 @@ type sqlcQuerier interface {
GetFileByID(ctx context.Context, id uuid.UUID) (File, error)
// Get all templates that use a file.
GetFileTemplates(ctx context.Context, fileID uuid.UUID) ([]GetFileTemplatesRow, error)
+ // Fetches inbox notifications for a user filtered by templates and targets
+ // param user_id: The user ID
+ // param templates: The template IDs to filter by - the template_id = ANY(@templates::UUID[]) condition checks if the template_id is in the @templates array
+ // param targets: The target IDs to filter by - the targets @> COALESCE(@targets, ARRAY[]::UUID[]) condition checks if the targets array (from the DB) contains all the elements in the @targets array
+ // param read_status: The read status to filter by - can be any of 'all', 'unread', 'read'
+ // param created_at_opt: The created_at timestamp to filter by. This parameter is used for pagination - it fetches notifications created before the specified timestamp if it is not the zero value
+ // param limit_opt: The limit of notifications to fetch. If the limit is not specified, it defaults to 25
+ GetFilteredInboxNotificationsByUserID(ctx context.Context, arg GetFilteredInboxNotificationsByUserIDParams) ([]InboxNotification, error)
GetGitSSHKey(ctx context.Context, userID uuid.UUID) (GitSSHKey, error)
GetGroupByID(ctx context.Context, id uuid.UUID) (Group, error)
GetGroupByOrgAndName(ctx context.Context, arg GetGroupByOrgAndNameParams) (Group, error)
@@ -170,6 +179,13 @@ type sqlcQuerier interface {
GetGroups(ctx context.Context, arg GetGroupsParams) ([]GetGroupsRow, error)
GetHealthSettings(ctx context.Context) (string, error)
GetHungProvisionerJobs(ctx context.Context, updatedAt time.Time) ([]ProvisionerJob, error)
+ GetInboxNotificationByID(ctx context.Context, id uuid.UUID) (InboxNotification, error)
+ // Fetches inbox notifications for a user
+ // param user_id: The user ID
+ // param read_status: The read status to filter by - can be any of 'all', 'unread', 'read'
+ // param created_at_opt: The created_at timestamp to filter by. This parameter is used for pagination - it fetches notifications created before the specified timestamp if it is not the zero value
+ // param limit_opt: The limit of notifications to fetch. If the limit is not specified, it defaults to 25
+ GetInboxNotificationsByUserID(ctx context.Context, arg GetInboxNotificationsByUserIDParams) ([]InboxNotification, error)
GetJFrogXrayScanByWorkspaceAndAgentID(ctx context.Context, arg GetJFrogXrayScanByWorkspaceAndAgentIDParams) (JfrogXrayScan, error)
GetLastUpdateCheck(ctx context.Context) (string, error)
GetLatestCryptoKeyByFeature(ctx context.Context, feature CryptoKeyFeature) (CryptoKey, error)
@@ -396,6 +412,7 @@ type sqlcQuerier interface {
InsertGitSSHKey(ctx context.Context, arg InsertGitSSHKeyParams) (GitSSHKey, error)
InsertGroup(ctx context.Context, arg InsertGroupParams) (Group, error)
InsertGroupMember(ctx context.Context, arg InsertGroupMemberParams) error
+ InsertInboxNotification(ctx context.Context, arg InsertInboxNotificationParams) (InboxNotification, error)
InsertLicense(ctx context.Context, arg InsertLicenseParams) (License, error)
InsertMemoryResourceMonitor(ctx context.Context, arg InsertMemoryResourceMonitorParams) (WorkspaceAgentMemoryResourceMonitor, error)
// Inserts any group by name that does not exist. All new groups are given
@@ -479,6 +496,7 @@ type sqlcQuerier interface {
UpdateGitSSHKey(ctx context.Context, arg UpdateGitSSHKeyParams) (GitSSHKey, error)
UpdateGroupByID(ctx context.Context, arg UpdateGroupByIDParams) (Group, error)
UpdateInactiveUsersToDormant(ctx context.Context, arg UpdateInactiveUsersToDormantParams) ([]UpdateInactiveUsersToDormantRow, error)
+ UpdateInboxNotificationReadStatus(ctx context.Context, arg UpdateInboxNotificationReadStatusParams) error
UpdateMemberRoles(ctx context.Context, arg UpdateMemberRolesParams) (OrganizationMember, error)
UpdateMemoryResourceMonitor(ctx context.Context, arg UpdateMemoryResourceMonitorParams) error
UpdateNotificationTemplateMethodByID(ctx context.Context, arg UpdateNotificationTemplateMethodByIDParams) (NotificationTemplate, error)
diff --git a/coderd/database/queries.sql.go b/coderd/database/queries.sql.go
index 56ee5cfa3a9af..0891bc8c9fcc6 100644
--- a/coderd/database/queries.sql.go
+++ b/coderd/database/queries.sql.go
@@ -4298,6 +4298,243 @@ func (q *sqlQuerier) UpsertNotificationReportGeneratorLog(ctx context.Context, a
return err
}
+const countUnreadInboxNotificationsByUserID = `-- name: CountUnreadInboxNotificationsByUserID :one
+SELECT COUNT(*) FROM inbox_notifications WHERE user_id = $1 AND read_at IS NULL
+`
+
+func (q *sqlQuerier) CountUnreadInboxNotificationsByUserID(ctx context.Context, userID uuid.UUID) (int64, error) {
+ row := q.db.QueryRowContext(ctx, countUnreadInboxNotificationsByUserID, userID)
+ var count int64
+ err := row.Scan(&count)
+ return count, err
+}
+
+const getFilteredInboxNotificationsByUserID = `-- name: GetFilteredInboxNotificationsByUserID :many
+SELECT id, user_id, template_id, targets, title, content, icon, actions, read_at, created_at FROM inbox_notifications WHERE
+ user_id = $1 AND
+ template_id = ANY($2::UUID[]) AND
+ targets @> COALESCE($3, ARRAY[]::UUID[]) AND
+ ($4::inbox_notification_read_status = 'all' OR ($4::inbox_notification_read_status = 'unread' AND read_at IS NULL) OR ($4::inbox_notification_read_status = 'read' AND read_at IS NOT NULL)) AND
+ ($5::TIMESTAMPTZ = '0001-01-01 00:00:00Z' OR created_at < $5::TIMESTAMPTZ)
+ ORDER BY created_at DESC
+ LIMIT (COALESCE(NULLIF($6 :: INT, 0), 25))
+`
+
+type GetFilteredInboxNotificationsByUserIDParams struct {
+ UserID uuid.UUID `db:"user_id" json:"user_id"`
+ Templates []uuid.UUID `db:"templates" json:"templates"`
+ Targets []uuid.UUID `db:"targets" json:"targets"`
+ ReadStatus InboxNotificationReadStatus `db:"read_status" json:"read_status"`
+ CreatedAtOpt time.Time `db:"created_at_opt" json:"created_at_opt"`
+ LimitOpt int32 `db:"limit_opt" json:"limit_opt"`
+}
+
+// Fetches inbox notifications for a user filtered by templates and targets
+// param user_id: The user ID
+// param templates: The template IDs to filter by - the template_id = ANY(@templates::UUID[]) condition checks if the template_id is in the @templates array
+// param targets: The target IDs to filter by - the targets @> COALESCE(@targets, ARRAY[]::UUID[]) condition checks if the targets array (from the DB) contains all the elements in the @targets array
+// param read_status: The read status to filter by - can be any of 'all', 'unread', 'read'
+// param created_at_opt: The created_at timestamp to filter by. This parameter is used for pagination - it fetches notifications created before the specified timestamp if it is not the zero value
+// param limit_opt: The limit of notifications to fetch. If the limit is not specified, it defaults to 25
+func (q *sqlQuerier) GetFilteredInboxNotificationsByUserID(ctx context.Context, arg GetFilteredInboxNotificationsByUserIDParams) ([]InboxNotification, error) {
+ rows, err := q.db.QueryContext(ctx, getFilteredInboxNotificationsByUserID,
+ arg.UserID,
+ pq.Array(arg.Templates),
+ pq.Array(arg.Targets),
+ arg.ReadStatus,
+ arg.CreatedAtOpt,
+ arg.LimitOpt,
+ )
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+ var items []InboxNotification
+ for rows.Next() {
+ var i InboxNotification
+ if err := rows.Scan(
+ &i.ID,
+ &i.UserID,
+ &i.TemplateID,
+ pq.Array(&i.Targets),
+ &i.Title,
+ &i.Content,
+ &i.Icon,
+ &i.Actions,
+ &i.ReadAt,
+ &i.CreatedAt,
+ ); err != nil {
+ return nil, err
+ }
+ items = append(items, i)
+ }
+ if err := rows.Close(); err != nil {
+ return nil, err
+ }
+ if err := rows.Err(); err != nil {
+ return nil, err
+ }
+ return items, nil
+}
+
+const getInboxNotificationByID = `-- name: GetInboxNotificationByID :one
+SELECT id, user_id, template_id, targets, title, content, icon, actions, read_at, created_at FROM inbox_notifications WHERE id = $1
+`
+
+func (q *sqlQuerier) GetInboxNotificationByID(ctx context.Context, id uuid.UUID) (InboxNotification, error) {
+ row := q.db.QueryRowContext(ctx, getInboxNotificationByID, id)
+ var i InboxNotification
+ err := row.Scan(
+ &i.ID,
+ &i.UserID,
+ &i.TemplateID,
+ pq.Array(&i.Targets),
+ &i.Title,
+ &i.Content,
+ &i.Icon,
+ &i.Actions,
+ &i.ReadAt,
+ &i.CreatedAt,
+ )
+ return i, err
+}
+
+const getInboxNotificationsByUserID = `-- name: GetInboxNotificationsByUserID :many
+SELECT id, user_id, template_id, targets, title, content, icon, actions, read_at, created_at FROM inbox_notifications WHERE
+ user_id = $1 AND
+ ($2::inbox_notification_read_status = 'all' OR ($2::inbox_notification_read_status = 'unread' AND read_at IS NULL) OR ($2::inbox_notification_read_status = 'read' AND read_at IS NOT NULL)) AND
+ ($3::TIMESTAMPTZ = '0001-01-01 00:00:00Z' OR created_at < $3::TIMESTAMPTZ)
+ ORDER BY created_at DESC
+ LIMIT (COALESCE(NULLIF($4 :: INT, 0), 25))
+`
+
+type GetInboxNotificationsByUserIDParams struct {
+ UserID uuid.UUID `db:"user_id" json:"user_id"`
+ ReadStatus InboxNotificationReadStatus `db:"read_status" json:"read_status"`
+ CreatedAtOpt time.Time `db:"created_at_opt" json:"created_at_opt"`
+ LimitOpt int32 `db:"limit_opt" json:"limit_opt"`
+}
+
+// Fetches inbox notifications for a user
+// param user_id: The user ID
+// param read_status: The read status to filter by - can be any of 'all', 'unread', 'read'
+// param created_at_opt: The created_at timestamp to filter by. This parameter is used for pagination - it fetches notifications created before the specified timestamp if it is not the zero value
+// param limit_opt: The limit of notifications to fetch. If the limit is not specified, it defaults to 25
+func (q *sqlQuerier) GetInboxNotificationsByUserID(ctx context.Context, arg GetInboxNotificationsByUserIDParams) ([]InboxNotification, error) {
+ rows, err := q.db.QueryContext(ctx, getInboxNotificationsByUserID,
+ arg.UserID,
+ arg.ReadStatus,
+ arg.CreatedAtOpt,
+ arg.LimitOpt,
+ )
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+ var items []InboxNotification
+ for rows.Next() {
+ var i InboxNotification
+ if err := rows.Scan(
+ &i.ID,
+ &i.UserID,
+ &i.TemplateID,
+ pq.Array(&i.Targets),
+ &i.Title,
+ &i.Content,
+ &i.Icon,
+ &i.Actions,
+ &i.ReadAt,
+ &i.CreatedAt,
+ ); err != nil {
+ return nil, err
+ }
+ items = append(items, i)
+ }
+ if err := rows.Close(); err != nil {
+ return nil, err
+ }
+ if err := rows.Err(); err != nil {
+ return nil, err
+ }
+ return items, nil
+}
+
+const insertInboxNotification = `-- name: InsertInboxNotification :one
+INSERT INTO
+ inbox_notifications (
+ id,
+ user_id,
+ template_id,
+ targets,
+ title,
+ content,
+ icon,
+ actions,
+ created_at
+ )
+VALUES
+ ($1, $2, $3, $4, $5, $6, $7, $8, $9) RETURNING id, user_id, template_id, targets, title, content, icon, actions, read_at, created_at
+`
+
+type InsertInboxNotificationParams struct {
+ ID uuid.UUID `db:"id" json:"id"`
+ UserID uuid.UUID `db:"user_id" json:"user_id"`
+ TemplateID uuid.UUID `db:"template_id" json:"template_id"`
+ Targets []uuid.UUID `db:"targets" json:"targets"`
+ Title string `db:"title" json:"title"`
+ Content string `db:"content" json:"content"`
+ Icon string `db:"icon" json:"icon"`
+ Actions json.RawMessage `db:"actions" json:"actions"`
+ CreatedAt time.Time `db:"created_at" json:"created_at"`
+}
+
+func (q *sqlQuerier) InsertInboxNotification(ctx context.Context, arg InsertInboxNotificationParams) (InboxNotification, error) {
+ row := q.db.QueryRowContext(ctx, insertInboxNotification,
+ arg.ID,
+ arg.UserID,
+ arg.TemplateID,
+ pq.Array(arg.Targets),
+ arg.Title,
+ arg.Content,
+ arg.Icon,
+ arg.Actions,
+ arg.CreatedAt,
+ )
+ var i InboxNotification
+ err := row.Scan(
+ &i.ID,
+ &i.UserID,
+ &i.TemplateID,
+ pq.Array(&i.Targets),
+ &i.Title,
+ &i.Content,
+ &i.Icon,
+ &i.Actions,
+ &i.ReadAt,
+ &i.CreatedAt,
+ )
+ return i, err
+}
+
+const updateInboxNotificationReadStatus = `-- name: UpdateInboxNotificationReadStatus :exec
+UPDATE
+ inbox_notifications
+SET
+ read_at = $1
+WHERE
+ id = $2
+`
+
+type UpdateInboxNotificationReadStatusParams struct {
+ ReadAt sql.NullTime `db:"read_at" json:"read_at"`
+ ID uuid.UUID `db:"id" json:"id"`
+}
+
+func (q *sqlQuerier) UpdateInboxNotificationReadStatus(ctx context.Context, arg UpdateInboxNotificationReadStatusParams) error {
+ _, err := q.db.ExecContext(ctx, updateInboxNotificationReadStatus, arg.ReadAt, arg.ID)
+ return err
+}
+
const deleteOAuth2ProviderAppByID = `-- name: DeleteOAuth2ProviderAppByID :exec
DELETE FROM oauth2_provider_apps WHERE id = $1
`
diff --git a/coderd/database/queries/notificationsinbox.sql b/coderd/database/queries/notificationsinbox.sql
new file mode 100644
index 0000000000000..cdaf1cf78cb7f
--- /dev/null
+++ b/coderd/database/queries/notificationsinbox.sql
@@ -0,0 +1,59 @@
+-- name: GetInboxNotificationsByUserID :many
+-- Fetches inbox notifications for a user
+-- param user_id: The user ID
+-- param read_status: The read status to filter by - can be any of 'all', 'unread', 'read'
+-- param created_at_opt: The created_at timestamp to filter by. This parameter is used for pagination - it fetches notifications created before the specified timestamp if it is not the zero value
+-- param limit_opt: The limit of notifications to fetch. If the limit is not specified, it defaults to 25
+SELECT * FROM inbox_notifications WHERE
+ user_id = @user_id AND
+ (@read_status::inbox_notification_read_status = 'all' OR (@read_status::inbox_notification_read_status = 'unread' AND read_at IS NULL) OR (@read_status::inbox_notification_read_status = 'read' AND read_at IS NOT NULL)) AND
+ (@created_at_opt::TIMESTAMPTZ = '0001-01-01 00:00:00Z' OR created_at < @created_at_opt::TIMESTAMPTZ)
+ ORDER BY created_at DESC
+ LIMIT (COALESCE(NULLIF(@limit_opt :: INT, 0), 25));
+
+-- name: GetFilteredInboxNotificationsByUserID :many
+-- Fetches inbox notifications for a user filtered by templates and targets
+-- param user_id: The user ID
+-- param templates: The template IDs to filter by - the template_id = ANY(@templates::UUID[]) condition checks if the template_id is in the @templates array
+-- param targets: The target IDs to filter by - the targets @> COALESCE(@targets, ARRAY[]::UUID[]) condition checks if the targets array (from the DB) contains all the elements in the @targets array
+-- param read_status: The read status to filter by - can be any of 'all', 'unread', 'read'
+-- param created_at_opt: The created_at timestamp to filter by. This parameter is used for pagination - it fetches notifications created before the specified timestamp if it is not the zero value
+-- param limit_opt: The limit of notifications to fetch. If the limit is not specified, it defaults to 25
+SELECT * FROM inbox_notifications WHERE
+ user_id = @user_id AND
+ template_id = ANY(@templates::UUID[]) AND
+ targets @> COALESCE(@targets, ARRAY[]::UUID[]) AND
+ (@read_status::inbox_notification_read_status = 'all' OR (@read_status::inbox_notification_read_status = 'unread' AND read_at IS NULL) OR (@read_status::inbox_notification_read_status = 'read' AND read_at IS NOT NULL)) AND
+ (@created_at_opt::TIMESTAMPTZ = '0001-01-01 00:00:00Z' OR created_at < @created_at_opt::TIMESTAMPTZ)
+ ORDER BY created_at DESC
+ LIMIT (COALESCE(NULLIF(@limit_opt :: INT, 0), 25));
+
+-- name: GetInboxNotificationByID :one
+SELECT * FROM inbox_notifications WHERE id = $1;
+
+-- name: CountUnreadInboxNotificationsByUserID :one
+SELECT COUNT(*) FROM inbox_notifications WHERE user_id = $1 AND read_at IS NULL;
+
+-- name: InsertInboxNotification :one
+INSERT INTO
+ inbox_notifications (
+ id,
+ user_id,
+ template_id,
+ targets,
+ title,
+ content,
+ icon,
+ actions,
+ created_at
+ )
+VALUES
+ ($1, $2, $3, $4, $5, $6, $7, $8, $9) RETURNING *;
+
+-- name: UpdateInboxNotificationReadStatus :exec
+UPDATE
+ inbox_notifications
+SET
+ read_at = $1
+WHERE
+ id = $2;
diff --git a/coderd/database/unique_constraint.go b/coderd/database/unique_constraint.go
index db68849777247..eb61e2f39a2c8 100644
--- a/coderd/database/unique_constraint.go
+++ b/coderd/database/unique_constraint.go
@@ -21,6 +21,7 @@ const (
UniqueGroupMembersUserIDGroupIDKey UniqueConstraint = "group_members_user_id_group_id_key" // ALTER TABLE ONLY group_members ADD CONSTRAINT group_members_user_id_group_id_key UNIQUE (user_id, group_id);
UniqueGroupsNameOrganizationIDKey UniqueConstraint = "groups_name_organization_id_key" // ALTER TABLE ONLY groups ADD CONSTRAINT groups_name_organization_id_key UNIQUE (name, organization_id);
UniqueGroupsPkey UniqueConstraint = "groups_pkey" // ALTER TABLE ONLY groups ADD CONSTRAINT groups_pkey PRIMARY KEY (id);
+ UniqueInboxNotificationsPkey UniqueConstraint = "inbox_notifications_pkey" // ALTER TABLE ONLY inbox_notifications ADD CONSTRAINT inbox_notifications_pkey PRIMARY KEY (id);
UniqueJfrogXrayScansPkey UniqueConstraint = "jfrog_xray_scans_pkey" // ALTER TABLE ONLY jfrog_xray_scans ADD CONSTRAINT jfrog_xray_scans_pkey PRIMARY KEY (agent_id, workspace_id);
UniqueLicensesJWTKey UniqueConstraint = "licenses_jwt_key" // ALTER TABLE ONLY licenses ADD CONSTRAINT licenses_jwt_key UNIQUE (jwt);
UniqueLicensesPkey UniqueConstraint = "licenses_pkey" // ALTER TABLE ONLY licenses ADD CONSTRAINT licenses_pkey PRIMARY KEY (id);
diff --git a/coderd/rbac/object_gen.go b/coderd/rbac/object_gen.go
index 86faa5f9456dc..47b8c58a6f32b 100644
--- a/coderd/rbac/object_gen.go
+++ b/coderd/rbac/object_gen.go
@@ -119,6 +119,15 @@ var (
Type: "idpsync_settings",
}
+ // ResourceInboxNotification
+ // Valid Actions
+ // - "ActionCreate" :: create inbox notifications
+ // - "ActionRead" :: read inbox notifications
+ // - "ActionUpdate" :: update inbox notifications
+ ResourceInboxNotification = Object{
+ Type: "inbox_notification",
+ }
+
// ResourceLicense
// Valid Actions
// - "ActionCreate" :: create a license
@@ -334,6 +343,7 @@ func AllResources() []Objecter {
ResourceGroup,
ResourceGroupMember,
ResourceIdpsyncSettings,
+ ResourceInboxNotification,
ResourceLicense,
ResourceNotificationMessage,
ResourceNotificationPreference,
diff --git a/coderd/rbac/policy/policy.go b/coderd/rbac/policy/policy.go
index 0988401e3849c..7f9736eaad751 100644
--- a/coderd/rbac/policy/policy.go
+++ b/coderd/rbac/policy/policy.go
@@ -280,6 +280,13 @@ var RBACPermissions = map[string]PermissionDefinition{
ActionUpdate: actDef("update notification preferences"),
},
},
+ "inbox_notification": {
+ Actions: map[Action]ActionDefinition{
+ ActionCreate: actDef("create inbox notifications"),
+ ActionRead: actDef("read inbox notifications"),
+ ActionUpdate: actDef("update inbox notifications"),
+ },
+ },
"crypto_key": {
Actions: map[Action]ActionDefinition{
ActionRead: actDef("read crypto keys"),
diff --git a/coderd/rbac/roles_test.go b/coderd/rbac/roles_test.go
index 51eb15def9739..dd5c090786b0e 100644
--- a/coderd/rbac/roles_test.go
+++ b/coderd/rbac/roles_test.go
@@ -365,6 +365,17 @@ func TestRolePermissions(t *testing.T) {
false: {setOtherOrg, setOrgNotMe, templateAdmin, userAdmin},
},
},
+ {
+ Name: "InboxNotification",
+ Actions: []policy.Action{
+ policy.ActionCreate, policy.ActionRead, policy.ActionUpdate,
+ },
+ Resource: rbac.ResourceInboxNotification.WithID(uuid.New()).InOrg(orgID).WithOwner(currentUser.String()),
+ AuthorizeMap: map[bool][]hasAuthSubjects{
+ true: {owner, orgMemberMe, orgAdmin},
+ false: {setOtherOrg, orgUserAdmin, orgTemplateAdmin, orgAuditor, templateAdmin, userAdmin, memberMe},
+ },
+ },
{
Name: "UserData",
Actions: []policy.Action{policy.ActionReadPersonal, policy.ActionUpdatePersonal},
diff --git a/codersdk/rbacresources_gen.go b/codersdk/rbacresources_gen.go
index 68b765db3f8a6..345da8d812167 100644
--- a/codersdk/rbacresources_gen.go
+++ b/codersdk/rbacresources_gen.go
@@ -17,6 +17,7 @@ const (
ResourceGroup RBACResource = "group"
ResourceGroupMember RBACResource = "group_member"
ResourceIdpsyncSettings RBACResource = "idpsync_settings"
+ ResourceInboxNotification RBACResource = "inbox_notification"
ResourceLicense RBACResource = "license"
ResourceNotificationMessage RBACResource = "notification_message"
ResourceNotificationPreference RBACResource = "notification_preference"
@@ -74,6 +75,7 @@ var RBACResourceActions = map[RBACResource][]RBACAction{
ResourceGroup: {ActionCreate, ActionDelete, ActionRead, ActionUpdate},
ResourceGroupMember: {ActionRead},
ResourceIdpsyncSettings: {ActionRead, ActionUpdate},
+ ResourceInboxNotification: {ActionCreate, ActionRead, ActionUpdate},
ResourceLicense: {ActionCreate, ActionDelete, ActionRead},
ResourceNotificationMessage: {ActionCreate, ActionDelete, ActionRead, ActionUpdate},
ResourceNotificationPreference: {ActionRead, ActionUpdate},
diff --git a/docs/reference/api/members.md b/docs/reference/api/members.md
index d29774663bc32..5dc39cee2d088 100644
--- a/docs/reference/api/members.md
+++ b/docs/reference/api/members.md
@@ -193,6 +193,7 @@ Status Code **200**
| `resource_type` | `group` |
| `resource_type` | `group_member` |
| `resource_type` | `idpsync_settings` |
+| `resource_type` | `inbox_notification` |
| `resource_type` | `license` |
| `resource_type` | `notification_message` |
| `resource_type` | `notification_preference` |
@@ -356,6 +357,7 @@ Status Code **200**
| `resource_type` | `group` |
| `resource_type` | `group_member` |
| `resource_type` | `idpsync_settings` |
+| `resource_type` | `inbox_notification` |
| `resource_type` | `license` |
| `resource_type` | `notification_message` |
| `resource_type` | `notification_preference` |
@@ -519,6 +521,7 @@ Status Code **200**
| `resource_type` | `group` |
| `resource_type` | `group_member` |
| `resource_type` | `idpsync_settings` |
+| `resource_type` | `inbox_notification` |
| `resource_type` | `license` |
| `resource_type` | `notification_message` |
| `resource_type` | `notification_preference` |
@@ -651,6 +654,7 @@ Status Code **200**
| `resource_type` | `group` |
| `resource_type` | `group_member` |
| `resource_type` | `idpsync_settings` |
+| `resource_type` | `inbox_notification` |
| `resource_type` | `license` |
| `resource_type` | `notification_message` |
| `resource_type` | `notification_preference` |
@@ -915,6 +919,7 @@ Status Code **200**
| `resource_type` | `group` |
| `resource_type` | `group_member` |
| `resource_type` | `idpsync_settings` |
+| `resource_type` | `inbox_notification` |
| `resource_type` | `license` |
| `resource_type` | `notification_message` |
| `resource_type` | `notification_preference` |
diff --git a/docs/reference/api/schemas.md b/docs/reference/api/schemas.md
index b3e4821c2e39e..ffb440675cb21 100644
--- a/docs/reference/api/schemas.md
+++ b/docs/reference/api/schemas.md
@@ -5137,6 +5137,7 @@ Git clone makes use of this by parsing the URL from: 'Username for "https://gith
| `group` |
| `group_member` |
| `idpsync_settings` |
+| `inbox_notification` |
| `license` |
| `notification_message` |
| `notification_preference` |
diff --git a/site/src/api/rbacresourcesGenerated.ts b/site/src/api/rbacresourcesGenerated.ts
index bfd1a46861090..dc37e2b04d4fe 100644
--- a/site/src/api/rbacresourcesGenerated.ts
+++ b/site/src/api/rbacresourcesGenerated.ts
@@ -64,6 +64,11 @@ export const RBACResourceActions: Partial<
read: "read IdP sync settings",
update: "update IdP sync settings",
},
+ inbox_notification: {
+ create: "create inbox notifications",
+ read: "read inbox notifications",
+ update: "update inbox notifications",
+ },
license: {
create: "create a license",
delete: "delete license",
diff --git a/site/src/api/typesGenerated.ts b/site/src/api/typesGenerated.ts
index 8c350d8f5bc31..0535b2b8b50de 100644
--- a/site/src/api/typesGenerated.ts
+++ b/site/src/api/typesGenerated.ts
@@ -1895,6 +1895,7 @@ export type RBACResource =
| "group"
| "group_member"
| "idpsync_settings"
+ | "inbox_notification"
| "license"
| "notification_message"
| "notification_preference"
@@ -1930,6 +1931,7 @@ export const RBACResources: RBACResource[] = [
"group",
"group_member",
"idpsync_settings",
+ "inbox_notification",
"license",
"notification_message",
"notification_preference",
From a5842e5ad186d74612af5e04b26aadd51aa057bd Mon Sep 17 00:00:00 2001
From: Hugo Dutka
Date: Mon, 3 Mar 2025 12:31:56 +0100
Subject: [PATCH 38/44] docs: document default GitHub OAuth2 configuration and
device flow (#16663)
Document the changes made in https://github.com/coder/coder/pull/16629
and https://github.com/coder/coder/pull/16585.
---
docs/admin/users/github-auth.md | 36 +++++++++++++++++++++++++++++++++
1 file changed, 36 insertions(+)
diff --git a/docs/admin/users/github-auth.md b/docs/admin/users/github-auth.md
index 97e700e262ff8..1bacc36462326 100644
--- a/docs/admin/users/github-auth.md
+++ b/docs/admin/users/github-auth.md
@@ -1,5 +1,28 @@
# GitHub
+## Default Configuration
+
+By default, new Coder deployments use a Coder-managed GitHub app to authenticate
+users. We provide it for convenience, allowing you to experiment with Coder
+without setting up your own GitHub OAuth app. Once you authenticate with it, you
+grant Coder server read access to:
+
+- Your GitHub user email
+- Your GitHub organization membership
+- Other metadata listed during the authentication flow
+
+This access is necessary for the Coder server to complete the authentication
+process. To the best of our knowledge, Coder, the company, does not gain access
+to this data by administering the GitHub app.
+
+For production deployments, we recommend configuring your own GitHub OAuth app
+as outlined below. The default is automatically disabled if you configure your
+own app or set:
+
+```env
+CODER_OAUTH2_GITHUB_DEFAULT_PROVIDER_ENABLE=false
+```
+
## Step 1: Configure the OAuth application in GitHub
First,
@@ -82,3 +105,16 @@ helm upgrade coder-v2/coder -n -f values.yaml
> We recommend requiring and auditing MFA usage for all users in your GitHub
> organizations. This can be enforced from the organization settings page in the
> "Authentication security" sidebar tab.
+
+## Device Flow
+
+Coder supports
+[device flow](https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/authorizing-oauth-apps#device-flow)
+for GitHub OAuth. To enable it, set:
+
+```env
+CODER_OAUTH2_GITHUB_DEVICE_FLOW=true
+```
+
+This is optional. We recommend using the standard OAuth flow instead, as it is
+more convenient for end users.
From 9c5d4966eeab6cff53302e34ea50bb47ada34b02 Mon Sep 17 00:00:00 2001
From: Hugo Dutka
Date: Mon, 3 Mar 2025 12:32:27 +0100
Subject: [PATCH 39/44] docs: suggest disabling the default GitHub OAuth2
provider on k8s (#16758)
For production deployments we recommend disabling the default GitHub
OAuth2 app managed by Coder. This PR mentions it in k8s installation
docs and the helm README so users can stumble upon it more easily.
---
docs/install/kubernetes.md | 4 ++++
helm/coder/README.md | 4 ++++
2 files changed, 8 insertions(+)
diff --git a/docs/install/kubernetes.md b/docs/install/kubernetes.md
index 785c48252951c..9c53eb3dc29ae 100644
--- a/docs/install/kubernetes.md
+++ b/docs/install/kubernetes.md
@@ -101,6 +101,10 @@ coder:
# postgres://coder:password@postgres:5432/coder?sslmode=disable
name: coder-db-url
key: url
+ # For production deployments, we recommend configuring your own GitHub
+ # OAuth2 provider and disabling the default one.
+ - name: CODER_OAUTH2_GITHUB_DEFAULT_PROVIDER_ENABLE
+ value: "false"
# (Optional) For production deployments the access URL should be set.
# If you're just trying Coder, access the dashboard via the service IP.
diff --git a/helm/coder/README.md b/helm/coder/README.md
index 015c2e7039088..172f880c83045 100644
--- a/helm/coder/README.md
+++ b/helm/coder/README.md
@@ -47,6 +47,10 @@ coder:
# This env enables the Prometheus metrics endpoint.
- name: CODER_PROMETHEUS_ADDRESS
value: "0.0.0.0:2112"
+ # For production deployments, we recommend configuring your own GitHub
+ # OAuth2 provider and disabling the default one.
+ - name: CODER_OAUTH2_GITHUB_DEFAULT_PROVIDER_ENABLE
+ value: "false"
tls:
secretNames:
- my-tls-secret-name
From 0f4f6bd147799fd31aec38409692c0406d57f002 Mon Sep 17 00:00:00 2001
From: Hugo Dutka
Date: Mon, 3 Mar 2025 13:23:12 +0100
Subject: [PATCH 40/44] docs: describe default sign up behavior with GitHub
(#16765)
Document the sign up behavior with the default GitHub OAuth2 app.
---
docs/admin/users/github-auth.md | 13 +++++++++++++
1 file changed, 13 insertions(+)
diff --git a/docs/admin/users/github-auth.md b/docs/admin/users/github-auth.md
index 1bacc36462326..21cd121c13b3d 100644
--- a/docs/admin/users/github-auth.md
+++ b/docs/admin/users/github-auth.md
@@ -15,6 +15,19 @@ This access is necessary for the Coder server to complete the authentication
process. To the best of our knowledge, Coder, the company, does not gain access
to this data by administering the GitHub app.
+By default, only the admin user can sign up. To allow additional users to sign
+up with GitHub, add the following environment variable:
+
+```env
+CODER_OAUTH2_GITHUB_ALLOW_SIGNUPS=true
+```
+
+To limit sign ups to members of specific GitHub organizations, set:
+
+```env
+CODER_OAUTH2_GITHUB_ALLOWED_ORGS="your-org"
+```
+
For production deployments, we recommend configuring your own GitHub OAuth app
as outlined below. The default is automatically disabled if you configure your
own app or set:
From 88f0131abbc9c6df646ac74abecf482b167dba58 Mon Sep 17 00:00:00 2001
From: Ethan <39577870+ethanndickson@users.noreply.github.com>
Date: Tue, 4 Mar 2025 00:42:13 +1100
Subject: [PATCH 41/44] fix: use dbtime in dbmem query to fix flake (#16773)
Closes https://github.com/coder/internal/issues/447.
The test was failing 30% of the time on Windows without the rounding
applied by `dbtime`. `dbtime` was used on the timestamps inserted into
the DB, but not within the query. Once using `dbtime` within the query
there were no failures in 200 runs.
---
coderd/database/dbmem/dbmem.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/coderd/database/dbmem/dbmem.go b/coderd/database/dbmem/dbmem.go
index 65d24bb3434c2..cc559a7e77f16 100644
--- a/coderd/database/dbmem/dbmem.go
+++ b/coderd/database/dbmem/dbmem.go
@@ -7014,7 +7014,7 @@ func (q *FakeQuerier) GetWorkspaceAgentUsageStatsAndLabels(_ context.Context, cr
}
// WHERE usage = true AND created_at > now() - '1 minute'::interval
// GROUP BY user_id, agent_id, workspace_id
- if agentStat.Usage && agentStat.CreatedAt.After(time.Now().Add(-time.Minute)) {
+ if agentStat.Usage && agentStat.CreatedAt.After(dbtime.Now().Add(-time.Minute)) {
val, ok := latestAgentStats[key]
if !ok {
latestAgentStats[key] = agentStat
From 04c33968cfc2edf03cd7e725c4e5aa3e99f56f14 Mon Sep 17 00:00:00 2001
From: Eng Zer Jun
Date: Mon, 3 Mar 2025 21:46:49 +0800
Subject: [PATCH 42/44] refactor: replace `golang.org/x/exp/slices` with
`slices` (#16772)
The experimental functions in `golang.org/x/exp/slices` are now
available in the standard library since Go 1.21.
Reference: https://go.dev/doc/go1.21#slices
Signed-off-by: Eng Zer Jun
---
agent/agent.go | 2 +-
agent/agent_test.go | 2 +-
agent/agentssh/agentssh.go | 2 +-
agent/agenttest/client.go | 2 +-
agent/reconnectingpty/buffered.go | 2 +-
cli/configssh.go | 2 +-
cli/create.go | 2 +-
cli/exp_scaletest.go | 2 +-
cli/root.go | 2 +-
cli/tokens.go | 2 +-
coderd/agentapi/lifecycle.go | 2 +-
coderd/audit/audit.go | 2 +-
coderd/database/db2sdk/db2sdk.go | 2 +-
coderd/database/dbauthz/dbauthz.go | 2 +-
coderd/database/dbmem/dbmem.go | 2 +-
coderd/database/dbmetrics/dbmetrics.go | 2 +-
coderd/database/dbmetrics/querymetrics.go | 2 +-
coderd/database/dbpurge/dbpurge_test.go | 2 +-
coderd/database/gentest/modelqueries_test.go | 2 +-
coderd/database/migrations/migrate_test.go | 2 +-
coderd/debug.go | 2 +-
coderd/devtunnel/servers.go | 2 +-
coderd/entitlements/entitlements.go | 2 +-
coderd/healthcheck/database.go | 3 +--
coderd/healthcheck/derphealth/derp.go | 2 +-
coderd/httpmw/apikey_test.go | 2 +-
coderd/idpsync/group_test.go | 2 +-
coderd/idpsync/role.go | 2 +-
coderd/idpsync/role_test.go | 2 +-
coderd/insights.go | 5 ++---
coderd/notifications_test.go | 2 +-
coderd/prometheusmetrics/insights/metricscollector.go | 2 +-
coderd/provisionerdserver/acquirer.go | 2 +-
coderd/provisionerdserver/acquirer_test.go | 2 +-
coderd/provisionerdserver/provisionerdserver.go | 2 +-
coderd/userpassword/userpassword.go | 2 +-
coderd/users_test.go | 2 +-
coderd/workspaceagents.go | 2 +-
coderd/workspaceapps/db.go | 2 +-
coderd/workspaceapps/stats_test.go | 2 +-
coderd/workspacebuilds.go | 2 +-
coderd/workspacebuilds_test.go | 2 +-
codersdk/agentsdk/logs_internal_test.go | 2 +-
codersdk/agentsdk/logs_test.go | 2 +-
codersdk/healthsdk/interfaces_internal_test.go | 2 +-
codersdk/provisionerdaemons.go | 2 +-
enterprise/coderd/license/license_test.go | 2 +-
pty/ptytest/ptytest.go | 2 +-
scaletest/workspacetraffic/run_test.go | 2 +-
site/site.go | 2 +-
tailnet/node.go | 2 +-
tailnet/node_internal_test.go | 2 +-
52 files changed, 53 insertions(+), 55 deletions(-)
diff --git a/agent/agent.go b/agent/agent.go
index 40e5de7356d9c..c42bf3a815e18 100644
--- a/agent/agent.go
+++ b/agent/agent.go
@@ -14,6 +14,7 @@ import (
"os"
"os/user"
"path/filepath"
+ "slices"
"sort"
"strconv"
"strings"
@@ -26,7 +27,6 @@ import (
"github.com/prometheus/common/expfmt"
"github.com/spf13/afero"
"go.uber.org/atomic"
- "golang.org/x/exp/slices"
"golang.org/x/sync/errgroup"
"golang.org/x/xerrors"
"google.golang.org/protobuf/types/known/timestamppb"
diff --git a/agent/agent_test.go b/agent/agent_test.go
index 8466c4e0961b4..44112b6524fc9 100644
--- a/agent/agent_test.go
+++ b/agent/agent_test.go
@@ -19,6 +19,7 @@ import (
"path/filepath"
"regexp"
"runtime"
+ "slices"
"strconv"
"strings"
"sync/atomic"
@@ -41,7 +42,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/crypto/ssh"
- "golang.org/x/exp/slices"
"golang.org/x/xerrors"
"cdr.dev/slog"
diff --git a/agent/agentssh/agentssh.go b/agent/agentssh/agentssh.go
index b1a1f32baf032..816bdf55556e9 100644
--- a/agent/agentssh/agentssh.go
+++ b/agent/agentssh/agentssh.go
@@ -12,6 +12,7 @@ import (
"os/user"
"path/filepath"
"runtime"
+ "slices"
"strings"
"sync"
"time"
@@ -24,7 +25,6 @@ import (
"github.com/spf13/afero"
"go.uber.org/atomic"
gossh "golang.org/x/crypto/ssh"
- "golang.org/x/exp/slices"
"golang.org/x/xerrors"
"cdr.dev/slog"
diff --git a/agent/agenttest/client.go b/agent/agenttest/client.go
index b5fa6ea8c2189..a1d14e32a2c55 100644
--- a/agent/agenttest/client.go
+++ b/agent/agenttest/client.go
@@ -3,6 +3,7 @@ package agenttest
import (
"context"
"io"
+ "slices"
"sync"
"sync/atomic"
"testing"
@@ -12,7 +13,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/exp/maps"
- "golang.org/x/exp/slices"
"golang.org/x/xerrors"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/emptypb"
diff --git a/agent/reconnectingpty/buffered.go b/agent/reconnectingpty/buffered.go
index 6f314333a725e..fb3c9907f4f8c 100644
--- a/agent/reconnectingpty/buffered.go
+++ b/agent/reconnectingpty/buffered.go
@@ -5,11 +5,11 @@ import (
"errors"
"io"
"net"
+ "slices"
"time"
"github.com/armon/circbuf"
"github.com/prometheus/client_golang/prometheus"
- "golang.org/x/exp/slices"
"golang.org/x/xerrors"
"cdr.dev/slog"
diff --git a/cli/configssh.go b/cli/configssh.go
index a7aed33eba1df..b3c29f711bdb6 100644
--- a/cli/configssh.go
+++ b/cli/configssh.go
@@ -11,6 +11,7 @@ import (
"os"
"path/filepath"
"runtime"
+ "slices"
"strconv"
"strings"
@@ -19,7 +20,6 @@ import (
"github.com/pkg/diff"
"github.com/pkg/diff/write"
"golang.org/x/exp/constraints"
- "golang.org/x/exp/slices"
"golang.org/x/xerrors"
"github.com/coder/coder/v2/cli/cliui"
diff --git a/cli/create.go b/cli/create.go
index f3709314cd2be..bb2e8dde0255a 100644
--- a/cli/create.go
+++ b/cli/create.go
@@ -4,11 +4,11 @@ import (
"context"
"fmt"
"io"
+ "slices"
"strings"
"time"
"github.com/google/uuid"
- "golang.org/x/exp/slices"
"golang.org/x/xerrors"
"github.com/coder/pretty"
diff --git a/cli/exp_scaletest.go b/cli/exp_scaletest.go
index a7bd0f396b5aa..a844a7e8c6258 100644
--- a/cli/exp_scaletest.go
+++ b/cli/exp_scaletest.go
@@ -12,6 +12,7 @@ import (
"net/url"
"os"
"os/signal"
+ "slices"
"strconv"
"strings"
"sync"
@@ -21,7 +22,6 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"go.opentelemetry.io/otel/trace"
- "golang.org/x/exp/slices"
"golang.org/x/xerrors"
"cdr.dev/slog"
diff --git a/cli/root.go b/cli/root.go
index 09044ad3e28ca..816d7b769eb0d 100644
--- a/cli/root.go
+++ b/cli/root.go
@@ -17,6 +17,7 @@ import (
"path/filepath"
"runtime"
"runtime/trace"
+ "slices"
"strings"
"sync"
"syscall"
@@ -25,7 +26,6 @@ import (
"github.com/mattn/go-isatty"
"github.com/mitchellh/go-wordwrap"
- "golang.org/x/exp/slices"
"golang.org/x/mod/semver"
"golang.org/x/xerrors"
diff --git a/cli/tokens.go b/cli/tokens.go
index d132547576d32..7873882e3ae05 100644
--- a/cli/tokens.go
+++ b/cli/tokens.go
@@ -3,10 +3,10 @@ package cli
import (
"fmt"
"os"
+ "slices"
"strings"
"time"
- "golang.org/x/exp/slices"
"golang.org/x/xerrors"
"github.com/coder/coder/v2/cli/cliui"
diff --git a/coderd/agentapi/lifecycle.go b/coderd/agentapi/lifecycle.go
index 5dd5e7b0c1b06..6bb3fedc5174c 100644
--- a/coderd/agentapi/lifecycle.go
+++ b/coderd/agentapi/lifecycle.go
@@ -3,10 +3,10 @@ package agentapi
import (
"context"
"database/sql"
+ "slices"
"time"
"github.com/google/uuid"
- "golang.org/x/exp/slices"
"golang.org/x/mod/semver"
"golang.org/x/xerrors"
"google.golang.org/protobuf/types/known/timestamppb"
diff --git a/coderd/audit/audit.go b/coderd/audit/audit.go
index 097b0c6f49588..a965c27a004c6 100644
--- a/coderd/audit/audit.go
+++ b/coderd/audit/audit.go
@@ -2,11 +2,11 @@ package audit
import (
"context"
+ "slices"
"sync"
"testing"
"github.com/google/uuid"
- "golang.org/x/exp/slices"
"github.com/coder/coder/v2/coderd/database"
)
diff --git a/coderd/database/db2sdk/db2sdk.go b/coderd/database/db2sdk/db2sdk.go
index 2249e0c9f32ec..53cd272b3235e 100644
--- a/coderd/database/db2sdk/db2sdk.go
+++ b/coderd/database/db2sdk/db2sdk.go
@@ -5,13 +5,13 @@ import (
"encoding/json"
"fmt"
"net/url"
+ "slices"
"sort"
"strconv"
"strings"
"time"
"github.com/google/uuid"
- "golang.org/x/exp/slices"
"golang.org/x/xerrors"
"tailscale.com/tailcfg"
diff --git a/coderd/database/dbauthz/dbauthz.go b/coderd/database/dbauthz/dbauthz.go
index a39ba8d4172f0..b09c629959392 100644
--- a/coderd/database/dbauthz/dbauthz.go
+++ b/coderd/database/dbauthz/dbauthz.go
@@ -5,13 +5,13 @@ import (
"database/sql"
"encoding/json"
"errors"
+ "slices"
"strings"
"sync/atomic"
"testing"
"time"
"github.com/google/uuid"
- "golang.org/x/exp/slices"
"golang.org/x/xerrors"
"github.com/open-policy-agent/opa/topdown"
diff --git a/coderd/database/dbmem/dbmem.go b/coderd/database/dbmem/dbmem.go
index cc559a7e77f16..125cca81e184f 100644
--- a/coderd/database/dbmem/dbmem.go
+++ b/coderd/database/dbmem/dbmem.go
@@ -10,6 +10,7 @@ import (
"math"
"reflect"
"regexp"
+ "slices"
"sort"
"strings"
"sync"
@@ -19,7 +20,6 @@ import (
"github.com/lib/pq"
"golang.org/x/exp/constraints"
"golang.org/x/exp/maps"
- "golang.org/x/exp/slices"
"golang.org/x/xerrors"
"github.com/coder/coder/v2/coderd/notifications/types"
diff --git a/coderd/database/dbmetrics/dbmetrics.go b/coderd/database/dbmetrics/dbmetrics.go
index b0309f9f2e2eb..fbf4a3cae6931 100644
--- a/coderd/database/dbmetrics/dbmetrics.go
+++ b/coderd/database/dbmetrics/dbmetrics.go
@@ -2,11 +2,11 @@ package dbmetrics
import (
"context"
+ "slices"
"strconv"
"time"
"github.com/prometheus/client_golang/prometheus"
- "golang.org/x/exp/slices"
"cdr.dev/slog"
"github.com/coder/coder/v2/coderd/database"
diff --git a/coderd/database/dbmetrics/querymetrics.go b/coderd/database/dbmetrics/querymetrics.go
index d05ec5f5acdf9..3855db4382751 100644
--- a/coderd/database/dbmetrics/querymetrics.go
+++ b/coderd/database/dbmetrics/querymetrics.go
@@ -5,11 +5,11 @@ package dbmetrics
import (
"context"
+ "slices"
"time"
"github.com/google/uuid"
"github.com/prometheus/client_golang/prometheus"
- "golang.org/x/exp/slices"
"cdr.dev/slog"
"github.com/coder/coder/v2/coderd/database"
diff --git a/coderd/database/dbpurge/dbpurge_test.go b/coderd/database/dbpurge/dbpurge_test.go
index 3b21b1076cceb..2422bcc91dcfa 100644
--- a/coderd/database/dbpurge/dbpurge_test.go
+++ b/coderd/database/dbpurge/dbpurge_test.go
@@ -7,6 +7,7 @@ import (
"database/sql"
"encoding/json"
"fmt"
+ "slices"
"testing"
"time"
@@ -14,7 +15,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
- "golang.org/x/exp/slices"
"cdr.dev/slog"
"cdr.dev/slog/sloggers/slogtest"
diff --git a/coderd/database/gentest/modelqueries_test.go b/coderd/database/gentest/modelqueries_test.go
index 52a99b54405ec..1025aaf324002 100644
--- a/coderd/database/gentest/modelqueries_test.go
+++ b/coderd/database/gentest/modelqueries_test.go
@@ -5,11 +5,11 @@ import (
"go/ast"
"go/parser"
"go/token"
+ "slices"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "golang.org/x/exp/slices"
)
// TestCustomQueriesSynced makes sure the manual custom queries in modelqueries.go
diff --git a/coderd/database/migrations/migrate_test.go b/coderd/database/migrations/migrate_test.go
index bd347af0be1ea..62e301a422e55 100644
--- a/coderd/database/migrations/migrate_test.go
+++ b/coderd/database/migrations/migrate_test.go
@@ -6,6 +6,7 @@ import (
"fmt"
"os"
"path/filepath"
+ "slices"
"sync"
"testing"
@@ -17,7 +18,6 @@ import (
"github.com/lib/pq"
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
- "golang.org/x/exp/slices"
"golang.org/x/sync/errgroup"
"github.com/coder/coder/v2/coderd/database/dbtestutil"
diff --git a/coderd/debug.go b/coderd/debug.go
index a34e211ef00b9..0ae62282a22d8 100644
--- a/coderd/debug.go
+++ b/coderd/debug.go
@@ -7,10 +7,10 @@ import (
"encoding/json"
"fmt"
"net/http"
+ "slices"
"time"
"github.com/google/uuid"
- "golang.org/x/exp/slices"
"golang.org/x/xerrors"
"cdr.dev/slog"
diff --git a/coderd/devtunnel/servers.go b/coderd/devtunnel/servers.go
index 498ba74e42017..79be97db875ef 100644
--- a/coderd/devtunnel/servers.go
+++ b/coderd/devtunnel/servers.go
@@ -2,11 +2,11 @@ package devtunnel
import (
"runtime"
+ "slices"
"sync"
"time"
ping "github.com/prometheus-community/pro-bing"
- "golang.org/x/exp/slices"
"golang.org/x/sync/errgroup"
"golang.org/x/xerrors"
diff --git a/coderd/entitlements/entitlements.go b/coderd/entitlements/entitlements.go
index e141a861a9045..6bbe32ade4a1b 100644
--- a/coderd/entitlements/entitlements.go
+++ b/coderd/entitlements/entitlements.go
@@ -4,10 +4,10 @@ import (
"context"
"encoding/json"
"net/http"
+ "slices"
"sync"
"time"
- "golang.org/x/exp/slices"
"golang.org/x/xerrors"
"github.com/coder/coder/v2/codersdk"
diff --git a/coderd/healthcheck/database.go b/coderd/healthcheck/database.go
index 275124c5b1808..97b4783231acc 100644
--- a/coderd/healthcheck/database.go
+++ b/coderd/healthcheck/database.go
@@ -2,10 +2,9 @@ package healthcheck
import (
"context"
+ "slices"
"time"
- "golang.org/x/exp/slices"
-
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/healthcheck/health"
"github.com/coder/coder/v2/codersdk/healthsdk"
diff --git a/coderd/healthcheck/derphealth/derp.go b/coderd/healthcheck/derphealth/derp.go
index f74db243cbc18..fa24ebe7574c6 100644
--- a/coderd/healthcheck/derphealth/derp.go
+++ b/coderd/healthcheck/derphealth/derp.go
@@ -6,12 +6,12 @@ import (
"net"
"net/netip"
"net/url"
+ "slices"
"strings"
"sync"
"sync/atomic"
"time"
- "golang.org/x/exp/slices"
"golang.org/x/xerrors"
"tailscale.com/derp"
"tailscale.com/derp/derphttp"
diff --git a/coderd/httpmw/apikey_test.go b/coderd/httpmw/apikey_test.go
index c2e69eb7ae686..bd979e88235ad 100644
--- a/coderd/httpmw/apikey_test.go
+++ b/coderd/httpmw/apikey_test.go
@@ -9,6 +9,7 @@ import (
"net"
"net/http"
"net/http/httptest"
+ "slices"
"strings"
"sync/atomic"
"testing"
@@ -17,7 +18,6 @@ import (
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "golang.org/x/exp/slices"
"golang.org/x/oauth2"
"github.com/coder/coder/v2/coderd/database"
diff --git a/coderd/idpsync/group_test.go b/coderd/idpsync/group_test.go
index 2baafd53ff03c..7fbfd3bfe4250 100644
--- a/coderd/idpsync/group_test.go
+++ b/coderd/idpsync/group_test.go
@@ -4,12 +4,12 @@ import (
"context"
"database/sql"
"regexp"
+ "slices"
"testing"
"github.com/golang-jwt/jwt/v4"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
- "golang.org/x/exp/slices"
"golang.org/x/xerrors"
"cdr.dev/slog/sloggers/slogtest"
diff --git a/coderd/idpsync/role.go b/coderd/idpsync/role.go
index 5cb0ac172581c..22e0edc3bc662 100644
--- a/coderd/idpsync/role.go
+++ b/coderd/idpsync/role.go
@@ -3,10 +3,10 @@ package idpsync
import (
"context"
"encoding/json"
+ "slices"
"github.com/golang-jwt/jwt/v4"
"github.com/google/uuid"
- "golang.org/x/exp/slices"
"golang.org/x/xerrors"
"cdr.dev/slog"
diff --git a/coderd/idpsync/role_test.go b/coderd/idpsync/role_test.go
index 45e9edd6c1dd4..7d686442144b1 100644
--- a/coderd/idpsync/role_test.go
+++ b/coderd/idpsync/role_test.go
@@ -3,13 +3,13 @@ package idpsync_test
import (
"context"
"encoding/json"
+ "slices"
"testing"
"github.com/golang-jwt/jwt/v4"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
"go.uber.org/mock/gomock"
- "golang.org/x/exp/slices"
"cdr.dev/slog/sloggers/slogtest"
"github.com/coder/coder/v2/coderd/database"
diff --git a/coderd/insights.go b/coderd/insights.go
index 9c9fdcfa3c200..9f2bbf5d8b463 100644
--- a/coderd/insights.go
+++ b/coderd/insights.go
@@ -5,18 +5,17 @@ import (
"database/sql"
"fmt"
"net/http"
+ "slices"
"strings"
"time"
- "github.com/coder/coder/v2/coderd/database/dbtime"
-
"github.com/google/uuid"
- "golang.org/x/exp/slices"
"golang.org/x/sync/errgroup"
"golang.org/x/xerrors"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/db2sdk"
+ "github.com/coder/coder/v2/coderd/database/dbtime"
"github.com/coder/coder/v2/coderd/httpapi"
"github.com/coder/coder/v2/coderd/rbac"
"github.com/coder/coder/v2/coderd/rbac/policy"
diff --git a/coderd/notifications_test.go b/coderd/notifications_test.go
index 2e8d851522744..d50464869298b 100644
--- a/coderd/notifications_test.go
+++ b/coderd/notifications_test.go
@@ -2,10 +2,10 @@ package coderd_test
import (
"net/http"
+ "slices"
"testing"
"github.com/stretchr/testify/require"
- "golang.org/x/exp/slices"
"github.com/coder/serpent"
diff --git a/coderd/prometheusmetrics/insights/metricscollector.go b/coderd/prometheusmetrics/insights/metricscollector.go
index 7dcf6025f2fa2..f7ecb06e962f0 100644
--- a/coderd/prometheusmetrics/insights/metricscollector.go
+++ b/coderd/prometheusmetrics/insights/metricscollector.go
@@ -2,12 +2,12 @@ package insights
import (
"context"
+ "slices"
"sync/atomic"
"time"
"github.com/google/uuid"
"github.com/prometheus/client_golang/prometheus"
- "golang.org/x/exp/slices"
"golang.org/x/sync/errgroup"
"golang.org/x/xerrors"
diff --git a/coderd/provisionerdserver/acquirer.go b/coderd/provisionerdserver/acquirer.go
index 4c2fe6b1d49a9..a655edebfdd98 100644
--- a/coderd/provisionerdserver/acquirer.go
+++ b/coderd/provisionerdserver/acquirer.go
@@ -4,13 +4,13 @@ import (
"context"
"database/sql"
"encoding/json"
+ "slices"
"strings"
"sync"
"time"
"github.com/cenkalti/backoff/v4"
"github.com/google/uuid"
- "golang.org/x/exp/slices"
"golang.org/x/xerrors"
"cdr.dev/slog"
diff --git a/coderd/provisionerdserver/acquirer_test.go b/coderd/provisionerdserver/acquirer_test.go
index 6e4d6a4ff7e03..22794c72657cc 100644
--- a/coderd/provisionerdserver/acquirer_test.go
+++ b/coderd/provisionerdserver/acquirer_test.go
@@ -5,6 +5,7 @@ import (
"database/sql"
"encoding/json"
"fmt"
+ "slices"
"strings"
"sync"
"testing"
@@ -15,7 +16,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
- "golang.org/x/exp/slices"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbmem"
diff --git a/coderd/provisionerdserver/provisionerdserver.go b/coderd/provisionerdserver/provisionerdserver.go
index 3c9650ffc82e0..3c82a41d9323d 100644
--- a/coderd/provisionerdserver/provisionerdserver.go
+++ b/coderd/provisionerdserver/provisionerdserver.go
@@ -9,6 +9,7 @@ import (
"net/http"
"net/url"
"reflect"
+ "slices"
"sort"
"strconv"
"strings"
@@ -20,7 +21,6 @@ import (
semconv "go.opentelemetry.io/otel/semconv/v1.14.0"
"go.opentelemetry.io/otel/trace"
"golang.org/x/exp/maps"
- "golang.org/x/exp/slices"
"golang.org/x/oauth2"
"golang.org/x/xerrors"
protobuf "google.golang.org/protobuf/proto"
diff --git a/coderd/userpassword/userpassword.go b/coderd/userpassword/userpassword.go
index fa16a2c89edf4..2fb01a76d258f 100644
--- a/coderd/userpassword/userpassword.go
+++ b/coderd/userpassword/userpassword.go
@@ -7,12 +7,12 @@ import (
"encoding/base64"
"fmt"
"os"
+ "slices"
"strconv"
"strings"
passwordvalidator "github.com/wagslane/go-password-validator"
"golang.org/x/crypto/pbkdf2"
- "golang.org/x/exp/slices"
"golang.org/x/xerrors"
"github.com/coder/coder/v2/coderd/util/lazy"
diff --git a/coderd/users_test.go b/coderd/users_test.go
index 74c27da7ef6f5..2d85a9823a587 100644
--- a/coderd/users_test.go
+++ b/coderd/users_test.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"net/http"
+ "slices"
"strings"
"testing"
"time"
@@ -19,7 +20,6 @@ import (
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "golang.org/x/exp/slices"
"golang.org/x/sync/errgroup"
"golang.org/x/xerrors"
diff --git a/coderd/workspaceagents.go b/coderd/workspaceagents.go
index ddfb21a751671..ff16735af9aea 100644
--- a/coderd/workspaceagents.go
+++ b/coderd/workspaceagents.go
@@ -9,6 +9,7 @@ import (
"io"
"net/http"
"net/url"
+ "slices"
"sort"
"strconv"
"strings"
@@ -17,7 +18,6 @@ import (
"github.com/google/uuid"
"github.com/sqlc-dev/pqtype"
"golang.org/x/exp/maps"
- "golang.org/x/exp/slices"
"golang.org/x/sync/errgroup"
"golang.org/x/xerrors"
"tailscale.com/tailcfg"
diff --git a/coderd/workspaceapps/db.go b/coderd/workspaceapps/db.go
index 1aa4dfe91bdd0..602983959948d 100644
--- a/coderd/workspaceapps/db.go
+++ b/coderd/workspaceapps/db.go
@@ -7,10 +7,10 @@ import (
"net/http"
"net/url"
"path"
+ "slices"
"strings"
"time"
- "golang.org/x/exp/slices"
"golang.org/x/xerrors"
"github.com/go-jose/go-jose/v4/jwt"
diff --git a/coderd/workspaceapps/stats_test.go b/coderd/workspaceapps/stats_test.go
index c2c722929ea83..51a6d9eebf169 100644
--- a/coderd/workspaceapps/stats_test.go
+++ b/coderd/workspaceapps/stats_test.go
@@ -2,6 +2,7 @@ package workspaceapps_test
import (
"context"
+ "slices"
"sync"
"sync/atomic"
"testing"
@@ -10,7 +11,6 @@ import (
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "golang.org/x/exp/slices"
"golang.org/x/xerrors"
"github.com/coder/coder/v2/coderd/database/dbtime"
diff --git a/coderd/workspacebuilds.go b/coderd/workspacebuilds.go
index 76166bfcb6164..735d6025dd16f 100644
--- a/coderd/workspacebuilds.go
+++ b/coderd/workspacebuilds.go
@@ -7,13 +7,13 @@ import (
"fmt"
"math"
"net/http"
+ "slices"
"sort"
"strconv"
"time"
"github.com/go-chi/chi/v5"
"github.com/google/uuid"
- "golang.org/x/exp/slices"
"golang.org/x/sync/errgroup"
"golang.org/x/xerrors"
diff --git a/coderd/workspacebuilds_test.go b/coderd/workspacebuilds_test.go
index f6bfcfd2ead28..84efaa7ed0e23 100644
--- a/coderd/workspacebuilds_test.go
+++ b/coderd/workspacebuilds_test.go
@@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"net/http"
+ "slices"
"strconv"
"testing"
"time"
@@ -14,7 +15,6 @@ import (
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/propagation"
- "golang.org/x/exp/slices"
"golang.org/x/xerrors"
"cdr.dev/slog"
diff --git a/codersdk/agentsdk/logs_internal_test.go b/codersdk/agentsdk/logs_internal_test.go
index 48149b83c497d..6333ffa19fbf5 100644
--- a/codersdk/agentsdk/logs_internal_test.go
+++ b/codersdk/agentsdk/logs_internal_test.go
@@ -2,12 +2,12 @@ package agentsdk
import (
"context"
+ "slices"
"testing"
"time"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
- "golang.org/x/exp/slices"
"golang.org/x/xerrors"
protobuf "google.golang.org/protobuf/proto"
diff --git a/codersdk/agentsdk/logs_test.go b/codersdk/agentsdk/logs_test.go
index bb4948cb90dff..2b3b934c8db3c 100644
--- a/codersdk/agentsdk/logs_test.go
+++ b/codersdk/agentsdk/logs_test.go
@@ -4,13 +4,13 @@ import (
"context"
"fmt"
"net/http"
+ "slices"
"testing"
"time"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "golang.org/x/exp/slices"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/codersdk/agentsdk"
diff --git a/codersdk/healthsdk/interfaces_internal_test.go b/codersdk/healthsdk/interfaces_internal_test.go
index 2996c6e1f09e3..f870e543166e1 100644
--- a/codersdk/healthsdk/interfaces_internal_test.go
+++ b/codersdk/healthsdk/interfaces_internal_test.go
@@ -3,11 +3,11 @@ package healthsdk
import (
"net"
"net/netip"
+ "slices"
"strings"
"testing"
"github.com/stretchr/testify/require"
- "golang.org/x/exp/slices"
"tailscale.com/net/interfaces"
"github.com/coder/coder/v2/coderd/healthcheck/health"
diff --git a/codersdk/provisionerdaemons.go b/codersdk/provisionerdaemons.go
index 2a9472f1cb36a..014a68bbce72e 100644
--- a/codersdk/provisionerdaemons.go
+++ b/codersdk/provisionerdaemons.go
@@ -7,13 +7,13 @@ import (
"io"
"net/http"
"net/http/cookiejar"
+ "slices"
"strings"
"time"
"github.com/google/uuid"
"github.com/hashicorp/yamux"
"golang.org/x/exp/maps"
- "golang.org/x/exp/slices"
"golang.org/x/xerrors"
"github.com/coder/coder/v2/buildinfo"
diff --git a/enterprise/coderd/license/license_test.go b/enterprise/coderd/license/license_test.go
index ad7fc68f58600..b8b25b9535a2f 100644
--- a/enterprise/coderd/license/license_test.go
+++ b/enterprise/coderd/license/license_test.go
@@ -3,13 +3,13 @@ package license_test
import (
"context"
"fmt"
+ "slices"
"testing"
"time"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "golang.org/x/exp/slices"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbmem"
diff --git a/pty/ptytest/ptytest.go b/pty/ptytest/ptytest.go
index a871a0ddcafa0..3c86970ec0006 100644
--- a/pty/ptytest/ptytest.go
+++ b/pty/ptytest/ptytest.go
@@ -8,6 +8,7 @@ import (
"io"
"regexp"
"runtime"
+ "slices"
"strings"
"sync"
"testing"
@@ -16,7 +17,6 @@ import (
"github.com/acarl005/stripansi"
"github.com/stretchr/testify/require"
- "golang.org/x/exp/slices"
"golang.org/x/xerrors"
"github.com/coder/coder/v2/pty"
diff --git a/scaletest/workspacetraffic/run_test.go b/scaletest/workspacetraffic/run_test.go
index 980e0d62ed21b..fe3fd389df082 100644
--- a/scaletest/workspacetraffic/run_test.go
+++ b/scaletest/workspacetraffic/run_test.go
@@ -7,6 +7,7 @@ import (
"net/http"
"net/http/httptest"
"runtime"
+ "slices"
"strings"
"sync"
"testing"
@@ -15,7 +16,6 @@ import (
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "golang.org/x/exp/slices"
"github.com/coder/coder/v2/agent/agenttest"
"github.com/coder/coder/v2/coderd/coderdtest"
diff --git a/site/site.go b/site/site.go
index e2209b4052929..e0e9a1328508b 100644
--- a/site/site.go
+++ b/site/site.go
@@ -19,6 +19,7 @@ import (
"os"
"path"
"path/filepath"
+ "slices"
"strings"
"sync"
"sync/atomic"
@@ -29,7 +30,6 @@ import (
"github.com/justinas/nosurf"
"github.com/klauspost/compress/zstd"
"github.com/unrolled/secure"
- "golang.org/x/exp/slices"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/singleflight"
"golang.org/x/xerrors"
diff --git a/tailnet/node.go b/tailnet/node.go
index 858af3ad71e24..1077a7d69c44c 100644
--- a/tailnet/node.go
+++ b/tailnet/node.go
@@ -3,11 +3,11 @@ package tailnet
import (
"context"
"net/netip"
+ "slices"
"sync"
"time"
"golang.org/x/exp/maps"
- "golang.org/x/exp/slices"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
"tailscale.com/wgengine"
diff --git a/tailnet/node_internal_test.go b/tailnet/node_internal_test.go
index 7a2222536620c..0c04a668090d3 100644
--- a/tailnet/node_internal_test.go
+++ b/tailnet/node_internal_test.go
@@ -2,13 +2,13 @@ package tailnet
import (
"net/netip"
+ "slices"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/exp/maps"
- "golang.org/x/exp/slices"
"golang.org/x/xerrors"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
From ca23abcc3037aaa226ac3af35ae36756bdb7da8c Mon Sep 17 00:00:00 2001
From: Cian Johnston
Date: Mon, 3 Mar 2025 14:15:25 +0000
Subject: [PATCH 43/44] chore(cli): fix test flake in
TestSSH_Container/NotFound (#16771)
If you hit the list containers endpoint with no containers running, the
response is different. This uses a mock lister to ensure a consistent
response from the agent endpoint.
---
cli/ssh_test.go | 22 +++++++++++++++++++---
1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/cli/ssh_test.go b/cli/ssh_test.go
index 8a8d2d6ef3f6f..1fd4069ae3aea 100644
--- a/cli/ssh_test.go
+++ b/cli/ssh_test.go
@@ -29,6 +29,7 @@ import (
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "go.uber.org/mock/gomock"
"golang.org/x/crypto/ssh"
gosshagent "golang.org/x/crypto/ssh/agent"
"golang.org/x/sync/errgroup"
@@ -36,6 +37,7 @@ import (
"github.com/coder/coder/v2/agent"
"github.com/coder/coder/v2/agent/agentcontainers"
+ "github.com/coder/coder/v2/agent/agentcontainers/acmock"
"github.com/coder/coder/v2/agent/agentssh"
"github.com/coder/coder/v2/agent/agenttest"
agentproto "github.com/coder/coder/v2/agent/proto"
@@ -1986,13 +1988,26 @@ func TestSSH_Container(t *testing.T) {
ctx := testutil.Context(t, testutil.WaitShort)
client, workspace, agentToken := setupWorkspaceForAgent(t)
+ ctrl := gomock.NewController(t)
+ mLister := acmock.NewMockLister(ctrl)
_ = agenttest.New(t, client.URL, agentToken, func(o *agent.Options) {
o.ExperimentalDevcontainersEnabled = true
- o.ContainerLister = agentcontainers.NewDocker(o.Execer)
+ o.ContainerLister = mLister
})
_ = coderdtest.NewWorkspaceAgentWaiter(t, client, workspace.ID).Wait()
- inv, root := clitest.New(t, "ssh", workspace.Name, "-c", uuid.NewString())
+ mLister.EXPECT().List(gomock.Any()).Return(codersdk.WorkspaceAgentListContainersResponse{
+ Containers: []codersdk.WorkspaceAgentDevcontainer{
+ {
+ ID: uuid.NewString(),
+ FriendlyName: "something_completely_different",
+ },
+ },
+ Warnings: nil,
+ }, nil)
+
+ cID := uuid.NewString()
+ inv, root := clitest.New(t, "ssh", workspace.Name, "-c", cID)
clitest.SetupConfig(t, client, root)
ptty := ptytest.New(t).Attach(inv)
@@ -2001,7 +2016,8 @@ func TestSSH_Container(t *testing.T) {
assert.NoError(t, err)
})
- ptty.ExpectMatch("Container not found:")
+ ptty.ExpectMatch(fmt.Sprintf("Container not found: %q", cID))
+ ptty.ExpectMatch("Available containers: [something_completely_different]")
<-cmdDone
})
From 7dc05ccd110ae2f0395de62dac06c5edb4acdaab Mon Sep 17 00:00:00 2001
From: Mathias Fredriksson
Date: Mon, 3 Mar 2025 15:57:17 +0000
Subject: [PATCH 44/44] feat: enable agent connection reports by default,
remove flag
---
agent/agent.go | 8 --------
agent/agent_test.go | 23 +++++------------------
cli/agent.go | 14 --------------
3 files changed, 5 insertions(+), 40 deletions(-)
diff --git a/agent/agent.go b/agent/agent.go
index c42bf3a815e18..acd959582280f 100644
--- a/agent/agent.go
+++ b/agent/agent.go
@@ -91,7 +91,6 @@ type Options struct {
Execer agentexec.Execer
ContainerLister agentcontainers.Lister
- ExperimentalConnectionReports bool
ExperimentalDevcontainersEnabled bool
}
@@ -196,7 +195,6 @@ func New(options Options) Agent {
lister: options.ContainerLister,
experimentalDevcontainersEnabled: options.ExperimentalDevcontainersEnabled,
- experimentalConnectionReports: options.ExperimentalConnectionReports,
}
// Initially, we have a closed channel, reflecting the fact that we are not initially connected.
// Each time we connect we replace the channel (while holding the closeMutex) with a new one
@@ -273,7 +271,6 @@ type agent struct {
lister agentcontainers.Lister
experimentalDevcontainersEnabled bool
- experimentalConnectionReports bool
}
func (a *agent) TailnetConn() *tailnet.Conn {
@@ -797,11 +794,6 @@ const (
)
func (a *agent) reportConnection(id uuid.UUID, connectionType proto.Connection_Type, ip string) (disconnected func(code int, reason string)) {
- // If the experiment hasn't been enabled, we don't report connections.
- if !a.experimentalConnectionReports {
- return func(int, string) {} // Noop.
- }
-
// Remove the port from the IP because ports are not supported in coderd.
if host, _, err := net.SplitHostPort(ip); err != nil {
a.logger.Error(a.hardCtx, "split host and port for connection report failed", slog.F("ip", ip), slog.Error(err))
diff --git a/agent/agent_test.go b/agent/agent_test.go
index 44112b6524fc9..d6c8e4d97644c 100644
--- a/agent/agent_test.go
+++ b/agent/agent_test.go
@@ -173,9 +173,7 @@ func TestAgent_Stats_Magic(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
defer cancel()
//nolint:dogsled
- conn, agentClient, stats, _, _ := setupAgent(t, agentsdk.Manifest{}, 0, func(_ *agenttest.Client, o *agent.Options) {
- o.ExperimentalConnectionReports = true
- })
+ conn, agentClient, stats, _, _ := setupAgent(t, agentsdk.Manifest{}, 0)
sshClient, err := conn.SSHClient(ctx)
require.NoError(t, err)
defer sshClient.Close()
@@ -243,9 +241,7 @@ func TestAgent_Stats_Magic(t *testing.T) {
remotePort := sc.Text()
//nolint:dogsled
- conn, agentClient, stats, _, _ := setupAgent(t, agentsdk.Manifest{}, 0, func(_ *agenttest.Client, o *agent.Options) {
- o.ExperimentalConnectionReports = true
- })
+ conn, agentClient, stats, _, _ := setupAgent(t, agentsdk.Manifest{}, 0)
sshClient, err := conn.SSHClient(ctx)
require.NoError(t, err)
@@ -960,9 +956,7 @@ func TestAgent_SFTP(t *testing.T) {
home = "/" + strings.ReplaceAll(home, "\\", "/")
}
//nolint:dogsled
- conn, agentClient, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0, func(_ *agenttest.Client, o *agent.Options) {
- o.ExperimentalConnectionReports = true
- })
+ conn, agentClient, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0)
sshClient, err := conn.SSHClient(ctx)
require.NoError(t, err)
defer sshClient.Close()
@@ -998,9 +992,7 @@ func TestAgent_SCP(t *testing.T) {
defer cancel()
//nolint:dogsled
- conn, agentClient, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0, func(_ *agenttest.Client, o *agent.Options) {
- o.ExperimentalConnectionReports = true
- })
+ conn, agentClient, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0)
sshClient, err := conn.SSHClient(ctx)
require.NoError(t, err)
defer sshClient.Close()
@@ -1043,7 +1035,6 @@ func TestAgent_FileTransferBlocked(t *testing.T) {
//nolint:dogsled
conn, agentClient, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0, func(_ *agenttest.Client, o *agent.Options) {
o.BlockFileTransfer = true
- o.ExperimentalConnectionReports = true
})
sshClient, err := conn.SSHClient(ctx)
require.NoError(t, err)
@@ -1064,7 +1055,6 @@ func TestAgent_FileTransferBlocked(t *testing.T) {
//nolint:dogsled
conn, agentClient, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0, func(_ *agenttest.Client, o *agent.Options) {
o.BlockFileTransfer = true
- o.ExperimentalConnectionReports = true
})
sshClient, err := conn.SSHClient(ctx)
require.NoError(t, err)
@@ -1093,7 +1083,6 @@ func TestAgent_FileTransferBlocked(t *testing.T) {
//nolint:dogsled
conn, agentClient, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0, func(_ *agenttest.Client, o *agent.Options) {
o.BlockFileTransfer = true
- o.ExperimentalConnectionReports = true
})
sshClient, err := conn.SSHClient(ctx)
require.NoError(t, err)
@@ -1724,9 +1713,7 @@ func TestAgent_ReconnectingPTY(t *testing.T) {
defer cancel()
//nolint:dogsled
- conn, agentClient, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0, func(_ *agenttest.Client, o *agent.Options) {
- o.ExperimentalConnectionReports = true
- })
+ conn, agentClient, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0)
id := uuid.New()
// Test that the connection is reported. This must be tested in the
diff --git a/cli/agent.go b/cli/agent.go
index 5466ba9a5bc67..0a9031aed57c1 100644
--- a/cli/agent.go
+++ b/cli/agent.go
@@ -54,7 +54,6 @@ func (r *RootCmd) workspaceAgent() *serpent.Command {
agentHeaderCommand string
agentHeader []string
- experimentalConnectionReports bool
experimentalDevcontainersEnabled bool
)
cmd := &serpent.Command{
@@ -327,10 +326,6 @@ func (r *RootCmd) workspaceAgent() *serpent.Command {
containerLister = agentcontainers.NewDocker(execer)
}
- if experimentalConnectionReports {
- logger.Info(ctx, "experimental connection reports enabled")
- }
-
agnt := agent.New(agent.Options{
Client: client,
Logger: logger,
@@ -359,7 +354,6 @@ func (r *RootCmd) workspaceAgent() *serpent.Command {
ContainerLister: containerLister,
ExperimentalDevcontainersEnabled: experimentalDevcontainersEnabled,
- ExperimentalConnectionReports: experimentalConnectionReports,
})
promHandler := agent.PrometheusMetricsHandler(prometheusRegistry, logger)
@@ -489,14 +483,6 @@ func (r *RootCmd) workspaceAgent() *serpent.Command {
Description: "Allow the agent to automatically detect running devcontainers.",
Value: serpent.BoolOf(&experimentalDevcontainersEnabled),
},
- {
- Flag: "experimental-connection-reports-enable",
- Hidden: true,
- Default: "false",
- Env: "CODER_AGENT_EXPERIMENTAL_CONNECTION_REPORTS_ENABLE",
- Description: "Enable experimental connection reports.",
- Value: serpent.BoolOf(&experimentalConnectionReports),
- },
}
return cmd