Skip to content

feat: Workspace Proxy picker show latency to each proxy #7486

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 23 commits into from
May 11, 2023
Merged
Show file tree
Hide file tree
Changes from 19 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 11 additions & 0 deletions coderd/coderd.go
Original file line number Diff line number Diff line change
Expand Up @@ -805,6 +805,17 @@ func New(options *Options) *API {
return []string{}
})
r.NotFound(cspMW(compressHandler(http.HandlerFunc(api.siteHandler.ServeHTTP))).ServeHTTP)

// This must be before all middleware to improve the response time.
// So make a new router, and mount the old one as the root.
rootRouter := chi.NewRouter()
// This is the only route we add before all the middleware.
// We want to time the latency of the request, so any middleware will
// interfere with that timing.
rootRouter.Get("/latency-check", LatencyCheck(api.AccessURL))
rootRouter.Mount("/", r)
api.RootHandler = rootRouter

return api
}

Expand Down
9 changes: 9 additions & 0 deletions coderd/coderd_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -124,6 +124,15 @@ func TestDERPLatencyCheck(t *testing.T) {
require.Equal(t, http.StatusOK, res.StatusCode)
}

func TestFastLatencyCheck(t *testing.T) {
	t.Parallel()

	// The latency-check endpoint is mounted in front of all middleware,
	// so a plain unauthenticated GET must always return 200.
	client := coderdtest.New(t, nil)

	ctx := context.Background()
	resp, err := client.Request(ctx, http.MethodGet, "/latency-check", nil)
	require.NoError(t, err)
	defer resp.Body.Close()
	require.Equal(t, http.StatusOK, resp.StatusCode)
}

func TestHealthz(t *testing.T) {
t.Parallel()
client := coderdtest.New(t, nil)
Expand Down
24 changes: 24 additions & 0 deletions coderd/latencycheck.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
package coderd

import (
"net/http"
"net/url"
"strings"
)

// LatencyCheck returns an extremely lightweight handler used by browsers to
// measure round-trip latency. It is intended to be mounted before all
// middleware so the measured response time reflects network latency as
// closely as possible.
//
// The allowed origins are advertised via the Timing-Allow-Origin header,
// which lets cross-origin callers (the primary dashboard) read detailed
// resource-timing information for the request.
func LatencyCheck(allowedOrigins ...*url.URL) http.HandlerFunc {
	origins := latencyCheckOrigins(allowedOrigins...)
	return func(rw http.ResponseWriter, r *http.Request) {
		// Allowing timing information to be shared. This allows the browser
		// to exclude TLS handshake timing.
		rw.Header().Set("Timing-Allow-Origin", origins)
		rw.WriteHeader(http.StatusOK)
	}
}

// latencyCheckOrigins normalizes the allowed origins into the
// comma-separated list format used by the Timing-Allow-Origin header.
// Each origin is reduced to scheme://host with no path and no trailing slash.
func latencyCheckOrigins(allowedOrigins ...*url.URL) string {
	allowed := make([]string, 0, len(allowedOrigins))
	for _, origin := range allowedOrigins {
		// Copy so the caller's URL is not mutated, then strip the path:
		// the header matches on origin (scheme://host) only.
		tmp := *origin
		tmp.Path = ""
		tmp.RawPath = ""
		// Bug fix: the stripped copy (tmp) must be stringified here. The
		// original appended origin.String(), so the path was never removed.
		allowed = append(allowed, strings.TrimSuffix(tmp.String(), "/"))
	}
	return strings.Join(allowed, ",")
}
2 changes: 1 addition & 1 deletion enterprise/coderd/workspaceproxy.go
Original file line number Diff line number Diff line change
Expand Up @@ -423,7 +423,7 @@ func (api *API) workspaceProxyRegister(rw http.ResponseWriter, r *http.Request)
// Log: api.Logger,
// Request: r,
// Action: database.AuditActionWrite,
//})
// })
)
// aReq.Old = proxy
// defer commitAudit()
Expand Down
38 changes: 24 additions & 14 deletions enterprise/wsproxy/wsproxy.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ import (

"cdr.dev/slog"
"github.com/coder/coder/buildinfo"
"github.com/coder/coder/coderd"
"github.com/coder/coder/coderd/httpapi"
"github.com/coder/coder/coderd/httpmw"
"github.com/coder/coder/coderd/tracing"
Expand Down Expand Up @@ -186,6 +187,21 @@ func New(ctx context.Context, opts *Options) (*Server, error) {
SecureAuthCookie: opts.SecureAuthCookie,
}

// The primary coderd dashboard needs to make some GET requests to
// the workspace proxies to check latency.
corsMW := cors.Handler(cors.Options{
AllowedOrigins: []string{
// Allow the dashboard to make requests to the proxy for latency
// checks.
opts.DashboardURL.String(),
},
// Only allow GET requests for latency checks.
AllowedMethods: []string{http.MethodOptions, http.MethodGet},
AllowedHeaders: []string{"Accept", "Content-Type", "X-LATENCY-CHECK", "X-CSRF-TOKEN"},
// Do not send any cookies
AllowCredentials: false,
})

// Routes
apiRateLimiter := httpmw.RateLimit(opts.APIRateLimit, time.Minute)
// Persistent middlewares to all routes
Expand All @@ -198,20 +214,7 @@ func New(ctx context.Context, opts *Options) (*Server, error) {
httpmw.ExtractRealIP(s.Options.RealIPConfig),
httpmw.Logger(s.Logger),
httpmw.Prometheus(s.PrometheusRegistry),
// The primary coderd dashboard needs to make some GET requests to
// the workspace proxies to check latency.
cors.Handler(cors.Options{
AllowedOrigins: []string{
// Allow the dashboard to make requests to the proxy for latency
// checks.
opts.DashboardURL.String(),
},
// Only allow GET requests for latency checks.
AllowedMethods: []string{http.MethodGet},
AllowedHeaders: []string{"Accept", "Content-Type"},
// Do not send any cookies
AllowCredentials: false,
}),
corsMW,

// HandleSubdomain is a middleware that handles all requests to the
// subdomain-based workspace apps.
Expand Down Expand Up @@ -260,6 +263,13 @@ func New(ctx context.Context, opts *Options) (*Server, error) {
})
})

// See coderd/coderd.go for why we need this.
rootRouter := chi.NewRouter()
// Make sure to add the cors middleware to the latency check route.
rootRouter.Get("/latency-check", corsMW(coderd.LatencyCheck(s.DashboardURL, s.AppServer.AccessURL)).ServeHTTP)
rootRouter.Mount("/", r)
s.Handler = rootRouter

return s, nil
}

Expand Down
24 changes: 24 additions & 0 deletions site/jest.setup.ts
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,33 @@ import "jest-location-mock"
import { TextEncoder, TextDecoder } from "util"
import { Blob } from "buffer"
import jestFetchMock from "jest-fetch-mock"
import { ProxyLatencyReport } from "contexts/useProxyLatency"
import { RegionsResponse } from "api/typesGenerated"

jestFetchMock.enableMocks()

// useProxyLatency issues real HTTP requests to measure latency. That would
// fail (or badly slow down) unit tests, so the hook is mocked globally.
jest.mock("contexts/useProxyLatency", () => ({
  useProxyLatency: (proxies?: RegionsResponse) => {
    const reports: Record<string, ProxyLatencyReport> = {}
    if (!proxies) {
      return reports
    }
    for (const proxy of proxies.regions) {
      reports[proxy.id] = {
        accurate: true,
        // Report a constant 8ms for every proxy. A random value here
        // could make stories/snapshots flaky.
        latencyMS: 8,
        at: new Date(),
      }
    }
    return reports
  },
}))

global.TextEncoder = TextEncoder
// eslint-disable-next-line @typescript-eslint/no-explicit-any -- Polyfill for jsdom
global.TextDecoder = TextDecoder as any
Expand Down
1 change: 1 addition & 0 deletions site/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@
"dependencies": {
"@emoji-mart/data": "1.0.5",
"@emoji-mart/react": "1.0.1",
"@fastly/performance-observer-polyfill": "^2.0.0",
"@emotion/react": "^11.10.8",
"@emotion/styled": "^11.10.8",
"@fontsource/ibm-plex-mono": "4.5.10",
Expand Down
13 changes: 3 additions & 10 deletions site/src/components/Resources/AgentLatency.tsx
Original file line number Diff line number Diff line change
@@ -1,13 +1,14 @@
import { useRef, useState, FC } from "react"
import { makeStyles, useTheme } from "@mui/styles"
import { Theme } from "@mui/material/styles"
import {
HelpTooltipText,
HelpPopover,
HelpTooltipTitle,
} from "components/Tooltips/HelpTooltip"
import { Stack } from "components/Stack/Stack"
import { WorkspaceAgent, DERPRegion } from "api/typesGenerated"
import { Theme } from "@mui/material/styles"
import { getLatencyColor } from "utils/colors"

const getDisplayLatency = (theme: Theme, agent: WorkspaceAgent) => {
// Find the right latency to display
Expand All @@ -22,17 +23,9 @@ const getDisplayLatency = (theme: Theme, agent: WorkspaceAgent) => {
return undefined
}

// Get the color
let color = theme.palette.success.light
if (latency.latency_ms >= 150 && latency.latency_ms < 300) {
color = theme.palette.warning.light
} else if (latency.latency_ms >= 300) {
color = theme.palette.error.light
}

return {
...latency,
color,
color: getLatencyColor(theme, latency.latency_ms),
}
}

Expand Down
7 changes: 7 additions & 0 deletions site/src/contexts/ProxyContext.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -9,10 +9,12 @@ import {
useContext,
useState,
} from "react"
import { ProxyLatencyReport, useProxyLatency } from "./useProxyLatency"

interface ProxyContextValue {
proxy: PreferredProxy
proxies?: Region[]
proxyLatencies?: Record<string, ProxyLatencyReport>
// isfetched is true when the proxy api call is complete.
isFetched: boolean
// isLoading is true if the proxy is in the process of being fetched.
Expand Down Expand Up @@ -72,6 +74,10 @@ export const ProxyProvider: FC<PropsWithChildren> = ({ children }) => {
},
})

// Every time we get a new proxiesResponse, update the latency check
// to each workspace proxy.
const proxyLatencies = useProxyLatency(proxiesResp)

const setAndSaveProxy = (
selectedProxy?: Region,
// By default the proxies come from the api call above.
Expand All @@ -95,6 +101,7 @@ export const ProxyProvider: FC<PropsWithChildren> = ({ children }) => {
return (
<ProxyContext.Provider
value={{
proxyLatencies: proxyLatencies,
proxy: experimentEnabled
? proxy
: {
Expand Down
152 changes: 152 additions & 0 deletions site/src/contexts/useProxyLatency.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,152 @@
import { Region, RegionsResponse } from "api/typesGenerated"
import { useEffect, useReducer } from "react"
import PerformanceObserver from "@fastly/performance-observer-polyfill"
import axios from "axios"
import { generateRandomString } from "utils/random"

export interface ProxyLatencyReport {
// accurate identifies if the latency was calculated using the
// PerformanceResourceTiming API. If this is false, then the
// latency is calculated using the total duration of the request
// and will be off by a good margin.
accurate: boolean
latencyMS: number
// at is when the latency was recorded.
at: Date
}

interface ProxyLatencyAction {
proxyID: string
report: ProxyLatencyReport
}

// Reducer for the latency reports keyed by proxy ID.
//
// React's useReducer treats state as immutable: if the reducer returns the
// same object reference it received, React bails out and skips the
// re-render. The previous implementation mutated `state` in place and
// returned it, so latency updates would never be observed by consumers.
// Always return a fresh object with the new report merged in.
const proxyLatenciesReducer = (
  state: Record<string, ProxyLatencyReport>,
  action: ProxyLatencyAction,
): Record<string, ProxyLatencyReport> => {
  return {
    ...state,
    [action.proxyID]: action.report,
  }
}

/**
 * useProxyLatency measures browser-to-proxy latency for every healthy
 * workspace proxy in the given RegionsResponse.
 *
 * It fires one GET to each proxy's `/latency-check` endpoint and uses the
 * PerformanceObserver resource-timing entries to compute the latency,
 * falling back to total request duration when detailed timing is missing.
 *
 * @param proxies The regions/proxies to check. When undefined, no checks run
 *                and an empty record is returned.
 * @returns A record of proxy ID -> latest ProxyLatencyReport.
 */
export const useProxyLatency = (
  proxies?: RegionsResponse,
): Record<string, ProxyLatencyReport> => {
  const [proxyLatencies, dispatchProxyLatencies] = useReducer(
    proxyLatenciesReducer,
    {},
  )

  // Only run latency updates when the proxies change.
  // NOTE(review): this effect returns no cleanup function, so if `proxies`
  // changes (or the component unmounts) while requests are in flight, the
  // observer stays connected and dispatches may land after unmount —
  // confirm this is acceptable or add a cleanup.
  useEffect(() => {
    if (!proxies) {
      return
    }

    // proxyMap is a map of the proxy path_app_url to the proxy object.
    // This is for the observer to know which requests are important to
    // record.
    const proxyChecks = proxies.regions.reduce((acc, proxy) => {
      // Only run the latency check on healthy proxies.
      if (!proxy.healthy) {
        return acc
      }

      // Add a random query param to the url to make sure we don't get a cached response.
      // This is important in case there is some caching layer between us and the proxy.
      const url = new URL(
        `/latency-check?cache_bust=${generateRandomString(6)}`,
        proxy.path_app_url,
      )
      acc[url.toString()] = proxy
      return acc
    }, {} as Record<string, Region>)

    // dispatchProxyLatenciesGuarded will assign the latency to the proxy
    // via the reducer. But it will only do so if the performance entry is
    // a resource entry that we care about.
    const dispatchProxyLatenciesGuarded = (entry: PerformanceEntry): void => {
      if (entry.entryType !== "resource") {
        // We should never get these, but just in case.
        return
      }

      // The entry.name is the url of the request. It must exactly match a
      // key of proxyChecks (including the cache_bust query) to be counted.
      const check = proxyChecks[entry.name]
      if (!check) {
        // This is not a proxy request, so ignore it.
        return
      }

      // These docs are super useful.
      // https://developer.mozilla.org/en-US/docs/Web/API/Performance_API/Resource_timing
      let latencyMS = 0
      let accurate = false
      if (
        "requestStart" in entry &&
        (entry as PerformanceResourceTiming).requestStart !== 0
      ) {
        // This is the preferred logic to get the latency.
        // requestStart -> responseStart excludes DNS/TCP/TLS setup time.
        const timingEntry = entry as PerformanceResourceTiming
        latencyMS = timingEntry.responseStart - timingEntry.requestStart
        accurate = true
      } else {
        // This is the total duration of the request and will be off by a good margin.
        // This is a fallback if the better timing is not available.
        // (requestStart is 0 for cross-origin resources unless the server
        // sends Timing-Allow-Origin — presumably why the header is set on
        // the latency-check endpoint.)
        // eslint-disable-next-line no-console -- We can remove this when we display the "accurate" bool on the UI
        console.log(
          `Using fallback latency calculation for "${entry.name}". Latency will be incorrect and larger then actual.`,
        )
        latencyMS = entry.duration
      }
      dispatchProxyLatencies({
        proxyID: check.id,
        report: {
          latencyMS,
          accurate,
          at: new Date(),
        },
      })

      return
    }

    // Start a new performance observer to record of all the requests
    // to the proxies.
    const observer = new PerformanceObserver((list) => {
      // If we get entries via this callback, then dispatch the events to the latency reducer.
      list.getEntries().forEach((entry) => {
        dispatchProxyLatenciesGuarded(entry)
      })
    })

    // The resource requests include xmlhttp requests.
    observer.observe({ entryTypes: ["resource"] })

    // Kick off one GET per healthy proxy; the observer above records the
    // timing of each as it completes.
    const proxyRequests = Object.keys(proxyChecks).map((latencyURL) => {
      return axios.get(latencyURL, {
        withCredentials: false,
        // Must add a custom header to make the request not a "simple request"
        // https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS#simple_requests
        headers: { "X-LATENCY-CHECK": "true" },
      })
    })

    // When all the proxy requests finish
    Promise.all(proxyRequests)
      // TODO: If there is an error on any request, we might want to store some indicator of that?
      .finally(() => {
        // takeRecords will return any entries that were not called via the callback yet.
        // We want to call this before we disconnect the observer to make sure we get all the
        // proxy requests recorded.
        observer.takeRecords().forEach((entry) => {
          dispatchProxyLatenciesGuarded(entry)
        })
        // At this point, we can be confident that all the proxy requests have been recorded
        // via the performance observer. So we can disconnect the observer.
        observer.disconnect()
      })
  }, [proxies])

  return proxyLatencies
}
Loading