Skip to content

Commit eb0497f

Browse files
authored
feat: fetch proxy latencies at most once per 30s (#8277)
* feat: fetch proxy latencies at most once per 30s
1 parent f0bd258 commit eb0497f

File tree

1 file changed

+42
-2
lines changed

1 file changed

+42
-2
lines changed

site/src/contexts/useProxyLatency.ts

Lines changed: 42 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,8 @@ import PerformanceObserver from "@fastly/performance-observer-polyfill"
44
import axios from "axios"
55
import { generateRandomString } from "utils/random"
66

7+
const proxyIntervalSeconds = 30 // seconds
8+
79
export interface ProxyLatencyReport {
810
// accurate identifies if the latency was calculated using the
911
// PerformanceResourceTiming API. If this is false, then the
@@ -17,6 +19,8 @@ export interface ProxyLatencyReport {
1719

1820
interface ProxyLatencyAction {
1921
proxyID: string
22+
// cached indicates if the latency was loaded from a cache (local storage)
23+
cached: boolean
2024
report: ProxyLatencyReport
2125
}
2226

@@ -59,8 +63,13 @@ export const useProxyLatency = (
5963

6064
// This latestFetchRequest is used to trigger a refetch of the proxy latencies.
6165
const [latestFetchRequest, setLatestFetchRequest] = useState(
62-
new Date().toISOString(),
66+
// The initial state is the current time minus the interval. Any proxies that have a latency after this
67+
// in the cache are still valid.
68+
new Date(new Date().getTime() - proxyIntervalSeconds * 1000).toISOString(),
6369
)
70+
71+
// Refetch will always set the latestFetchRequest to the current time, making all the cached latencies
72+
// stale and triggering a refetch of all proxies in the list.
6473
const refetch = () => {
6574
const d = new Date()
6675
setLatestFetchRequest(d.toISOString())
@@ -73,6 +82,8 @@ export const useProxyLatency = (
7382
return
7483
}
7584

85+
const storedLatencies = loadStoredLatencies()
86+
7687
// proxyMap is a map of the proxy path_app_url to the proxy object.
7788
// This is for the observer to know which requests are important to
7889
// record.
@@ -82,6 +93,28 @@ export const useProxyLatency = (
8293
return acc
8394
}
8495

96+
// Do not run latency checks if a cached check exists that is newer than the latestFetchRequest Date.
97+
// This prevents fetching latencies too often.
98+
// 1. Fetch the latest stored latency for the given proxy.
99+
// 2. If the latest latency is after the latestFetchRequest, then skip the latency check.
100+
if (storedLatencies && storedLatencies[proxy.id]) {
101+
const fetchRequestDate = new Date(latestFetchRequest)
102+
const latest = storedLatencies[proxy.id].reduce((prev, next) =>
103+
prev.at > next.at ? prev : next,
104+
)
105+
106+
if (latest && latest.at > fetchRequestDate) {
107+
// dispatch the cached latency. This latency already went through the
108+
// guard logic below, so we can just dispatch it again directly.
109+
dispatchProxyLatencies({
110+
proxyID: proxy.id,
111+
cached: true,
112+
report: latest,
113+
})
114+
return acc
115+
}
116+
}
117+
85118
// Add a random query param to the url to make sure we don't get a cached response.
86119
// This is important in case there is some caching layer between us and the proxy.
87120
const url = new URL(
@@ -131,6 +164,7 @@ export const useProxyLatency = (
131164
}
132165
const update = {
133166
proxyID: check.id,
167+
cached: false,
134168
report: {
135169
latencyMS,
136170
accurate,
@@ -203,7 +237,13 @@ const loadStoredLatencies = (): Record<string, ProxyLatencyReport[]> => {
203237
return {}
204238
}
205239

206-
return JSON.parse(str)
240+
return JSON.parse(str, (key, value) => {
241+
// By default JSON.parse deserializes dates as strings. We want to convert them back to Date objects.
242+
if (key === "at") {
243+
return new Date(value)
244+
}
245+
return value
246+
})
207247
}
208248

209249
const updateStoredLatencies = (action: ProxyLatencyAction): void => {

0 commit comments

Comments
 (0)