diff --git a/LICENSE b/LICENSE
index 6a66aea5ea..2a7cf70da6 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
- * Neither the name of Google Inc. nor the names of its
+ * Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
diff --git a/README.md b/README.md
index a15f253dff..235c8d8b30 100644
--- a/README.md
+++ b/README.md
@@ -2,17 +2,15 @@
[![Go Reference](https://pkg.go.dev/badge/golang.org/x/net.svg)](https://pkg.go.dev/golang.org/x/net)
-This repository holds supplementary Go networking libraries.
+This repository holds supplementary Go networking packages.
-## Download/Install
+## Report Issues / Send Patches
-The easiest way to install is to run `go get -u golang.org/x/net`. You can
-also manually git clone the repository to `$GOPATH/src/golang.org/x/net`.
+This repository uses Gerrit for code changes. To learn how to submit changes to
+this repository, see https://go.dev/doc/contribute.
-## Report Issues / Send Patches
+The git repository is https://go.googlesource.com/net.
-This repository uses Gerrit for code changes. To learn how to submit
-changes to this repository, see https://golang.org/doc/contribute.html.
The main issue tracker for the net repository is located at
-https://github.com/golang/go/issues. Prefix your issue with "x/net:" in the
+https://go.dev/issues. Prefix your issue with "x/net:" in the
subject line, so it is easy to find.
diff --git a/bpf/instructions_test.go b/bpf/instructions_test.go
index 69b25c5415..f5111c66fe 100644
--- a/bpf/instructions_test.go
+++ b/bpf/instructions_test.go
@@ -6,7 +6,7 @@ package bpf
import (
"fmt"
- "io/ioutil"
+ "os"
"reflect"
"strconv"
"strings"
@@ -98,7 +98,7 @@ func TestInterop(t *testing.T) {
}
t.Logf("Assembled program is %d instructions long", len(out))
- bs, err := ioutil.ReadFile(allInstructionsExpected)
+ bs, err := os.ReadFile(allInstructionsExpected)
if err != nil {
t.Fatalf("reading %s: %s", allInstructionsExpected, err)
}
diff --git a/context/ctxhttp/ctxhttp_test.go b/context/ctxhttp/ctxhttp_test.go
index d585f117f0..ace33f2002 100644
--- a/context/ctxhttp/ctxhttp_test.go
+++ b/context/ctxhttp/ctxhttp_test.go
@@ -9,7 +9,6 @@ package ctxhttp
import (
"context"
"io"
- "io/ioutil"
"net/http"
"net/http/httptest"
"testing"
@@ -49,7 +48,7 @@ func TestNoTimeout(t *testing.T) {
t.Fatal(err)
}
defer res.Body.Close()
- slurp, err := ioutil.ReadAll(res.Body)
+ slurp, err := io.ReadAll(res.Body)
if err != nil {
t.Fatal(err)
}
@@ -102,7 +101,7 @@ func TestCancelAfterHangingRequest(t *testing.T) {
done := make(chan struct{})
go func() {
- b, err := ioutil.ReadAll(resp.Body)
+ b, err := io.ReadAll(resp.Body)
if len(b) != 0 || err == nil {
t.Errorf(`Read got (%q, %v); want ("", error)`, b, err)
}
diff --git a/dns/dnsmessage/message_test.go b/dns/dnsmessage/message_test.go
index 2555305980..1fa93e63ad 100644
--- a/dns/dnsmessage/message_test.go
+++ b/dns/dnsmessage/message_test.go
@@ -7,7 +7,7 @@ package dnsmessage
import (
"bytes"
"fmt"
- "io/ioutil"
+ "os"
"path/filepath"
"reflect"
"strings"
@@ -1611,7 +1611,7 @@ func TestNoFmt(t *testing.T) {
// Could use something complex like go/build or x/tools/go/packages,
// but there's no reason for "fmt" to appear (in quotes) in the source
// otherwise, so just use a simple substring search.
- data, err := ioutil.ReadFile(file)
+ data, err := os.ReadFile(file)
if err != nil {
t.Fatal(err)
}
diff --git a/go.mod b/go.mod
index 36207106dc..26852e7822 100644
--- a/go.mod
+++ b/go.mod
@@ -3,8 +3,8 @@ module golang.org/x/net
go 1.18
require (
- golang.org/x/crypto v0.21.0
- golang.org/x/sys v0.18.0
- golang.org/x/term v0.18.0
- golang.org/x/text v0.14.0
+ golang.org/x/crypto v0.31.0
+ golang.org/x/sys v0.28.0
+ golang.org/x/term v0.27.0
+ golang.org/x/text v0.21.0
)
diff --git a/go.sum b/go.sum
index 69fb104980..43b5f03464 100644
--- a/go.sum
+++ b/go.sum
@@ -1,8 +1,8 @@
-golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
-golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
-golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
-golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
-golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
-golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
-golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
+golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
+golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
diff --git a/html/atom/gen.go b/html/atom/gen.go
index 5d85c604d1..1e249d163c 100644
--- a/html/atom/gen.go
+++ b/html/atom/gen.go
@@ -14,7 +14,6 @@ import (
"flag"
"fmt"
"go/format"
- "io/ioutil"
"math/rand"
"os"
"sort"
@@ -48,7 +47,7 @@ func genFile(name string, buf *bytes.Buffer) {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
- if err := ioutil.WriteFile(name, b, 0644); err != nil {
+ if err := os.WriteFile(name, b, 0644); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
diff --git a/html/charset/charset_test.go b/html/charset/charset_test.go
index b71eb43f70..c2f62445c7 100644
--- a/html/charset/charset_test.go
+++ b/html/charset/charset_test.go
@@ -7,7 +7,8 @@ package charset
import (
"bytes"
"encoding/xml"
- "io/ioutil"
+ "io"
+ "os"
"runtime"
"strings"
"testing"
@@ -17,7 +18,7 @@ import (
func transformString(t transform.Transformer, s string) (string, error) {
r := transform.NewReader(strings.NewReader(s), t)
- b, err := ioutil.ReadAll(r)
+ b, err := io.ReadAll(r)
return string(b), err
}
@@ -142,7 +143,7 @@ func TestSniff(t *testing.T) {
}
for _, tc := range sniffTestCases {
- content, err := ioutil.ReadFile("testdata/" + tc.filename)
+ content, err := os.ReadFile("testdata/" + tc.filename)
if err != nil {
t.Errorf("%s: error reading file: %v", tc.filename, err)
continue
@@ -163,7 +164,7 @@ func TestReader(t *testing.T) {
}
for _, tc := range sniffTestCases {
- content, err := ioutil.ReadFile("testdata/" + tc.filename)
+ content, err := os.ReadFile("testdata/" + tc.filename)
if err != nil {
t.Errorf("%s: error reading file: %v", tc.filename, err)
continue
@@ -175,14 +176,14 @@ func TestReader(t *testing.T) {
continue
}
- got, err := ioutil.ReadAll(r)
+ got, err := io.ReadAll(r)
if err != nil {
t.Errorf("%s: error reading from charset.NewReader: %v", tc.filename, err)
continue
}
e, _ := Lookup(tc.want)
- want, err := ioutil.ReadAll(transform.NewReader(bytes.NewReader(content), e.NewDecoder()))
+ want, err := io.ReadAll(transform.NewReader(bytes.NewReader(content), e.NewDecoder()))
if err != nil {
t.Errorf("%s: error decoding with hard-coded charset name: %v", tc.filename, err)
continue
diff --git a/html/doc.go b/html/doc.go
index 2466ae3d9a..885c4c5936 100644
--- a/html/doc.go
+++ b/html/doc.go
@@ -78,16 +78,11 @@ example, to process each anchor node in depth-first order:
if err != nil {
// ...
}
- var f func(*html.Node)
- f = func(n *html.Node) {
+ for n := range doc.Descendants() {
if n.Type == html.ElementNode && n.Data == "a" {
// Do something with n...
}
- for c := n.FirstChild; c != nil; c = c.NextSibling {
- f(c)
- }
}
- f(doc)
The relevant specifications include:
https://html.spec.whatwg.org/multipage/syntax.html and
@@ -104,7 +99,7 @@ tokenization, and tokenization and tree construction stages of the WHATWG HTML
parsing specification respectively. While the tokenizer parses and normalizes
individual HTML tokens, only the parser constructs the DOM tree from the
tokenized HTML, as described in the tree construction stage of the
-specification, dynamically modifying or extending the docuemnt's DOM tree.
+specification, dynamically modifying or extending the document's DOM tree.
If your use case requires semantically well-formed HTML documents, as defined by
the WHATWG specification, the parser should be used rather than the tokenizer.
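
The tokenizer/parser distinction described in this doc.go hunk may be easier to see with a minimal tokenizer loop. The snippet below is an illustrative sketch using the package's exported Tokenizer API (the input string is made up), not part of this change:

	package main

	import (
		"fmt"
		"strings"

		"golang.org/x/net/html"
	)

	func main() {
		z := html.NewTokenizer(strings.NewReader(`<p>Links:</p><ul><li><a href="foo">Foo</a></li></ul>`))
		for {
			tt := z.Next()
			if tt == html.ErrorToken {
				// z.Err() is io.EOF at end of input, or a real error otherwise.
				return
			}
			if tt == html.StartTagToken {
				name, _ := z.TagName()
				fmt.Printf("start tag: %s\n", name) // tokens only; no DOM tree is built
			}
		}
	}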
diff --git a/html/doctype.go b/html/doctype.go
index c484e5a94f..bca3ae9a0c 100644
--- a/html/doctype.go
+++ b/html/doctype.go
@@ -87,7 +87,7 @@ func parseDoctype(s string) (n *Node, quirks bool) {
}
}
if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" &&
- strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" {
+ strings.EqualFold(lastAttr.Val, "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd") {
quirks = true
}
}
diff --git a/html/example_test.go b/html/example_test.go
index 0b06ed7730..830f0b27af 100644
--- a/html/example_test.go
+++ b/html/example_test.go
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build go1.23
+
// This example demonstrates parsing HTML data and walking the resulting tree.
package html_test
@@ -11,6 +13,7 @@ import (
"strings"
"golang.org/x/net/html"
+ "golang.org/x/net/html/atom"
)
func ExampleParse() {
@@ -19,9 +22,8 @@ func ExampleParse() {
if err != nil {
log.Fatal(err)
}
- var f func(*html.Node)
- f = func(n *html.Node) {
- if n.Type == html.ElementNode && n.Data == "a" {
+ for n := range doc.Descendants() {
+ if n.Type == html.ElementNode && n.DataAtom == atom.A {
for _, a := range n.Attr {
if a.Key == "href" {
fmt.Println(a.Val)
@@ -29,11 +31,8 @@ func ExampleParse() {
}
}
}
- for c := n.FirstChild; c != nil; c = c.NextSibling {
- f(c)
- }
}
- f(doc)
+
// Output:
// foo
// /bar/baz
diff --git a/html/foreign.go b/html/foreign.go
index 9da9e9dc42..e8515d8e88 100644
--- a/html/foreign.go
+++ b/html/foreign.go
@@ -40,8 +40,7 @@ func htmlIntegrationPoint(n *Node) bool {
if n.Data == "annotation-xml" {
for _, a := range n.Attr {
if a.Key == "encoding" {
- val := strings.ToLower(a.Val)
- if val == "text/html" || val == "application/xhtml+xml" {
+ if strings.EqualFold(a.Val, "text/html") || strings.EqualFold(a.Val, "application/xhtml+xml") {
return true
}
}
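
Both the doctype.go and foreign.go hunks above swap a lowercase-then-compare pattern for strings.EqualFold. For these ASCII-only literals the result is identical, but EqualFold folds case during the comparison instead of allocating a lowercased copy of the attribute value first. A standalone sketch (the helper name is made up):

	package main

	import (
		"fmt"
		"strings"
	)

	// isHTMLEncoding mirrors the new comparison style: equivalent to
	// strings.ToLower(val) == "text/html" for ASCII input, without the
	// temporary lowercased string.
	func isHTMLEncoding(val string) bool {
		return strings.EqualFold(val, "text/html")
	}

	func main() {
		fmt.Println(isHTMLEncoding("Text/HTML")) // true
	}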
diff --git a/html/iter.go b/html/iter.go
new file mode 100644
index 0000000000..54be8fd30f
--- /dev/null
+++ b/html/iter.go
@@ -0,0 +1,56 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.23
+
+package html
+
+import "iter"
+
+// Ancestors returns an iterator over the ancestors of n, starting with n.Parent.
+//
+// Mutating a Node or its parents while iterating may have unexpected results.
+func (n *Node) Ancestors() iter.Seq[*Node] {
+ _ = n.Parent // eager nil check
+
+ return func(yield func(*Node) bool) {
+ for p := n.Parent; p != nil && yield(p); p = p.Parent {
+ }
+ }
+}
+
+// ChildNodes returns an iterator over the immediate children of n,
+// starting with n.FirstChild.
+//
+// Mutating a Node or its children while iterating may have unexpected results.
+func (n *Node) ChildNodes() iter.Seq[*Node] {
+ _ = n.FirstChild // eager nil check
+
+ return func(yield func(*Node) bool) {
+ for c := n.FirstChild; c != nil && yield(c); c = c.NextSibling {
+ }
+ }
+
+}
+
+// Descendants returns an iterator over all nodes recursively beneath
+// n, excluding n itself. Nodes are visited in depth-first preorder.
+//
+// Mutating a Node or its descendants while iterating may have unexpected results.
+func (n *Node) Descendants() iter.Seq[*Node] {
+ _ = n.FirstChild // eager nil check
+
+ return func(yield func(*Node) bool) {
+ n.descendants(yield)
+ }
+}
+
+func (n *Node) descendants(yield func(*Node) bool) bool {
+ for c := range n.ChildNodes() {
+ if !yield(c) || !c.descendants(yield) {
+ return false
+ }
+ }
+ return true
+}
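
The doc.go and example_test.go hunks in this change already use Descendants; for completeness, here is a small standalone sketch combining the Descendants and Ancestors methods added above (requires Go 1.23 for range-over-func; the input HTML is illustrative):

	package main

	import (
		"fmt"
		"log"
		"strings"

		"golang.org/x/net/html"
		"golang.org/x/net/html/atom"
	)

	func main() {
		doc, err := html.Parse(strings.NewReader(`<p><a href="/x">x</a></p>`))
		if err != nil {
			log.Fatal(err)
		}
		for n := range doc.Descendants() {
			if n.Type == html.ElementNode && n.DataAtom == atom.A {
				// Walk back up from the anchor toward the document root.
				for p := range n.Ancestors() {
					fmt.Println("ancestor:", p.Data)
				}
			}
		}
	}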
diff --git a/html/iter_test.go b/html/iter_test.go
new file mode 100644
index 0000000000..cca7f82f54
--- /dev/null
+++ b/html/iter_test.go
@@ -0,0 +1,96 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.23
+
+package html
+
+import (
+ "strings"
+ "testing"
+)
+
+func TestNode_ChildNodes(t *testing.T) {
+ tests := []struct {
+ in string
+ want string
+ }{
+ {"", ""},
+ {" ", "a"},
+ {"a", "a"},
+ {" ", "a b"},
+ {"a c", "a b c"},
+ {"a d", "a b d"},
+ {"ce fi ", "a f g h"},
+ }
+ for _, test := range tests {
+ doc, err := Parse(strings.NewReader(test.in))
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Drill to <body>
+ n := doc.FirstChild.FirstChild.NextSibling
+ var results []string
+ for c := range n.ChildNodes() {
+ results = append(results, c.Data)
+ }
+ if got := strings.Join(results, " "); got != test.want {
+ t.Errorf("ChildNodes = %q, want %q", got, test.want)
+ }
+ }
+}
+
+func TestNode_Descendants(t *testing.T) {
+ tests := []struct {
+ in string
+ want string
+ }{
+ {"", ""},
+ {" ", "a"},
+ {" ", "a b"},
+ {"b ", "a b"},
+ {" ", "a b"},
+ {"b d ", "a b c d"},
+ {"b e ", "a b c d e"},
+ {"df gj ", "a b c d e f g h i j"},
+ }
+ for _, test := range tests {
+ doc, err := Parse(strings.NewReader(test.in))
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Drill to <body>
+ n := doc.FirstChild.FirstChild.NextSibling
+ var results []string
+ for c := range n.Descendants() {
+ results = append(results, c.Data)
+ }
+ if got := strings.Join(results, " "); got != test.want {
+ t.Errorf("Descendants = %q; want: %q", got, test.want)
+ }
+ }
+}
+
+func TestNode_Ancestors(t *testing.T) {
+ for _, size := range []int{0, 1, 2, 10, 100, 10_000} {
+ n := buildChain(size)
+ nParents := 0
+ for range n.Ancestors() {
+ nParents++
+ }
+ if nParents != size {
+ t.Errorf("number of Ancestors = %d; want: %d", nParents, size)
+ }
+ }
+}
+
+func buildChain(size int) *Node {
+ child := new(Node)
+ for range size {
+ parent := child
+ child = new(Node)
+ parent.AppendChild(child)
+ }
+ return child
+}
diff --git a/html/node.go b/html/node.go
index 1350eef22c..77741a1950 100644
--- a/html/node.go
+++ b/html/node.go
@@ -38,6 +38,10 @@ var scopeMarker = Node{Type: scopeMarkerNode}
// that it looks like "a<b" rather than "a&lt;b". For element nodes, DataAtom
diff --git a/html/parse_test.go b/html/parse_test.go
--- a/html/parse_test.go
+++ b/html/parse_test.go
@@ ... @@ func TestParseFragmentForeignContentTemplates(t *testing.T) {
"",
+ " ",
}
for _, src := range srcs {
// The next line shouldn't infinite-loop.
@@ -477,7 +477,7 @@ func TestParseFragmentForeignContentTemplates(t *testing.T) {
}
func BenchmarkParser(b *testing.B) {
- buf, err := ioutil.ReadFile("testdata/go1.html")
+ buf, err := os.ReadFile("testdata/go1.html")
if err != nil {
b.Fatalf("could not read testdata/go1.html: %v", err)
}
diff --git a/html/token_test.go b/html/token_test.go
index 8b0d5aab63..a36d112d74 100644
--- a/html/token_test.go
+++ b/html/token_test.go
@@ -7,7 +7,7 @@ package html
import (
"bytes"
"io"
- "io/ioutil"
+ "os"
"reflect"
"runtime"
"strings"
@@ -680,7 +680,7 @@ tests:
}
}
// Anything tokenized along with untokenized input or data left in the reader.
- assembled, err := ioutil.ReadAll(io.MultiReader(&tokenized, bytes.NewReader(z.Buffered()), r))
+ assembled, err := io.ReadAll(io.MultiReader(&tokenized, bytes.NewReader(z.Buffered()), r))
if err != nil {
t.Errorf("%s: ReadAll: %v", test.desc, err)
continue tests
@@ -866,7 +866,7 @@ const (
)
func benchmarkTokenizer(b *testing.B, level int) {
- buf, err := ioutil.ReadFile("testdata/go1.html")
+ buf, err := os.ReadFile("testdata/go1.html")
if err != nil {
b.Fatalf("could not read testdata/go1.html: %v", err)
}
diff --git a/http/httpguts/httplex.go b/http/httpguts/httplex.go
index 6e071e8524..9b4de94019 100644
--- a/http/httpguts/httplex.go
+++ b/http/httpguts/httplex.go
@@ -12,7 +12,7 @@ import (
"golang.org/x/net/idna"
)
-var isTokenTable = [127]bool{
+var isTokenTable = [256]bool{
'!': true,
'#': true,
'$': true,
@@ -93,12 +93,7 @@ var isTokenTable = [127]bool{
}
func IsTokenRune(r rune) bool {
- i := int(r)
- return i < len(isTokenTable) && isTokenTable[i]
-}
-
-func isNotToken(r rune) bool {
- return !IsTokenRune(r)
+ return r < utf8.RuneSelf && isTokenTable[byte(r)]
}
// HeaderValuesContainsToken reports whether any string in values
@@ -202,8 +197,8 @@ func ValidHeaderFieldName(v string) bool {
if len(v) == 0 {
return false
}
- for _, r := range v {
- if !IsTokenRune(r) {
+ for i := 0; i < len(v); i++ {
+ if !isTokenTable[v[i]] {
return false
}
}
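
Growing isTokenTable from 127 to 256 entries is what lets ValidHeaderFieldName index raw bytes with no bounds check, while IsTokenRune still rejects runes at or above utf8.RuneSelf before indexing. The exported behavior is unchanged; a small standalone sketch of the exported helper (the inputs are illustrative):

	package main

	import (
		"fmt"

		"golang.org/x/net/http/httpguts"
	)

	func main() {
		for _, name := range []string{"Accept-Charset", "Accept Charset", "résumé"} {
			// Only valid HTTP token names may be written as header field names.
			fmt.Printf("%-18q valid=%v\n", name, httpguts.ValidHeaderFieldName(name))
		}
	}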
diff --git a/http/httpguts/httplex_test.go b/http/httpguts/httplex_test.go
index a2c57f3927..791440b1a7 100644
--- a/http/httpguts/httplex_test.go
+++ b/http/httpguts/httplex_test.go
@@ -20,7 +20,7 @@ func isSeparator(c rune) bool {
return false
}
-func TestIsToken(t *testing.T) {
+func TestIsTokenRune(t *testing.T) {
for i := 0; i <= 130; i++ {
r := rune(i)
expected := isChar(r) && !isCtl(r) && !isSeparator(r)
@@ -30,6 +30,15 @@ func TestIsToken(t *testing.T) {
}
}
+func BenchmarkIsTokenRune(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var r rune
+ for ; r < 1024; r++ {
+ IsTokenRune(r)
+ }
+ }
+}
+
func TestHeaderValuesContainsToken(t *testing.T) {
tests := []struct {
vals []string
@@ -100,6 +109,44 @@ func TestHeaderValuesContainsToken(t *testing.T) {
}
}
+func TestValidHeaderFieldName(t *testing.T) {
+ tests := []struct {
+ in string
+ want bool
+ }{
+ {"", false},
+ {"Accept Charset", false},
+ {"Accept-Charset", true},
+ {"AccepT-EncodinG", true},
+ {"CONNECTION", true},
+ {"résumé", false},
+ }
+ for _, tt := range tests {
+ got := ValidHeaderFieldName(tt.in)
+ if tt.want != got {
+ t.Errorf("ValidHeaderFieldName(%q) = %t; want %t", tt.in, got, tt.want)
+ }
+ }
+}
+
+func BenchmarkValidHeaderFieldName(b *testing.B) {
+ names := []string{
+ "",
+ "Accept Charset",
+ "Accept-Charset",
+ "AccepT-EncodinG",
+ "CONNECTION",
+ "résumé",
+ }
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ for _, name := range names {
+ ValidHeaderFieldName(name)
+ }
+ }
+}
+
func TestPunycodeHostPort(t *testing.T) {
tests := []struct {
in, want string
diff --git a/http2/client_conn_pool.go b/http2/client_conn_pool.go
index 780968d6c1..e81b73e6a7 100644
--- a/http2/client_conn_pool.go
+++ b/http2/client_conn_pool.go
@@ -8,8 +8,8 @@ package http2
import (
"context"
- "crypto/tls"
"errors"
+ "net"
"net/http"
"sync"
)
@@ -158,7 +158,7 @@ func (c *dialCall) dial(ctx context.Context, addr string) {
// This code decides which ones live or die.
// The return value used is whether c was used.
// c is never closed.
-func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) {
+func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c net.Conn) (used bool, err error) {
p.mu.Lock()
for _, cc := range p.conns[key] {
if cc.CanTakeNewRequest() {
@@ -194,8 +194,8 @@ type addConnCall struct {
err error
}
-func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) {
- cc, err := t.NewClientConn(tc)
+func (c *addConnCall) run(t *Transport, key string, nc net.Conn) {
+ cc, err := t.NewClientConn(nc)
p := c.p
p.mu.Lock()
diff --git a/http2/clientconn_test.go b/http2/clientconn_test.go
index 4237b14364..42d9fd2dcc 100644
--- a/http2/clientconn_test.go
+++ b/http2/clientconn_test.go
@@ -9,12 +9,13 @@ package http2
import (
"bytes"
+ "context"
+ "crypto/tls"
"fmt"
"io"
- "net"
"net/http"
"reflect"
- "slices"
+ "sync/atomic"
"testing"
"time"
@@ -59,6 +60,7 @@ func TestTestClientConn(t *testing.T) {
streamID: rt.streamID(),
endStream: true,
size: 10,
+ multiple: true,
})
// tc.writeHeaders sends a HEADERS frame back to the client.
@@ -93,17 +95,15 @@ type testClientConn struct {
tr *Transport
fr *Framer
cc *ClientConn
- hooks *testSyncHooks
+ group *synctestGroup
+ testConnFramer
encbuf bytes.Buffer
enc *hpack.Encoder
roundtrips []*testRoundTrip
- rerr error // returned by Read
- netConnClosed bool // set when the ClientConn closes the net.Conn
- rbuf bytes.Buffer // sent to the test conn
- wbuf bytes.Buffer // sent by the test conn
+ netconn *synctestNetConn
}
func newTestClientConnFromClientConn(t *testing.T, cc *ClientConn) *testClientConn {
@@ -111,20 +111,42 @@ func newTestClientConnFromClientConn(t *testing.T, cc *ClientConn) *testClientCo
t: t,
tr: cc.t,
cc: cc,
- hooks: cc.t.syncHooks,
+ group: cc.t.transportTestHooks.group.(*synctestGroup),
+ }
+
+ // srv is the side controlled by the test.
+ var srv *synctestNetConn
+ if cc.tconn == nil {
+ // If cc.tconn is nil, we're being called with a new conn created by the
+ // Transport's client pool. This path skips dialing the server, and we
+ // create a test connection pair here.
+ cc.tconn, srv = synctestNetPipe(tc.group)
+ } else {
+ // If cc.tconn is non-nil, we're in a test which provides a conn to the
+ // Transport via a TLSNextProto hook. Extract the test connection pair.
+ if tc, ok := cc.tconn.(*tls.Conn); ok {
+ // Unwrap any *tls.Conn to the underlying net.Conn,
+ // to avoid dealing with encryption in tests.
+ cc.tconn = tc.NetConn()
+ }
+ srv = cc.tconn.(*synctestNetConn).peer
}
- cc.tconn = (*testClientConnNetConn)(tc)
+
+ srv.SetReadDeadline(tc.group.Now())
+ srv.autoWait = true
+ tc.netconn = srv
tc.enc = hpack.NewEncoder(&tc.encbuf)
- tc.fr = NewFramer(&tc.rbuf, &tc.wbuf)
- tc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
+ tc.fr = NewFramer(srv, srv)
+ tc.testConnFramer = testConnFramer{
+ t: t,
+ fr: tc.fr,
+ dec: hpack.NewDecoder(initialHeaderTableSize, nil),
+ }
tc.fr.SetMaxReadFrameSize(10 << 20)
t.Cleanup(func() {
- tc.sync()
- if tc.rerr == nil {
- tc.rerr = io.EOF
- }
- tc.sync()
+ tc.closeWrite()
})
+
return tc
}
@@ -132,7 +154,7 @@ func (tc *testClientConn) readClientPreface() {
tc.t.Helper()
// Read the client's HTTP/2 preface, sent prior to any HTTP/2 frames.
buf := make([]byte, len(clientPreface))
- if _, err := io.ReadFull(&tc.wbuf, buf); err != nil {
+ if _, err := io.ReadFull(tc.netconn, buf); err != nil {
tc.t.Fatalf("reading preface: %v", err)
}
if !bytes.Equal(buf, clientPreface) {
@@ -140,12 +162,12 @@ func (tc *testClientConn) readClientPreface() {
}
}
-func newTestClientConn(t *testing.T, opts ...func(*Transport)) *testClientConn {
+func newTestClientConn(t *testing.T, opts ...any) *testClientConn {
t.Helper()
tt := newTestTransport(t, opts...)
const singleUse = false
- _, err := tt.tr.newClientConn(nil, singleUse, tt.tr.syncHooks)
+ _, err := tt.tr.newClientConn(nil, singleUse)
if err != nil {
t.Fatalf("newClientConn: %v", err)
}
@@ -156,182 +178,35 @@ func newTestClientConn(t *testing.T, opts ...func(*Transport)) *testClientConn {
// sync waits for the ClientConn under test to reach a stable state,
// with all goroutines blocked on some input.
func (tc *testClientConn) sync() {
- tc.hooks.waitInactive()
+ tc.group.Wait()
}
// advance advances synthetic time by a duration.
func (tc *testClientConn) advance(d time.Duration) {
- tc.hooks.advance(d)
+ tc.group.AdvanceTime(d)
tc.sync()
}
// hasFrame reports whether a frame is available to be read.
func (tc *testClientConn) hasFrame() bool {
- return tc.wbuf.Len() > 0
-}
-
-// readFrame reads the next frame from the conn.
-func (tc *testClientConn) readFrame() Frame {
- if tc.wbuf.Len() == 0 {
- return nil
- }
- fr, err := tc.fr.ReadFrame()
- if err != nil {
- return nil
- }
- return fr
-}
-
-// testClientConnReadFrame reads a frame of a specific type from the conn.
-func testClientConnReadFrame[T any](tc *testClientConn) T {
- tc.t.Helper()
- var v T
- fr := tc.readFrame()
- if fr == nil {
- tc.t.Fatalf("got no frame, want frame %T", v)
- }
- v, ok := fr.(T)
- if !ok {
- tc.t.Fatalf("got frame %T, want %T", fr, v)
- }
- return v
-}
-
-// wantFrameType reads the next frame from the conn.
-// It produces an error if the frame type is not the expected value.
-func (tc *testClientConn) wantFrameType(want FrameType) {
- tc.t.Helper()
- fr := tc.readFrame()
- if fr == nil {
- tc.t.Fatalf("got no frame, want frame %v", want)
- }
- if got := fr.Header().Type; got != want {
- tc.t.Fatalf("got frame %v, want %v", got, want)
- }
-}
-
-// wantUnorderedFrames reads frames from the conn until every condition in want has been satisfied.
-//
-// want is a list of func(*SomeFrame) bool.
-// wantUnorderedFrames will call each func with frames of the appropriate type
-// until the func returns true.
-// It calls t.Fatal if an unexpected frame is received (no func has that frame type,
-// or all funcs with that type have returned true), or if the conn runs out of frames
-// with unsatisfied funcs.
-//
-// Example:
-//
-// // Read a SETTINGS frame, and any number of DATA frames for a stream.
-// // The SETTINGS frame may appear anywhere in the sequence.
-// // The last DATA frame must indicate the end of the stream.
-// tc.wantUnorderedFrames(
-// func(f *SettingsFrame) bool {
-// return true
-// },
-// func(f *DataFrame) bool {
-// return f.StreamEnded()
-// },
-// )
-func (tc *testClientConn) wantUnorderedFrames(want ...any) {
- tc.t.Helper()
- want = slices.Clone(want)
- seen := 0
-frame:
- for seen < len(want) && !tc.t.Failed() {
- fr := tc.readFrame()
- if fr == nil {
- break
- }
- for i, f := range want {
- if f == nil {
- continue
- }
- typ := reflect.TypeOf(f)
- if typ.Kind() != reflect.Func ||
- typ.NumIn() != 1 ||
- typ.NumOut() != 1 ||
- typ.Out(0) != reflect.TypeOf(true) {
- tc.t.Fatalf("expected func(*SomeFrame) bool, got %T", f)
- }
- if typ.In(0) == reflect.TypeOf(fr) {
- out := reflect.ValueOf(f).Call([]reflect.Value{reflect.ValueOf(fr)})
- if out[0].Bool() {
- want[i] = nil
- seen++
- }
- continue frame
- }
- }
- tc.t.Errorf("got unexpected frame type %T", fr)
- }
- if seen < len(want) {
- for _, f := range want {
- if f == nil {
- continue
- }
- tc.t.Errorf("did not see expected frame: %v", reflect.TypeOf(f).In(0))
- }
- tc.t.Fatalf("did not see %v expected frame types", len(want)-seen)
- }
-}
-
-type wantHeader struct {
- streamID uint32
- endStream bool
- header http.Header
-}
-
-// wantHeaders reads a HEADERS frame and potential CONTINUATION frames,
-// and asserts that they contain the expected headers.
-func (tc *testClientConn) wantHeaders(want wantHeader) {
- tc.t.Helper()
- got := testClientConnReadFrame[*MetaHeadersFrame](tc)
- if got, want := got.StreamID, want.streamID; got != want {
- tc.t.Fatalf("got stream ID %v, want %v", got, want)
- }
- if got, want := got.StreamEnded(), want.endStream; got != want {
- tc.t.Fatalf("got stream ended %v, want %v", got, want)
- }
- gotHeader := make(http.Header)
- for _, f := range got.Fields {
- gotHeader[f.Name] = append(gotHeader[f.Name], f.Value)
- }
- for k, v := range want.header {
- if !reflect.DeepEqual(v, gotHeader[k]) {
- tc.t.Fatalf("got header %q = %q; want %q", k, v, gotHeader[k])
- }
- }
+ return len(tc.netconn.Peek()) > 0
}
-type wantData struct {
- streamID uint32
- endStream bool
- size int
+// isClosed reports whether the peer has closed the connection.
+func (tc *testClientConn) isClosed() bool {
+ return tc.netconn.IsClosedByPeer()
}
-// wantData reads zero or more DATA frames, and asserts that they match the expectation.
-func (tc *testClientConn) wantData(want wantData) {
- tc.t.Helper()
- gotSize := 0
- gotEndStream := false
- for tc.hasFrame() && !gotEndStream {
- data := testClientConnReadFrame[*DataFrame](tc)
- gotSize += len(data.Data())
- if data.StreamEnded() {
- gotEndStream = true
- }
- }
- if gotSize != want.size {
- tc.t.Fatalf("got %v bytes of DATA frames, want %v", gotSize, want.size)
- }
- if gotEndStream != want.endStream {
- tc.t.Fatalf("after %v bytes of DATA frames, got END_STREAM=%v; want %v", gotSize, gotEndStream, want.endStream)
- }
+// closeWrite causes the net.Conn used by the ClientConn to return an error
+// from Read calls.
+func (tc *testClientConn) closeWrite() {
+ tc.netconn.Close()
}
// testRequestBody is a Request.Body for use in tests.
type testRequestBody struct {
- tc *testClientConn
+ tc *testClientConn
+ gate gate
// At most one of buf or bytes can be set at any given time:
buf bytes.Buffer // specific bytes to read from the body
@@ -342,16 +217,22 @@ type testRequestBody struct {
func (tc *testClientConn) newRequestBody() *testRequestBody {
b := &testRequestBody{
- tc: tc,
+ tc: tc,
+ gate: newGate(),
}
return b
}
+func (b *testRequestBody) unlock() {
+ b.gate.unlock(b.buf.Len() > 0 || b.bytes > 0 || b.err != nil)
+}
+
// Read is called by the ClientConn to read from a request body.
func (b *testRequestBody) Read(p []byte) (n int, _ error) {
- b.tc.cc.syncHooks.blockUntil(func() bool {
- return b.buf.Len() > 0 || b.bytes > 0 || b.err != nil
- })
+ if err := b.gate.waitAndLock(context.Background()); err != nil {
+ return 0, err
+ }
+ defer b.unlock()
switch {
case b.buf.Len() > 0:
return b.buf.Read(p)
@@ -376,6 +257,9 @@ func (b *testRequestBody) Close() error {
// writeBytes adds n arbitrary bytes to the body.
func (b *testRequestBody) writeBytes(n int) {
+ defer b.tc.sync()
+ b.gate.lock()
+ defer b.unlock()
b.bytes += n
b.checkWrite()
b.tc.sync()
@@ -383,9 +267,11 @@ func (b *testRequestBody) writeBytes(n int) {
// Write adds bytes to the body.
func (b *testRequestBody) Write(p []byte) (int, error) {
+ defer b.tc.sync()
+ b.gate.lock()
+ defer b.unlock()
n, err := b.buf.Write(p)
b.checkWrite()
- b.tc.sync()
return n, err
}
@@ -400,8 +286,10 @@ func (b *testRequestBody) checkWrite() {
// closeWithError sets an error which will be returned by Read.
func (b *testRequestBody) closeWithError(err error) {
+ defer b.tc.sync()
+ b.gate.lock()
+ defer b.unlock()
b.err = err
- b.tc.sync()
}
// roundTrip starts a RoundTrip call.
@@ -414,13 +302,14 @@ func (tc *testClientConn) roundTrip(req *http.Request) *testRoundTrip {
donec: make(chan struct{}),
}
tc.roundtrips = append(tc.roundtrips, rt)
- tc.hooks.newstream = func(cs *clientStream) { rt.cs = cs }
- tc.cc.goRun(func() {
+ go func() {
+ tc.group.Join()
defer close(rt.donec)
- rt.resp, rt.respErr = tc.cc.RoundTrip(req)
- })
+ rt.resp, rt.respErr = tc.cc.roundTrip(req, func(cs *clientStream) {
+ rt.id.Store(cs.ID)
+ })
+ }()
tc.sync()
- tc.hooks.newstream = nil
tc.t.Cleanup(func() {
if !rt.done() {
@@ -443,38 +332,6 @@ func (tc *testClientConn) greet(settings ...Setting) {
tc.wantFrameType(FrameSettings) // acknowledgement
}
-func (tc *testClientConn) writeSettings(settings ...Setting) {
- tc.t.Helper()
- if err := tc.fr.WriteSettings(settings...); err != nil {
- tc.t.Fatal(err)
- }
- tc.sync()
-}
-
-func (tc *testClientConn) writeSettingsAck() {
- tc.t.Helper()
- if err := tc.fr.WriteSettingsAck(); err != nil {
- tc.t.Fatal(err)
- }
- tc.sync()
-}
-
-func (tc *testClientConn) writeData(streamID uint32, endStream bool, data []byte) {
- tc.t.Helper()
- if err := tc.fr.WriteData(streamID, endStream, data); err != nil {
- tc.t.Fatal(err)
- }
- tc.sync()
-}
-
-func (tc *testClientConn) writeDataPadded(streamID uint32, endStream bool, data, pad []byte) {
- tc.t.Helper()
- if err := tc.fr.WriteDataPadded(streamID, endStream, data, pad); err != nil {
- tc.t.Fatal(err)
- }
- tc.sync()
-}
-
// makeHeaderBlockFragment encodes headers in a form suitable for inclusion
// in a HEADERS or CONTINUATION frame.
//
@@ -490,87 +347,6 @@ func (tc *testClientConn) makeHeaderBlockFragment(s ...string) []byte {
return tc.encbuf.Bytes()
}
-func (tc *testClientConn) writeHeaders(p HeadersFrameParam) {
- tc.t.Helper()
- if err := tc.fr.WriteHeaders(p); err != nil {
- tc.t.Fatal(err)
- }
- tc.sync()
-}
-
-// writeHeadersMode writes header frames, as modified by mode:
-//
-// - noHeader: Don't write the header.
-// - oneHeader: Write a single HEADERS frame.
-// - splitHeader: Write a HEADERS frame and CONTINUATION frame.
-func (tc *testClientConn) writeHeadersMode(mode headerType, p HeadersFrameParam) {
- tc.t.Helper()
- switch mode {
- case noHeader:
- case oneHeader:
- tc.writeHeaders(p)
- case splitHeader:
- if len(p.BlockFragment) < 2 {
- panic("too small")
- }
- contData := p.BlockFragment[1:]
- contEnd := p.EndHeaders
- p.BlockFragment = p.BlockFragment[:1]
- p.EndHeaders = false
- tc.writeHeaders(p)
- tc.writeContinuation(p.StreamID, contEnd, contData)
- default:
- panic("bogus mode")
- }
-}
-
-func (tc *testClientConn) writeContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) {
- tc.t.Helper()
- if err := tc.fr.WriteContinuation(streamID, endHeaders, headerBlockFragment); err != nil {
- tc.t.Fatal(err)
- }
- tc.sync()
-}
-
-func (tc *testClientConn) writeRSTStream(streamID uint32, code ErrCode) {
- tc.t.Helper()
- if err := tc.fr.WriteRSTStream(streamID, code); err != nil {
- tc.t.Fatal(err)
- }
- tc.sync()
-}
-
-func (tc *testClientConn) writePing(ack bool, data [8]byte) {
- tc.t.Helper()
- if err := tc.fr.WritePing(ack, data); err != nil {
- tc.t.Fatal(err)
- }
- tc.sync()
-}
-
-func (tc *testClientConn) writeGoAway(maxStreamID uint32, code ErrCode, debugData []byte) {
- tc.t.Helper()
- if err := tc.fr.WriteGoAway(maxStreamID, code, debugData); err != nil {
- tc.t.Fatal(err)
- }
- tc.sync()
-}
-
-func (tc *testClientConn) writeWindowUpdate(streamID, incr uint32) {
- tc.t.Helper()
- if err := tc.fr.WriteWindowUpdate(streamID, incr); err != nil {
- tc.t.Fatal(err)
- }
- tc.sync()
-}
-
-// closeWrite causes the net.Conn used by the ClientConn to return a error
-// from Read calls.
-func (tc *testClientConn) closeWrite(err error) {
- tc.rerr = err
- tc.sync()
-}
-
// inflowWindow returns the amount of inbound flow control available for a stream,
// or for the connection if streamID is 0.
func (tc *testClientConn) inflowWindow(streamID uint32) int32 {
@@ -593,15 +369,16 @@ type testRoundTrip struct {
resp *http.Response
respErr error
donec chan struct{}
- cs *clientStream
+ id atomic.Uint32
}
// streamID returns the HTTP/2 stream ID of the request.
func (rt *testRoundTrip) streamID() uint32 {
- if rt.cs == nil {
+ id := rt.id.Load()
+ if id == 0 {
panic("stream ID unknown")
}
- return rt.cs.ID
+ return id
}
// done reports whether RoundTrip has returned.
@@ -712,58 +489,46 @@ func diffHeaders(got, want http.Header) string {
return fmt.Sprintf("got: %v\nwant: %v", got, want)
}
-// testClientConnNetConn implements net.Conn.
-type testClientConnNetConn testClientConn
-
-func (nc *testClientConnNetConn) Read(b []byte) (n int, err error) {
- nc.cc.syncHooks.blockUntil(func() bool {
- return nc.rerr != nil || nc.rbuf.Len() > 0
- })
- if nc.rbuf.Len() > 0 {
- return nc.rbuf.Read(b)
- }
- return 0, nc.rerr
-}
-
-func (nc *testClientConnNetConn) Write(b []byte) (n int, err error) {
- return nc.wbuf.Write(b)
-}
-
-func (nc *testClientConnNetConn) Close() error {
- nc.netConnClosed = true
- return nil
-}
-
-func (*testClientConnNetConn) LocalAddr() (_ net.Addr) { return }
-func (*testClientConnNetConn) RemoteAddr() (_ net.Addr) { return }
-func (*testClientConnNetConn) SetDeadline(t time.Time) error { return nil }
-func (*testClientConnNetConn) SetReadDeadline(t time.Time) error { return nil }
-func (*testClientConnNetConn) SetWriteDeadline(t time.Time) error { return nil }
-
// A testTransport allows testing Transport.RoundTrip against fake servers.
// Tests that aren't specifically exercising RoundTrip's retry loop or connection pooling
// should use testClientConn instead.
type testTransport struct {
- t *testing.T
- tr *Transport
+ t *testing.T
+ tr *Transport
+ group *synctestGroup
ccs []*testClientConn
}
-func newTestTransport(t *testing.T, opts ...func(*Transport)) *testTransport {
- tr := &Transport{
- syncHooks: newTestSyncHooks(),
+func newTestTransport(t *testing.T, opts ...any) *testTransport {
+ tt := &testTransport{
+ t: t,
+ group: newSynctest(time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)),
}
+ tt.group.Join()
+
+ tr := &Transport{}
for _, o := range opts {
- o(tr)
+ switch o := o.(type) {
+ case func(*http.Transport):
+ if tr.t1 == nil {
+ tr.t1 = &http.Transport{}
+ }
+ o(tr.t1)
+ case func(*Transport):
+ o(tr)
+ case *Transport:
+ tr = o
+ }
}
+ tt.tr = tr
- tt := &testTransport{
- t: t,
- tr: tr,
- }
- tr.syncHooks.newclientconn = func(cc *ClientConn) {
- tt.ccs = append(tt.ccs, newTestClientConnFromClientConn(t, cc))
+ tr.transportTestHooks = &transportTestHooks{
+ group: tt.group,
+ newclientconn: func(cc *ClientConn) {
+ tc := newTestClientConnFromClientConn(t, cc)
+ tt.ccs = append(tt.ccs, tc)
+ },
}
t.Cleanup(func() {
@@ -771,20 +536,18 @@ func newTestTransport(t *testing.T, opts ...func(*Transport)) *testTransport {
if len(tt.ccs) > 0 {
t.Fatalf("%v test ClientConns created, but not examined by test", len(tt.ccs))
}
- if tt.tr.syncHooks.total != 0 {
- t.Errorf("%v goroutines still running after test completed", tt.tr.syncHooks.total)
- }
+ tt.group.Close(t)
})
return tt
}
func (tt *testTransport) sync() {
- tt.tr.syncHooks.waitInactive()
+ tt.group.Wait()
}
func (tt *testTransport) advance(d time.Duration) {
- tt.tr.syncHooks.advance(d)
+ tt.group.AdvanceTime(d)
tt.sync()
}
@@ -801,6 +564,7 @@ func (tt *testTransport) getConn() *testClientConn {
tt.ccs = tt.ccs[1:]
tc.sync()
tc.readClientPreface()
+ tc.sync()
return tc
}
@@ -809,10 +573,11 @@ func (tt *testTransport) roundTrip(req *http.Request) *testRoundTrip {
t: tt.t,
donec: make(chan struct{}),
}
- tt.tr.syncHooks.goRun(func() {
+ go func() {
+ tt.group.Join()
defer close(rt.donec)
rt.resp, rt.respErr = tt.tr.RoundTrip(req)
- })
+ }()
tt.sync()
tt.t.Cleanup(func() {
diff --git a/http2/config.go b/http2/config.go
new file mode 100644
index 0000000000..de58dfb8dc
--- /dev/null
+++ b/http2/config.go
@@ -0,0 +1,122 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "math"
+ "net/http"
+ "time"
+)
+
+// http2Config is a package-internal version of net/http.HTTP2Config.
+//
+// http.HTTP2Config was added in Go 1.24.
+// When running with a version of net/http that includes HTTP2Config,
+// we merge the configuration with the fields in Transport or Server
+// to produce an http2Config.
+//
+// Zero valued fields in http2Config are interpreted as in the
+// net/http.HTTP2Config documentation.
+//
+// Precedence order for reconciling configurations is:
+//
+// - Use the net/http.{Server,Transport}.HTTP2Config value, when non-zero.
+// - Otherwise use the http2.{Server,Transport} value.
+// - If the resulting value is zero or out of range, use a default.
+type http2Config struct {
+ MaxConcurrentStreams uint32
+ MaxDecoderHeaderTableSize uint32
+ MaxEncoderHeaderTableSize uint32
+ MaxReadFrameSize uint32
+ MaxUploadBufferPerConnection int32
+ MaxUploadBufferPerStream int32
+ SendPingTimeout time.Duration
+ PingTimeout time.Duration
+ WriteByteTimeout time.Duration
+ PermitProhibitedCipherSuites bool
+ CountError func(errType string)
+}
+
+// configFromServer merges configuration settings from
+// net/http.Server.HTTP2Config and http2.Server.
+func configFromServer(h1 *http.Server, h2 *Server) http2Config {
+ conf := http2Config{
+ MaxConcurrentStreams: h2.MaxConcurrentStreams,
+ MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
+ MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
+ MaxReadFrameSize: h2.MaxReadFrameSize,
+ MaxUploadBufferPerConnection: h2.MaxUploadBufferPerConnection,
+ MaxUploadBufferPerStream: h2.MaxUploadBufferPerStream,
+ SendPingTimeout: h2.ReadIdleTimeout,
+ PingTimeout: h2.PingTimeout,
+ WriteByteTimeout: h2.WriteByteTimeout,
+ PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites,
+ CountError: h2.CountError,
+ }
+ fillNetHTTPServerConfig(&conf, h1)
+ setConfigDefaults(&conf, true)
+ return conf
+}
+
+// configFromTransport merges configuration settings from h2 and h2.t1.HTTP2
+// (the net/http Transport).
+func configFromTransport(h2 *Transport) http2Config {
+ conf := http2Config{
+ MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
+ MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
+ MaxReadFrameSize: h2.MaxReadFrameSize,
+ SendPingTimeout: h2.ReadIdleTimeout,
+ PingTimeout: h2.PingTimeout,
+ WriteByteTimeout: h2.WriteByteTimeout,
+ }
+
+ // Unlike most config fields, where out-of-range values revert to the default,
+ // Transport.MaxReadFrameSize clips.
+ if conf.MaxReadFrameSize < minMaxFrameSize {
+ conf.MaxReadFrameSize = minMaxFrameSize
+ } else if conf.MaxReadFrameSize > maxFrameSize {
+ conf.MaxReadFrameSize = maxFrameSize
+ }
+
+ if h2.t1 != nil {
+ fillNetHTTPTransportConfig(&conf, h2.t1)
+ }
+ setConfigDefaults(&conf, false)
+ return conf
+}
+
+func setDefault[T ~int | ~int32 | ~uint32 | ~int64](v *T, minval, maxval, defval T) {
+ if *v < minval || *v > maxval {
+ *v = defval
+ }
+}
+
+func setConfigDefaults(conf *http2Config, server bool) {
+ setDefault(&conf.MaxConcurrentStreams, 1, math.MaxUint32, defaultMaxStreams)
+ setDefault(&conf.MaxEncoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize)
+ setDefault(&conf.MaxDecoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize)
+ if server {
+ setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, 1<<20)
+ } else {
+ setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, transportDefaultConnFlow)
+ }
+ if server {
+ setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, 1<<20)
+ } else {
+ setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, transportDefaultStreamFlow)
+ }
+ setDefault(&conf.MaxReadFrameSize, minMaxFrameSize, maxFrameSize, defaultMaxReadFrameSize)
+ setDefault(&conf.PingTimeout, 1, math.MaxInt64, 15*time.Second)
+}
+
+// adjustHTTP1MaxHeaderSize converts a limit in bytes on the size of an HTTP/1 header
+// to an HTTP/2 MAX_HEADER_LIST_SIZE value.
+func adjustHTTP1MaxHeaderSize(n int64) int64 {
+ // http2's count is in a slightly different unit and includes 32 bytes per pair.
+ // So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
+ const perFieldOverhead = 32 // per http2 spec
+ const typicalHeaders = 10 // conservative
+ return n + typicalHeaders*perFieldOverhead
+}
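
To make the precedence comment above concrete: under Go 1.24, a non-zero net/http.HTTP2Config field overrides the matching http2.Server field, and adjustHTTP1MaxHeaderSize pads the HTTP/1 header limit, so net/http's default 1 MiB MaxHeaderBytes is advertised as 1<<20+320. A hedged sketch, assuming Go 1.24 and the ConfigureServer wiring used elsewhere in this package (addresses and sizes are illustrative):

	package main

	import (
		"log"
		"net/http"

		"golang.org/x/net/http2"
	)

	func main() {
		h2s := &http2.Server{MaxReadFrameSize: 1 << 20} // used only if srv.HTTP2 leaves it zero
		srv := &http.Server{
			Addr: ":8443",
			// Go 1.24+: non-zero fields here take precedence over h2s.
			HTTP2: &http.HTTP2Config{MaxReadFrameSize: 256 << 10},
		}
		if err := http2.ConfigureServer(srv, h2s); err != nil {
			log.Fatal(err)
		}
		// srv.ListenAndServeTLS(certFile, keyFile) would now advertise
		// SETTINGS_MAX_FRAME_SIZE = 262144, taken from srv.HTTP2.
		_ = srv
	}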
diff --git a/http2/config_go124.go b/http2/config_go124.go
new file mode 100644
index 0000000000..e3784123c8
--- /dev/null
+++ b/http2/config_go124.go
@@ -0,0 +1,61 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.24
+
+package http2
+
+import "net/http"
+
+// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2.
+func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {
+ fillNetHTTPConfig(conf, srv.HTTP2)
+}
+
+// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2.
+func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {
+ fillNetHTTPConfig(conf, tr.HTTP2)
+}
+
+func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
+ if h2 == nil {
+ return
+ }
+ if h2.MaxConcurrentStreams != 0 {
+ conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
+ }
+ if h2.MaxEncoderHeaderTableSize != 0 {
+ conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
+ }
+ if h2.MaxDecoderHeaderTableSize != 0 {
+ conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize)
+ }
+ if h2.MaxConcurrentStreams != 0 {
+ conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
+ }
+ if h2.MaxReadFrameSize != 0 {
+ conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize)
+ }
+ if h2.MaxReceiveBufferPerConnection != 0 {
+ conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection)
+ }
+ if h2.MaxReceiveBufferPerStream != 0 {
+ conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream)
+ }
+ if h2.SendPingTimeout != 0 {
+ conf.SendPingTimeout = h2.SendPingTimeout
+ }
+ if h2.PingTimeout != 0 {
+ conf.PingTimeout = h2.PingTimeout
+ }
+ if h2.WriteByteTimeout != 0 {
+ conf.WriteByteTimeout = h2.WriteByteTimeout
+ }
+ if h2.PermitProhibitedCipherSuites {
+ conf.PermitProhibitedCipherSuites = true
+ }
+ if h2.CountError != nil {
+ conf.CountError = h2.CountError
+ }
+}
diff --git a/http2/config_pre_go124.go b/http2/config_pre_go124.go
new file mode 100644
index 0000000000..060fd6c64c
--- /dev/null
+++ b/http2/config_pre_go124.go
@@ -0,0 +1,16 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.24
+
+package http2
+
+import "net/http"
+
+// Pre-Go 1.24 fallback.
+// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24.
+
+func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {}
+
+func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {}
diff --git a/http2/config_test.go b/http2/config_test.go
new file mode 100644
index 0000000000..b8e7a7b043
--- /dev/null
+++ b/http2/config_test.go
@@ -0,0 +1,95 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.24
+
+package http2
+
+import (
+ "net/http"
+ "testing"
+ "time"
+)
+
+func TestConfigServerSettings(t *testing.T) {
+ config := &http.HTTP2Config{
+ MaxConcurrentStreams: 1,
+ MaxDecoderHeaderTableSize: 1<<20 + 2,
+ MaxEncoderHeaderTableSize: 1<<20 + 3,
+ MaxReadFrameSize: 1<<20 + 4,
+ MaxReceiveBufferPerConnection: 64<<10 + 5,
+ MaxReceiveBufferPerStream: 64<<10 + 6,
+ }
+ const maxHeaderBytes = 4096 + 7
+ st := newServerTester(t, nil, func(s *http.Server) {
+ s.MaxHeaderBytes = maxHeaderBytes
+ s.HTTP2 = config
+ })
+ st.writePreface()
+ st.writeSettings()
+ st.wantSettings(map[SettingID]uint32{
+ SettingMaxConcurrentStreams: uint32(config.MaxConcurrentStreams),
+ SettingHeaderTableSize: uint32(config.MaxDecoderHeaderTableSize),
+ SettingInitialWindowSize: uint32(config.MaxReceiveBufferPerStream),
+ SettingMaxFrameSize: uint32(config.MaxReadFrameSize),
+ SettingMaxHeaderListSize: maxHeaderBytes + (32 * 10),
+ })
+}
+
+func TestConfigTransportSettings(t *testing.T) {
+ config := &http.HTTP2Config{
+ MaxConcurrentStreams: 1, // ignored by Transport
+ MaxDecoderHeaderTableSize: 1<<20 + 2,
+ MaxEncoderHeaderTableSize: 1<<20 + 3,
+ MaxReadFrameSize: 1<<20 + 4,
+ MaxReceiveBufferPerConnection: 64<<10 + 5,
+ MaxReceiveBufferPerStream: 64<<10 + 6,
+ }
+ const maxHeaderBytes = 4096 + 7
+ tc := newTestClientConn(t, func(tr *http.Transport) {
+ tr.HTTP2 = config
+ tr.MaxResponseHeaderBytes = maxHeaderBytes
+ })
+ tc.wantSettings(map[SettingID]uint32{
+ SettingHeaderTableSize: uint32(config.MaxDecoderHeaderTableSize),
+ SettingInitialWindowSize: uint32(config.MaxReceiveBufferPerStream),
+ SettingMaxFrameSize: uint32(config.MaxReadFrameSize),
+ SettingMaxHeaderListSize: maxHeaderBytes + (32 * 10),
+ })
+ tc.wantWindowUpdate(0, uint32(config.MaxReceiveBufferPerConnection))
+}
+
+func TestConfigPingTimeoutServer(t *testing.T) {
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ }, func(s *Server) {
+ s.ReadIdleTimeout = 2 * time.Second
+ s.PingTimeout = 3 * time.Second
+ })
+ st.greet()
+
+ st.advance(2 * time.Second)
+ _ = readFrame[*PingFrame](t, st)
+ st.advance(3 * time.Second)
+ st.wantClosed()
+}
+
+func TestConfigPingTimeoutTransport(t *testing.T) {
+ tc := newTestClientConn(t, func(tr *Transport) {
+ tr.ReadIdleTimeout = 2 * time.Second
+ tr.PingTimeout = 3 * time.Second
+ })
+ tc.greet()
+
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ rt := tc.roundTrip(req)
+ tc.wantFrameType(FrameHeaders)
+
+ tc.advance(2 * time.Second)
+ tc.wantFrameType(FramePing)
+ tc.advance(3 * time.Second)
+ err := rt.err()
+ if err == nil {
+ t.Fatalf("expected connection to close")
+ }
+}
diff --git a/http2/connframes_test.go b/http2/connframes_test.go
new file mode 100644
index 0000000000..2c4532571a
--- /dev/null
+++ b/http2/connframes_test.go
@@ -0,0 +1,431 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "bytes"
+ "io"
+ "net/http"
+ "os"
+ "reflect"
+ "slices"
+ "testing"
+
+ "golang.org/x/net/http2/hpack"
+)
+
+type testConnFramer struct {
+ t testing.TB
+ fr *Framer
+ dec *hpack.Decoder
+}
+
+// readFrame reads the next frame.
+// It returns nil if the conn is closed or no frames are available.
+func (tf *testConnFramer) readFrame() Frame {
+ tf.t.Helper()
+ fr, err := tf.fr.ReadFrame()
+ if err == io.EOF || err == os.ErrDeadlineExceeded {
+ return nil
+ }
+ if err != nil {
+ tf.t.Fatalf("ReadFrame: %v", err)
+ }
+ return fr
+}
+
+type readFramer interface {
+ readFrame() Frame
+}
+
+// readFrame reads a frame of a specific type.
+func readFrame[T any](t testing.TB, framer readFramer) T {
+ t.Helper()
+ var v T
+ fr := framer.readFrame()
+ if fr == nil {
+ t.Fatalf("got no frame, want frame %T", v)
+ }
+ v, ok := fr.(T)
+ if !ok {
+ t.Fatalf("got frame %T, want %T", fr, v)
+ }
+ return v
+}
+
+// wantFrameType reads the next frame.
+// It produces an error if the frame type is not the expected value.
+func (tf *testConnFramer) wantFrameType(want FrameType) {
+ tf.t.Helper()
+ fr := tf.readFrame()
+ if fr == nil {
+ tf.t.Fatalf("got no frame, want frame %v", want)
+ }
+ if got := fr.Header().Type; got != want {
+ tf.t.Fatalf("got frame %v, want %v", got, want)
+ }
+}
+
+// wantUnorderedFrames reads frames until every condition in want has been satisfied.
+//
+// want is a list of func(*SomeFrame) bool.
+// wantUnorderedFrames will call each func with frames of the appropriate type
+// until the func returns true.
+// It calls t.Fatal if an unexpected frame is received (no func has that frame type,
+// or all funcs with that type have returned true), or if the framer runs out of frames
+// with unsatisfied funcs.
+//
+// Example:
+//
+// // Read a SETTINGS frame, and any number of DATA frames for a stream.
+// // The SETTINGS frame may appear anywhere in the sequence.
+// // The last DATA frame must indicate the end of the stream.
+// tf.wantUnorderedFrames(
+// func(f *SettingsFrame) bool {
+// return true
+// },
+// func(f *DataFrame) bool {
+// return f.StreamEnded()
+// },
+// )
+func (tf *testConnFramer) wantUnorderedFrames(want ...any) {
+ tf.t.Helper()
+ want = slices.Clone(want)
+ seen := 0
+frame:
+ for seen < len(want) && !tf.t.Failed() {
+ fr := tf.readFrame()
+ if fr == nil {
+ break
+ }
+ for i, f := range want {
+ if f == nil {
+ continue
+ }
+ typ := reflect.TypeOf(f)
+ if typ.Kind() != reflect.Func ||
+ typ.NumIn() != 1 ||
+ typ.NumOut() != 1 ||
+ typ.Out(0) != reflect.TypeOf(true) {
+ tf.t.Fatalf("expected func(*SomeFrame) bool, got %T", f)
+ }
+ if typ.In(0) == reflect.TypeOf(fr) {
+ out := reflect.ValueOf(f).Call([]reflect.Value{reflect.ValueOf(fr)})
+ if out[0].Bool() {
+ want[i] = nil
+ seen++
+ }
+ continue frame
+ }
+ }
+ tf.t.Errorf("got unexpected frame type %T", fr)
+ }
+ if seen < len(want) {
+ for _, f := range want {
+ if f == nil {
+ continue
+ }
+ tf.t.Errorf("did not see expected frame: %v", reflect.TypeOf(f).In(0))
+ }
+ tf.t.Fatalf("did not see %v expected frame types", len(want)-seen)
+ }
+}
+
+type wantHeader struct {
+ streamID uint32
+ endStream bool
+ header http.Header
+}
+
+// wantHeaders reads a HEADERS frame and potential CONTINUATION frames,
+// and asserts that they contain the expected headers.
+func (tf *testConnFramer) wantHeaders(want wantHeader) {
+ tf.t.Helper()
+
+ hf := readFrame[*HeadersFrame](tf.t, tf)
+ if got, want := hf.StreamID, want.streamID; got != want {
+ tf.t.Fatalf("got stream ID %v, want %v", got, want)
+ }
+ if got, want := hf.StreamEnded(), want.endStream; got != want {
+ tf.t.Fatalf("got stream ended %v, want %v", got, want)
+ }
+
+ gotHeader := make(http.Header)
+ tf.dec.SetEmitFunc(func(hf hpack.HeaderField) {
+ gotHeader[hf.Name] = append(gotHeader[hf.Name], hf.Value)
+ })
+ defer tf.dec.SetEmitFunc(nil)
+ if _, err := tf.dec.Write(hf.HeaderBlockFragment()); err != nil {
+ tf.t.Fatalf("decoding HEADERS frame: %v", err)
+ }
+ headersEnded := hf.HeadersEnded()
+ for !headersEnded {
+ cf := readFrame[*ContinuationFrame](tf.t, tf)
+ if cf == nil {
+ tf.t.Fatalf("got end of frames, want CONTINUATION")
+ }
+ if _, err := tf.dec.Write(cf.HeaderBlockFragment()); err != nil {
+ tf.t.Fatalf("decoding CONTINUATION frame: %v", err)
+ }
+ headersEnded = cf.HeadersEnded()
+ }
+ if err := tf.dec.Close(); err != nil {
+ tf.t.Fatalf("hpack decoding error: %v", err)
+ }
+
+ for k, v := range want.header {
+ if !reflect.DeepEqual(v, gotHeader[k]) {
+ tf.t.Fatalf("got header %q = %q; want %q", k, v, gotHeader[k])
+ }
+ }
+}
+
+// decodeHeader supports some older server tests.
+// TODO: rewrite those tests to use newer, more convenient test APIs.
+func (tf *testConnFramer) decodeHeader(headerBlock []byte) (pairs [][2]string) {
+ tf.dec.SetEmitFunc(func(hf hpack.HeaderField) {
+ if hf.Name == "date" {
+ return
+ }
+ pairs = append(pairs, [2]string{hf.Name, hf.Value})
+ })
+ defer tf.dec.SetEmitFunc(nil)
+ if _, err := tf.dec.Write(headerBlock); err != nil {
+ tf.t.Fatalf("hpack decoding error: %v", err)
+ }
+ if err := tf.dec.Close(); err != nil {
+ tf.t.Fatalf("hpack decoding error: %v", err)
+ }
+ return pairs
+}
+
+type wantData struct {
+ streamID uint32
+ endStream bool
+ size int
+ data []byte
+ multiple bool // data may be spread across multiple DATA frames
+}
+
+// wantData reads zero or more DATA frames, and asserts that they match the expectation.
+func (tf *testConnFramer) wantData(want wantData) {
+ tf.t.Helper()
+ gotSize := 0
+ gotEndStream := false
+ if want.data != nil {
+ want.size = len(want.data)
+ }
+ var gotData []byte
+ for {
+ fr := tf.readFrame()
+ if fr == nil {
+ break
+ }
+ data, ok := fr.(*DataFrame)
+ if !ok {
+ tf.t.Fatalf("got frame %T, want DataFrame", fr)
+ }
+ if want.data != nil {
+ gotData = append(gotData, data.Data()...)
+ }
+ gotSize += len(data.Data())
+ if data.StreamEnded() {
+ gotEndStream = true
+ break
+ }
+ if !want.endStream && gotSize >= want.size {
+ break
+ }
+ if !want.multiple {
+ break
+ }
+ }
+ if gotSize != want.size {
+ tf.t.Fatalf("got %v bytes of DATA frames, want %v", gotSize, want.size)
+ }
+ if gotEndStream != want.endStream {
+ tf.t.Fatalf("after %v bytes of DATA frames, got END_STREAM=%v; want %v", gotSize, gotEndStream, want.endStream)
+ }
+ if want.data != nil && !bytes.Equal(gotData, want.data) {
+ tf.t.Fatalf("got data %q, want %q", gotData, want.data)
+ }
+}
+
+func (tf *testConnFramer) wantRSTStream(streamID uint32, code ErrCode) {
+ tf.t.Helper()
+ fr := readFrame[*RSTStreamFrame](tf.t, tf)
+ if fr.StreamID != streamID || fr.ErrCode != code {
+ tf.t.Fatalf("got %v, want RST_STREAM StreamID=%v, code=%v", summarizeFrame(fr), streamID, code)
+ }
+}
+
+func (tf *testConnFramer) wantSettings(want map[SettingID]uint32) {
+ fr := readFrame[*SettingsFrame](tf.t, tf)
+ if fr.Header().Flags.Has(FlagSettingsAck) {
+ tf.t.Errorf("got SETTINGS frame with ACK set, want no ACK")
+ }
+ for wantID, wantVal := range want {
+ gotVal, ok := fr.Value(wantID)
+ if !ok {
+ tf.t.Errorf("SETTINGS: %v is not set, want %v", wantID, wantVal)
+ } else if gotVal != wantVal {
+ tf.t.Errorf("SETTINGS: %v is %v, want %v", wantID, gotVal, wantVal)
+ }
+ }
+ if tf.t.Failed() {
+ tf.t.Fatalf("%v", fr)
+ }
+}
+
+func (tf *testConnFramer) wantSettingsAck() {
+ tf.t.Helper()
+ fr := readFrame[*SettingsFrame](tf.t, tf)
+ if !fr.Header().Flags.Has(FlagSettingsAck) {
+ tf.t.Fatal("Settings Frame didn't have ACK set")
+ }
+}
+
+func (tf *testConnFramer) wantGoAway(maxStreamID uint32, code ErrCode) {
+ tf.t.Helper()
+ fr := readFrame[*GoAwayFrame](tf.t, tf)
+ if fr.LastStreamID != maxStreamID || fr.ErrCode != code {
+ tf.t.Fatalf("got %v, want GOAWAY LastStreamID=%v, code=%v", summarizeFrame(fr), maxStreamID, code)
+ }
+}
+
+func (tf *testConnFramer) wantWindowUpdate(streamID, incr uint32) {
+ tf.t.Helper()
+ wu := readFrame[*WindowUpdateFrame](tf.t, tf)
+ if wu.FrameHeader.StreamID != streamID {
+ tf.t.Fatalf("WindowUpdate StreamID = %d; want %d", wu.FrameHeader.StreamID, streamID)
+ }
+ if wu.Increment != incr {
+ tf.t.Fatalf("WindowUpdate increment = %d; want %d", wu.Increment, incr)
+ }
+}
+
+func (tf *testConnFramer) wantClosed() {
+ tf.t.Helper()
+ fr, err := tf.fr.ReadFrame()
+ if err == nil {
+ tf.t.Fatalf("got unexpected frame (want closed connection): %v", fr)
+ }
+ if err == os.ErrDeadlineExceeded {
+ tf.t.Fatalf("connection is not closed; want it to be")
+ }
+}
+
+func (tf *testConnFramer) wantIdle() {
+ tf.t.Helper()
+ fr, err := tf.fr.ReadFrame()
+ if err == nil {
+ tf.t.Fatalf("got unexpected frame (want idle connection): %v", fr)
+ }
+ if err != os.ErrDeadlineExceeded {
+ tf.t.Fatalf("got unexpected frame error (want idle connection): %v", err)
+ }
+}
+
+func (tf *testConnFramer) writeSettings(settings ...Setting) {
+ tf.t.Helper()
+ if err := tf.fr.WriteSettings(settings...); err != nil {
+ tf.t.Fatal(err)
+ }
+}
+
+func (tf *testConnFramer) writeSettingsAck() {
+ tf.t.Helper()
+ if err := tf.fr.WriteSettingsAck(); err != nil {
+ tf.t.Fatal(err)
+ }
+}
+
+func (tf *testConnFramer) writeData(streamID uint32, endStream bool, data []byte) {
+ tf.t.Helper()
+ if err := tf.fr.WriteData(streamID, endStream, data); err != nil {
+ tf.t.Fatal(err)
+ }
+}
+
+func (tf *testConnFramer) writeDataPadded(streamID uint32, endStream bool, data, pad []byte) {
+ tf.t.Helper()
+ if err := tf.fr.WriteDataPadded(streamID, endStream, data, pad); err != nil {
+ tf.t.Fatal(err)
+ }
+}
+
+func (tf *testConnFramer) writeHeaders(p HeadersFrameParam) {
+ tf.t.Helper()
+ if err := tf.fr.WriteHeaders(p); err != nil {
+ tf.t.Fatal(err)
+ }
+}
+
+// writeHeadersMode writes header frames, as modified by mode:
+//
+// - noHeader: Don't write the header.
+// - oneHeader: Write a single HEADERS frame.
+// - splitHeader: Write a HEADERS frame and CONTINUATION frame.
+func (tf *testConnFramer) writeHeadersMode(mode headerType, p HeadersFrameParam) {
+ tf.t.Helper()
+ switch mode {
+ case noHeader:
+ case oneHeader:
+ tf.writeHeaders(p)
+ case splitHeader:
+ if len(p.BlockFragment) < 2 {
+ panic("too small")
+ }
+ contData := p.BlockFragment[1:]
+ contEnd := p.EndHeaders
+ p.BlockFragment = p.BlockFragment[:1]
+ p.EndHeaders = false
+ tf.writeHeaders(p)
+ tf.writeContinuation(p.StreamID, contEnd, contData)
+ default:
+ panic("bogus mode")
+ }
+}
+
+func (tf *testConnFramer) writeContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) {
+ tf.t.Helper()
+ if err := tf.fr.WriteContinuation(streamID, endHeaders, headerBlockFragment); err != nil {
+ tf.t.Fatal(err)
+ }
+}
+
+func (tf *testConnFramer) writePriority(id uint32, p PriorityParam) {
+ if err := tf.fr.WritePriority(id, p); err != nil {
+ tf.t.Fatal(err)
+ }
+}
+
+func (tf *testConnFramer) writeRSTStream(streamID uint32, code ErrCode) {
+ tf.t.Helper()
+ if err := tf.fr.WriteRSTStream(streamID, code); err != nil {
+ tf.t.Fatal(err)
+ }
+}
+
+func (tf *testConnFramer) writePing(ack bool, data [8]byte) {
+ tf.t.Helper()
+ if err := tf.fr.WritePing(ack, data); err != nil {
+ tf.t.Fatal(err)
+ }
+}
+
+func (tf *testConnFramer) writeGoAway(maxStreamID uint32, code ErrCode, debugData []byte) {
+ tf.t.Helper()
+ if err := tf.fr.WriteGoAway(maxStreamID, code, debugData); err != nil {
+ tf.t.Fatal(err)
+ }
+}
+
+func (tf *testConnFramer) writeWindowUpdate(streamID, incr uint32) {
+ tf.t.Helper()
+ if err := tf.fr.WriteWindowUpdate(streamID, incr); err != nil {
+ tf.t.Fatal(err)
+ }
+}
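A minimal sketch (not part of the change) of how a test might combine the helpers above, assuming the handler under test echoes the request body and that headerBlock is a hypothetical hpack-encoded request header block for stream 1:

	tf.writeHeaders(HeadersFrameParam{
		StreamID:      1,
		BlockFragment: headerBlock, // hypothetical, encoded elsewhere
		EndStream:     false,
		EndHeaders:    true,
	})
	tf.writeData(1, true, []byte("hello"))
	tf.wantHeaders(wantHeader{streamID: 1, endStream: false}) // response HEADERS
	tf.wantData(wantData{
		streamID:  1,
		endStream: true,
		data:      []byte("hello"),
		multiple:  true, // the body may be split across DATA frames
	})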
diff --git a/http2/frame.go b/http2/frame.go
index 43557ab7e9..81faec7e75 100644
--- a/http2/frame.go
+++ b/http2/frame.go
@@ -490,6 +490,9 @@ func terminalReadFrameError(err error) bool {
// returned error is ErrFrameTooLarge. Other errors may be of type
// ConnectionError, StreamError, or anything else from the underlying
// reader.
+//
+// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID
+// indicates the stream responsible for the error.
func (fr *Framer) ReadFrame() (Frame, error) {
fr.errDetail = nil
if fr.lastFrame != nil {
@@ -1487,7 +1490,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error {
pf := mh.PseudoFields()
for i, hf := range pf {
switch hf.Name {
- case ":method", ":path", ":scheme", ":authority":
+ case ":method", ":path", ":scheme", ":authority", ":protocol":
isRequest = true
case ":status":
isResponse = true
@@ -1495,7 +1498,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error {
return pseudoHeaderError(hf.Name)
}
// Check for duplicates.
- // This would be a bad algorithm, but N is 4.
+ // This would be a bad algorithm, but N is 5.
// And this doesn't allocate.
for _, hf2 := range pf[:i] {
if hf.Name == hf2.Name {
@@ -1521,7 +1524,7 @@ func (fr *Framer) maxHeaderStringLen() int {
// readMetaFrame returns 0 or more CONTINUATION frames from fr and
// merge them into the provided hf and returns a MetaHeadersFrame
// with the decoded hpack values.
-func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
+func (fr *Framer) readMetaFrame(hf *HeadersFrame) (Frame, error) {
if fr.AllowIllegalReads {
return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders")
}
@@ -1592,7 +1595,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
}
// It would be nice to send a RST_STREAM before sending the GOAWAY,
// but the structure of the server's frame writer makes this difficult.
- return nil, ConnectionError(ErrCodeProtocol)
+ return mh, ConnectionError(ErrCodeProtocol)
}
// Also close the connection after any CONTINUATION frame following an
@@ -1604,11 +1607,11 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
}
// It would be nice to send a RST_STREAM before sending the GOAWAY,
// but the structure of the server's frame writer makes this difficult.
- return nil, ConnectionError(ErrCodeProtocol)
+ return mh, ConnectionError(ErrCodeProtocol)
}
if _, err := hdec.Write(frag); err != nil {
- return nil, ConnectionError(ErrCodeCompression)
+ return mh, ConnectionError(ErrCodeCompression)
}
if hc.HeadersEnded() {
@@ -1625,7 +1628,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
mh.HeadersFrame.invalidate()
if err := hdec.Close(); err != nil {
- return nil, ConnectionError(ErrCodeCompression)
+ return mh, ConnectionError(ErrCodeCompression)
}
if invalid != nil {
fr.errDetail = invalid
diff --git a/http2/gate_test.go b/http2/gate_test.go
new file mode 100644
index 0000000000..e5e6a315be
--- /dev/null
+++ b/http2/gate_test.go
@@ -0,0 +1,85 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package http2
+
+import "context"
+
+// A gate is a monitor (mutex + condition variable) with one bit of state.
+//
+// The condition may be either set or unset.
+// Lock operations may be unconditional, or wait for the condition to be set.
+// Unlock operations record the new state of the condition.
+type gate struct {
+ // When unlocked, exactly one of set or unset contains a value.
+ // When locked, neither chan contains a value.
+ set chan struct{}
+ unset chan struct{}
+}
+
+// newGate returns a new, unlocked gate with the condition unset.
+func newGate() gate {
+ g := newLockedGate()
+ g.unlock(false)
+ return g
+}
+
+// newLockedGate returns a new, locked gate.
+func newLockedGate() gate {
+ return gate{
+ set: make(chan struct{}, 1),
+ unset: make(chan struct{}, 1),
+ }
+}
+
+// lock acquires the gate unconditionally.
+// It reports whether the condition is set.
+func (g *gate) lock() (set bool) {
+ select {
+ case <-g.set:
+ return true
+ case <-g.unset:
+ return false
+ }
+}
+
+// waitAndLock waits until the condition is set before acquiring the gate.
+// If the context expires, waitAndLock returns an error and does not acquire the gate.
+func (g *gate) waitAndLock(ctx context.Context) error {
+ select {
+ case <-g.set:
+ return nil
+ default:
+ }
+ select {
+ case <-g.set:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
+
+// lockIfSet acquires the gate if and only if the condition is set.
+func (g *gate) lockIfSet() (acquired bool) {
+ select {
+ case <-g.set:
+ return true
+ default:
+ return false
+ }
+}
+
+// unlock sets the condition and releases the gate.
+func (g *gate) unlock(set bool) {
+ if set {
+ g.set <- struct{}{}
+ } else {
+ g.unset <- struct{}{}
+ }
+}
+
+// unlockFunc sets the condition to the result of f and releases the gate.
+// Useful in defers.
+func (g *gate) unlockFunc(f func() bool) {
+ g.unlock(f())
+}
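An illustrative sketch (not part of the change) of the gate pattern, using the condition bit to mean "a value is available" in a hypothetical one-slot mailbox:

	type mailbox struct {
		g   gate // created with newGate()
		msg string
	}

	func (m *mailbox) put(s string) {
		m.g.lock() // acquire unconditionally
		m.msg = s
		m.g.unlock(true) // release with the condition set: a message is waiting
	}

	func (m *mailbox) get(ctx context.Context) (string, error) {
		// Block until a message is available or the context expires.
		if err := m.g.waitAndLock(ctx); err != nil {
			return "", err
		}
		defer m.g.unlock(false) // release with the condition cleared: mailbox empty
		return m.msg, nil
	}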
diff --git a/http2/h2c/h2c_test.go b/http2/h2c/h2c_test.go
index 038cbc3649..3e78f29135 100644
--- a/http2/h2c/h2c_test.go
+++ b/http2/h2c/h2c_test.go
@@ -9,7 +9,6 @@ import (
"crypto/tls"
"fmt"
"io"
- "io/ioutil"
"log"
"net"
"net/http"
@@ -68,7 +67,7 @@ func TestContext(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- _, err = ioutil.ReadAll(resp.Body)
+ _, err = io.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
}
@@ -162,7 +161,7 @@ func TestMaxBytesHandler(t *testing.T) {
t.Fatal(err)
}
defer resp.Body.Close()
- _, err = ioutil.ReadAll(resp.Body)
+ _, err = io.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
}
diff --git a/http2/hpack/gen.go b/http2/hpack/gen.go
index 21a4198b33..0efa8e558c 100644
--- a/http2/hpack/gen.go
+++ b/http2/hpack/gen.go
@@ -10,7 +10,6 @@ import (
"bytes"
"fmt"
"go/format"
- "io/ioutil"
"os"
"sort"
@@ -176,7 +175,7 @@ func genFile(name string, buf *bytes.Buffer) {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
- if err := ioutil.WriteFile(name, b, 0644); err != nil {
+ if err := os.WriteFile(name, b, 0644); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
diff --git a/http2/http2.go b/http2/http2.go
index 6f2df28187..c7601c909f 100644
--- a/http2/http2.go
+++ b/http2/http2.go
@@ -17,24 +17,28 @@ package http2 // import "golang.org/x/net/http2"
import (
"bufio"
+ "context"
"crypto/tls"
+ "errors"
"fmt"
- "io"
+ "net"
"net/http"
"os"
"sort"
"strconv"
"strings"
"sync"
+ "time"
"golang.org/x/net/http/httpguts"
)
var (
- VerboseLogs bool
- logFrameWrites bool
- logFrameReads bool
- inTests bool
+ VerboseLogs bool
+ logFrameWrites bool
+ logFrameReads bool
+ inTests bool
+ disableExtendedConnectProtocol bool
)
func init() {
@@ -47,6 +51,9 @@ func init() {
logFrameWrites = true
logFrameReads = true
}
+ if strings.Contains(e, "http2xconnect=0") {
+ disableExtendedConnectProtocol = true
+ }
}
const (
@@ -138,6 +145,10 @@ func (s Setting) Valid() error {
if s.Val < 16384 || s.Val > 1<<24-1 {
return ConnectionError(ErrCodeProtocol)
}
+ case SettingEnableConnectProtocol:
+ if s.Val != 1 && s.Val != 0 {
+ return ConnectionError(ErrCodeProtocol)
+ }
}
return nil
}
@@ -147,21 +158,23 @@ func (s Setting) Valid() error {
type SettingID uint16
const (
- SettingHeaderTableSize SettingID = 0x1
- SettingEnablePush SettingID = 0x2
- SettingMaxConcurrentStreams SettingID = 0x3
- SettingInitialWindowSize SettingID = 0x4
- SettingMaxFrameSize SettingID = 0x5
- SettingMaxHeaderListSize SettingID = 0x6
+ SettingHeaderTableSize SettingID = 0x1
+ SettingEnablePush SettingID = 0x2
+ SettingMaxConcurrentStreams SettingID = 0x3
+ SettingInitialWindowSize SettingID = 0x4
+ SettingMaxFrameSize SettingID = 0x5
+ SettingMaxHeaderListSize SettingID = 0x6
+ SettingEnableConnectProtocol SettingID = 0x8
)
var settingName = map[SettingID]string{
- SettingHeaderTableSize: "HEADER_TABLE_SIZE",
- SettingEnablePush: "ENABLE_PUSH",
- SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
- SettingInitialWindowSize: "INITIAL_WINDOW_SIZE",
- SettingMaxFrameSize: "MAX_FRAME_SIZE",
- SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE",
+ SettingHeaderTableSize: "HEADER_TABLE_SIZE",
+ SettingEnablePush: "ENABLE_PUSH",
+ SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
+ SettingInitialWindowSize: "INITIAL_WINDOW_SIZE",
+ SettingMaxFrameSize: "MAX_FRAME_SIZE",
+ SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE",
+ SettingEnableConnectProtocol: "ENABLE_CONNECT_PROTOCOL",
}
func (s SettingID) String() string {
@@ -210,12 +223,6 @@ type stringWriter interface {
WriteString(s string) (n int, err error)
}
-// A gate lets two goroutines coordinate their activities.
-type gate chan struct{}
-
-func (g gate) Done() { g <- struct{}{} }
-func (g gate) Wait() { <-g }
-
// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
type closeWaiter chan struct{}
@@ -241,13 +248,19 @@ func (cw closeWaiter) Wait() {
// Its buffered writer is lazily allocated as needed, to minimize
// idle memory usage with many connections.
type bufferedWriter struct {
- _ incomparable
- w io.Writer // immutable
- bw *bufio.Writer // non-nil when data is buffered
+ _ incomparable
+ group synctestGroupInterface // immutable
+ conn net.Conn // immutable
+ bw *bufio.Writer // non-nil when data is buffered
+ byteTimeout time.Duration // immutable, WriteByteTimeout
}
-func newBufferedWriter(w io.Writer) *bufferedWriter {
- return &bufferedWriter{w: w}
+func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter {
+ return &bufferedWriter{
+ group: group,
+ conn: conn,
+ byteTimeout: timeout,
+ }
}
// bufWriterPoolBufferSize is the size of bufio.Writer's
@@ -274,7 +287,7 @@ func (w *bufferedWriter) Available() int {
func (w *bufferedWriter) Write(p []byte) (n int, err error) {
if w.bw == nil {
bw := bufWriterPool.Get().(*bufio.Writer)
- bw.Reset(w.w)
+ bw.Reset((*bufferedWriterTimeoutWriter)(w))
w.bw = bw
}
return w.bw.Write(p)
@@ -292,6 +305,38 @@ func (w *bufferedWriter) Flush() error {
return err
}
+type bufferedWriterTimeoutWriter bufferedWriter
+
+func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) {
+ return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p)
+}
+
+// writeWithByteTimeout writes to conn.
+// If more than timeout passes without any bytes being written to the connection,
+// the write fails.
+func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) {
+ if timeout <= 0 {
+ return conn.Write(p)
+ }
+ for {
+ var now time.Time
+ if group == nil {
+ now = time.Now()
+ } else {
+ now = group.Now()
+ }
+ conn.SetWriteDeadline(now.Add(timeout))
+ nn, err := conn.Write(p[n:])
+ n += nn
+ if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) {
+			// Either we finished the write, made no progress, or got a non-deadline error.
+ // Whichever it is, we're done now.
+ conn.SetWriteDeadline(time.Time{})
+ return n, err
+ }
+ }
+}
+
func mustUint31(v int32) uint32 {
if v < 0 || v > 2147483647 {
panic("out of range")
@@ -383,3 +428,14 @@ func validPseudoPath(v string) bool {
// makes that struct also non-comparable, and generally doesn't add
// any size (as long as it's first).
type incomparable [0]func()
+
+// synctestGroupInterface is the methods of synctestGroup used by Server and Transport.
+// It's defined as an interface here to let us keep synctestGroup entirely test-only
+// and not a part of non-test builds.
+type synctestGroupInterface interface {
+ Join()
+ Now() time.Time
+ NewTimer(d time.Duration) timer
+ AfterFunc(d time.Duration, f func()) timer
+ ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc)
+}
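A hedged usage sketch (not part of the change) for writeWithByteTimeout above: the write deadline is re-armed after every chunk that makes progress, so a slow but moving peer keeps the write alive while a fully stalled one fails. Assuming conn is any net.Conn and payload holds buffered frame bytes:

	// A nil group means real time (time.Now) is used for deadlines.
	n, err := writeWithByteTimeout(nil, conn, 5*time.Second, payload)
	if errors.Is(err, os.ErrDeadlineExceeded) {
		// No bytes were accepted for a full 5 seconds; n reports how much
		// of payload was written before the connection stalled.
	}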
diff --git a/http2/http2_test.go b/http2/http2_test.go
index a16774b7ff..b1e71f1532 100644
--- a/http2/http2_test.go
+++ b/http2/http2_test.go
@@ -8,7 +8,6 @@ import (
"bytes"
"flag"
"fmt"
- "io/ioutil"
"net/http"
"os"
"path/filepath"
@@ -266,7 +265,7 @@ func TestNoUnicodeStrings(t *testing.T) {
return nil
}
- contents, err := ioutil.ReadFile(path)
+ contents, err := os.ReadFile(path)
if err != nil {
t.Fatal(err)
}
@@ -284,3 +283,11 @@ func TestNoUnicodeStrings(t *testing.T) {
t.Fatal(err)
}
}
+
+// must returns v if err is nil, or panics otherwise.
+func must[T any](v T, err error) T {
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
diff --git a/http2/netconn_test.go b/http2/netconn_test.go
new file mode 100644
index 0000000000..0f1b5fb1f3
--- /dev/null
+++ b/http2/netconn_test.go
@@ -0,0 +1,356 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "io"
+ "math"
+ "net"
+ "net/netip"
+ "os"
+ "sync"
+ "time"
+)
+
+// synctestNetPipe creates an in-memory, full duplex network connection.
+// Read and write timeouts are managed by the synctest group.
+//
+// Unlike net.Pipe, the connection is not synchronous.
+// Writes are made to a buffer, and return immediately.
+// By default, the buffer size is unlimited.
+func synctestNetPipe(group *synctestGroup) (r, w *synctestNetConn) {
+ s1addr := net.TCPAddrFromAddrPort(netip.MustParseAddrPort("127.0.0.1:8000"))
+ s2addr := net.TCPAddrFromAddrPort(netip.MustParseAddrPort("127.0.0.1:8001"))
+ s1 := newSynctestNetConnHalf(s1addr)
+ s2 := newSynctestNetConnHalf(s2addr)
+ r = &synctestNetConn{group: group, loc: s1, rem: s2}
+ w = &synctestNetConn{group: group, loc: s2, rem: s1}
+ r.peer = w
+ w.peer = r
+ return r, w
+}
+
+// A synctestNetConn is one endpoint of the connection created by synctestNetPipe.
+type synctestNetConn struct {
+ group *synctestGroup
+
+ // local and remote connection halves.
+ // Each half contains a buffer.
+ // Reads pull from the local buffer, and writes push to the remote buffer.
+ loc, rem *synctestNetConnHalf
+
+ // When set, group.Wait is automatically called before reads and after writes.
+ autoWait bool
+
+ // peer is the other endpoint.
+ peer *synctestNetConn
+}
+
+// Read reads data from the connection.
+func (c *synctestNetConn) Read(b []byte) (n int, err error) {
+ if c.autoWait {
+ c.group.Wait()
+ }
+ return c.loc.read(b)
+}
+
+// Peek returns the unread contents of the read buffer,
+// without consuming them.
+func (c *synctestNetConn) Peek() []byte {
+ if c.autoWait {
+ c.group.Wait()
+ }
+ return c.loc.peek()
+}
+
+// Write writes data to the connection.
+func (c *synctestNetConn) Write(b []byte) (n int, err error) {
+ if c.autoWait {
+ defer c.group.Wait()
+ }
+ return c.rem.write(b)
+}
+
+// IsClosedByPeer reports whether the peer has closed its end of the connection.
+func (c *synctestNetConn) IsClosedByPeer() bool {
+ if c.autoWait {
+ c.group.Wait()
+ }
+ return c.loc.isClosedByPeer()
+}
+
+// Close closes the connection.
+func (c *synctestNetConn) Close() error {
+ c.loc.setWriteError(errors.New("connection closed by peer"))
+ c.rem.setReadError(io.EOF)
+ if c.autoWait {
+ c.group.Wait()
+ }
+ return nil
+}
+
+// LocalAddr returns the (fake) local network address.
+func (c *synctestNetConn) LocalAddr() net.Addr {
+ return c.loc.addr
+}
+
+// RemoteAddr returns the (fake) remote network address.
+func (c *synctestNetConn) RemoteAddr() net.Addr {
+ return c.rem.addr
+}
+
+// SetDeadline sets the read and write deadlines for the connection.
+func (c *synctestNetConn) SetDeadline(t time.Time) error {
+ c.SetReadDeadline(t)
+ c.SetWriteDeadline(t)
+ return nil
+}
+
+// SetReadDeadline sets the read deadline for the connection.
+func (c *synctestNetConn) SetReadDeadline(t time.Time) error {
+ c.loc.rctx.setDeadline(c.group, t)
+ return nil
+}
+
+// SetWriteDeadline sets the write deadline for the connection.
+func (c *synctestNetConn) SetWriteDeadline(t time.Time) error {
+ c.rem.wctx.setDeadline(c.group, t)
+ return nil
+}
+
+// SetReadBufferSize sets the read buffer limit for the connection.
+// Writes by the peer will block so long as the buffer is full.
+func (c *synctestNetConn) SetReadBufferSize(size int) {
+ c.loc.setReadBufferSize(size)
+}
+
+// synctestNetConnHalf is one data flow in the connection created by synctestNetPipe.
+// Each half contains a buffer. Writes to the half push to the buffer, and reads pull from it.
+type synctestNetConnHalf struct {
+ addr net.Addr
+
+ // Read and write timeouts.
+ rctx, wctx deadlineContext
+
+ // A half can be readable and/or writable.
+ //
+ // These four channels act as a lock,
+ // and allow waiting for readability/writability.
+ // When the half is unlocked, exactly one channel contains a value.
+ // When the half is locked, all channels are empty.
+ lockr chan struct{} // readable
+ lockw chan struct{} // writable
+ lockrw chan struct{} // readable and writable
+ lockc chan struct{} // neither readable nor writable
+
+ bufMax int // maximum buffer size
+ buf bytes.Buffer
+ readErr error // error returned by reads
+ writeErr error // error returned by writes
+}
+
+func newSynctestNetConnHalf(addr net.Addr) *synctestNetConnHalf {
+ h := &synctestNetConnHalf{
+ addr: addr,
+ lockw: make(chan struct{}, 1),
+ lockr: make(chan struct{}, 1),
+ lockrw: make(chan struct{}, 1),
+ lockc: make(chan struct{}, 1),
+ bufMax: math.MaxInt, // unlimited
+ }
+ h.unlock()
+ return h
+}
+
+func (h *synctestNetConnHalf) lock() {
+ select {
+ case <-h.lockw:
+ case <-h.lockr:
+ case <-h.lockrw:
+ case <-h.lockc:
+ }
+}
+
+func (h *synctestNetConnHalf) unlock() {
+ canRead := h.readErr != nil || h.buf.Len() > 0
+ canWrite := h.writeErr != nil || h.bufMax > h.buf.Len()
+ switch {
+ case canRead && canWrite:
+ h.lockrw <- struct{}{}
+ case canRead:
+ h.lockr <- struct{}{}
+ case canWrite:
+ h.lockw <- struct{}{}
+ default:
+ h.lockc <- struct{}{}
+ }
+}
+
+func (h *synctestNetConnHalf) readWaitAndLock() error {
+ select {
+ case <-h.lockr:
+ return nil
+ case <-h.lockrw:
+ return nil
+ default:
+ }
+ ctx := h.rctx.context()
+ select {
+ case <-h.lockr:
+ return nil
+ case <-h.lockrw:
+ return nil
+ case <-ctx.Done():
+ return context.Cause(ctx)
+ }
+}
+
+func (h *synctestNetConnHalf) writeWaitAndLock() error {
+ select {
+ case <-h.lockw:
+ return nil
+ case <-h.lockrw:
+ return nil
+ default:
+ }
+ ctx := h.wctx.context()
+ select {
+ case <-h.lockw:
+ return nil
+ case <-h.lockrw:
+ return nil
+ case <-ctx.Done():
+ return context.Cause(ctx)
+ }
+}
+
+func (h *synctestNetConnHalf) peek() []byte {
+ h.lock()
+ defer h.unlock()
+ return h.buf.Bytes()
+}
+
+func (h *synctestNetConnHalf) isClosedByPeer() bool {
+ h.lock()
+ defer h.unlock()
+ return h.readErr != nil
+}
+
+func (h *synctestNetConnHalf) read(b []byte) (n int, err error) {
+ if err := h.readWaitAndLock(); err != nil {
+ return 0, err
+ }
+ defer h.unlock()
+ if h.buf.Len() == 0 && h.readErr != nil {
+ return 0, h.readErr
+ }
+ return h.buf.Read(b)
+}
+
+func (h *synctestNetConnHalf) setReadBufferSize(size int) {
+ h.lock()
+ defer h.unlock()
+ h.bufMax = size
+}
+
+func (h *synctestNetConnHalf) write(b []byte) (n int, err error) {
+ for n < len(b) {
+ nn, err := h.writePartial(b[n:])
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+}
+
+func (h *synctestNetConnHalf) writePartial(b []byte) (n int, err error) {
+ if err := h.writeWaitAndLock(); err != nil {
+ return 0, err
+ }
+ defer h.unlock()
+ if h.writeErr != nil {
+ return 0, h.writeErr
+ }
+ writeMax := h.bufMax - h.buf.Len()
+ if writeMax < len(b) {
+ b = b[:writeMax]
+ }
+ return h.buf.Write(b)
+}
+
+func (h *synctestNetConnHalf) setReadError(err error) {
+ h.lock()
+ defer h.unlock()
+ if h.readErr == nil {
+ h.readErr = err
+ }
+}
+
+func (h *synctestNetConnHalf) setWriteError(err error) {
+ h.lock()
+ defer h.unlock()
+ if h.writeErr == nil {
+ h.writeErr = err
+ }
+}
+
+// deadlineContext converts a changeable deadline (as in net.Conn.SetDeadline) into a Context.
+type deadlineContext struct {
+ mu sync.Mutex
+ ctx context.Context
+ cancel context.CancelCauseFunc
+ timer timer
+}
+
+// context returns a Context which expires when the deadline does.
+func (t *deadlineContext) context() context.Context {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.ctx == nil {
+ t.ctx, t.cancel = context.WithCancelCause(context.Background())
+ }
+ return t.ctx
+}
+
+// setDeadline sets the current deadline.
+func (t *deadlineContext) setDeadline(group *synctestGroup, deadline time.Time) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ // If t.ctx is non-nil and t.cancel is nil, then t.ctx was canceled
+ // and we should create a new one.
+ if t.ctx == nil || t.cancel == nil {
+ t.ctx, t.cancel = context.WithCancelCause(context.Background())
+ }
+ // Stop any existing deadline from expiring.
+ if t.timer != nil {
+ t.timer.Stop()
+ }
+ if deadline.IsZero() {
+ // No deadline.
+ return
+ }
+ if !deadline.After(group.Now()) {
+ // Deadline has already expired.
+ t.cancel(os.ErrDeadlineExceeded)
+ t.cancel = nil
+ return
+ }
+ if t.timer != nil {
+ // Reuse existing deadline timer.
+ t.timer.Reset(deadline.Sub(group.Now()))
+ return
+ }
+ // Create a new timer to cancel the context at the deadline.
+ t.timer = group.AfterFunc(deadline.Sub(group.Now()), func() {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ t.cancel(os.ErrDeadlineExceeded)
+ t.cancel = nil
+ })
+}
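A brief sketch (not part of the change) of how these pieces fit together, assuming t is the test's *testing.T and g is a *synctestGroup:

	cli, srv := synctestNetPipe(g)
	cli.autoWait = true // settle the group around client reads and writes

	cli.Write([]byte("ping"))
	buf := make([]byte, 4)
	srv.Read(buf) // reads "ping" from the in-memory buffer

	// A deadline in the past cancels the read context immediately, so the
	// next read fails via deadlineContext with os.ErrDeadlineExceeded.
	srv.SetReadDeadline(g.Now().Add(-time.Second))
	if _, err := srv.Read(buf); !errors.Is(err, os.ErrDeadlineExceeded) {
		t.Fatalf("got %v, want os.ErrDeadlineExceeded", err)
	}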
diff --git a/http2/pipe_test.go b/http2/pipe_test.go
index 67562a92a1..326b94deb5 100644
--- a/http2/pipe_test.go
+++ b/http2/pipe_test.go
@@ -8,7 +8,6 @@ import (
"bytes"
"errors"
"io"
- "io/ioutil"
"testing"
)
@@ -85,7 +84,7 @@ func TestPipeCloseWithError(t *testing.T) {
io.WriteString(p, body)
a := errors.New("test error")
p.CloseWithError(a)
- all, err := ioutil.ReadAll(p)
+ all, err := io.ReadAll(p)
if string(all) != body {
t.Errorf("read bytes = %q; want %q", all, body)
}
@@ -112,7 +111,7 @@ func TestPipeBreakWithError(t *testing.T) {
io.WriteString(p, "foo")
a := errors.New("test err")
p.BreakWithError(a)
- all, err := ioutil.ReadAll(p)
+ all, err := io.ReadAll(p)
if string(all) != "" {
t.Errorf("read bytes = %q; want empty string", all)
}
diff --git a/http2/server.go b/http2/server.go
index ce2e8b40ee..b55547aec6 100644
--- a/http2/server.go
+++ b/http2/server.go
@@ -29,6 +29,7 @@ import (
"bufio"
"bytes"
"context"
+ "crypto/rand"
"crypto/tls"
"errors"
"fmt"
@@ -52,10 +53,14 @@ import (
)
const (
- prefaceTimeout = 10 * time.Second
- firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
- handlerChunkWriteSize = 4 << 10
- defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
+ prefaceTimeout = 10 * time.Second
+ firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
+ handlerChunkWriteSize = 4 << 10
+ defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
+
+ // maxQueuedControlFrames is the maximum number of control frames like
+ // SETTINGS, PING and RST_STREAM that will be queued for writing before
+ // the connection is closed to prevent memory exhaustion attacks.
maxQueuedControlFrames = 10000
)
@@ -127,6 +132,22 @@ type Server struct {
// If zero or negative, there is no timeout.
IdleTimeout time.Duration
+ // ReadIdleTimeout is the timeout after which a health check using a ping
+ // frame will be carried out if no frame is received on the connection.
+ // If zero, no health check is performed.
+ ReadIdleTimeout time.Duration
+
+ // PingTimeout is the timeout after which the connection will be closed
+ // if a response to a ping is not received.
+ // If zero, a default of 15 seconds is used.
+ PingTimeout time.Duration
+
+ // WriteByteTimeout is the timeout after which a connection will be
+ // closed if no data can be written to it. The timeout begins when data is
+ // available to write, and is extended whenever any bytes are written.
+ // If zero or negative, there is no timeout.
+ WriteByteTimeout time.Duration
+
// MaxUploadBufferPerConnection is the size of the initial flow
// control window for each connections. The HTTP/2 spec does not
// allow this to be smaller than 65535 or larger than 2^32-1.
@@ -154,57 +175,39 @@ type Server struct {
// so that we don't embed a Mutex in this struct, which will make the
// struct non-copyable, which might break some callers.
state *serverInternalState
-}
-
-func (s *Server) initialConnRecvWindowSize() int32 {
- if s.MaxUploadBufferPerConnection >= initialWindowSize {
- return s.MaxUploadBufferPerConnection
- }
- return 1 << 20
-}
-func (s *Server) initialStreamRecvWindowSize() int32 {
- if s.MaxUploadBufferPerStream > 0 {
- return s.MaxUploadBufferPerStream
- }
- return 1 << 20
+ // Synchronization group used for testing.
+ // Outside of tests, this is nil.
+ group synctestGroupInterface
}
-func (s *Server) maxReadFrameSize() uint32 {
- if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize {
- return v
+func (s *Server) markNewGoroutine() {
+ if s.group != nil {
+ s.group.Join()
}
- return defaultMaxReadFrameSize
}
-func (s *Server) maxConcurrentStreams() uint32 {
- if v := s.MaxConcurrentStreams; v > 0 {
- return v
+func (s *Server) now() time.Time {
+ if s.group != nil {
+ return s.group.Now()
}
- return defaultMaxStreams
+ return time.Now()
}
-func (s *Server) maxDecoderHeaderTableSize() uint32 {
- if v := s.MaxDecoderHeaderTableSize; v > 0 {
- return v
+// newTimer creates a new time.Timer, or a synthetic timer in tests.
+func (s *Server) newTimer(d time.Duration) timer {
+ if s.group != nil {
+ return s.group.NewTimer(d)
}
- return initialHeaderTableSize
+ return timeTimer{time.NewTimer(d)}
}
-func (s *Server) maxEncoderHeaderTableSize() uint32 {
- if v := s.MaxEncoderHeaderTableSize; v > 0 {
- return v
+// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
+func (s *Server) afterFunc(d time.Duration, f func()) timer {
+ if s.group != nil {
+ return s.group.AfterFunc(d, f)
}
- return initialHeaderTableSize
-}
-
-// maxQueuedControlFrames is the maximum number of control frames like
-// SETTINGS, PING and RST_STREAM that will be queued for writing before
-// the connection is closed to prevent memory exhaustion attacks.
-func (s *Server) maxQueuedControlFrames() int {
- // TODO: if anybody asks, add a Server field, and remember to define the
- // behavior of negative values.
- return maxQueuedControlFrames
+ return timeTimer{time.AfterFunc(d, f)}
}
type serverInternalState struct {
@@ -303,7 +306,7 @@ func ConfigureServer(s *http.Server, conf *Server) error {
if s.TLSNextProto == nil {
s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}
}
- protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) {
+ protoHandler := func(hs *http.Server, c net.Conn, h http.Handler, sawClientPreface bool) {
if testHookOnConn != nil {
testHookOnConn()
}
@@ -320,12 +323,31 @@ func ConfigureServer(s *http.Server, conf *Server) error {
ctx = bc.BaseContext()
}
conf.ServeConn(c, &ServeConnOpts{
- Context: ctx,
- Handler: h,
- BaseConfig: hs,
+ Context: ctx,
+ Handler: h,
+ BaseConfig: hs,
+ SawClientPreface: sawClientPreface,
})
}
- s.TLSNextProto[NextProtoTLS] = protoHandler
+ s.TLSNextProto[NextProtoTLS] = func(hs *http.Server, c *tls.Conn, h http.Handler) {
+ protoHandler(hs, c, h, false)
+ }
+ // The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns.
+ //
+ // A connection passed in this method has already had the HTTP/2 preface read from it.
+ s.TLSNextProto[nextProtoUnencryptedHTTP2] = func(hs *http.Server, c *tls.Conn, h http.Handler) {
+ nc, err := unencryptedNetConnFromTLSConn(c)
+ if err != nil {
+ if lg := hs.ErrorLog; lg != nil {
+ lg.Print(err)
+ } else {
+ log.Print(err)
+ }
+ go c.Close()
+ return
+ }
+ protoHandler(hs, nc, h, true)
+ }
return nil
}
@@ -400,16 +422,22 @@ func (o *ServeConnOpts) handler() http.Handler {
//
// The opts parameter is optional. If nil, default values are used.
func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
+ s.serveConn(c, opts, nil)
+}
+
+func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverConn)) {
baseCtx, cancel := serverConnBaseContext(c, opts)
defer cancel()
+ http1srv := opts.baseConfig()
+ conf := configFromServer(http1srv, s)
sc := &serverConn{
srv: s,
- hs: opts.baseConfig(),
+ hs: http1srv,
conn: c,
baseCtx: baseCtx,
remoteAddrStr: c.RemoteAddr().String(),
- bw: newBufferedWriter(c),
+ bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout),
handler: opts.handler(),
streams: make(map[uint32]*stream),
readFrameCh: make(chan readFrameResult),
@@ -419,13 +447,19 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
doneServing: make(chan struct{}),
clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
- advMaxStreams: s.maxConcurrentStreams(),
+ advMaxStreams: conf.MaxConcurrentStreams,
initialStreamSendWindowSize: initialWindowSize,
+ initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream,
maxFrameSize: initialMaxFrameSize,
+ pingTimeout: conf.PingTimeout,
+ countErrorFunc: conf.CountError,
serveG: newGoroutineLock(),
pushEnabled: true,
sawClientPreface: opts.SawClientPreface,
}
+ if newf != nil {
+ newf(sc)
+ }
s.state.registerConn(sc)
defer s.state.unregisterConn(sc)
@@ -451,15 +485,15 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
sc.flow.add(initialWindowSize)
sc.inflow.init(initialWindowSize)
sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
- sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize())
+ sc.hpackEncoder.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize)
fr := NewFramer(sc.bw, c)
- if s.CountError != nil {
- fr.countError = s.CountError
+ if conf.CountError != nil {
+ fr.countError = conf.CountError
}
- fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil)
+ fr.ReadMetaHeaders = hpack.NewDecoder(conf.MaxDecoderHeaderTableSize, nil)
fr.MaxHeaderListSize = sc.maxHeaderListSize()
- fr.SetMaxReadFrameSize(s.maxReadFrameSize())
+ fr.SetMaxReadFrameSize(conf.MaxReadFrameSize)
sc.framer = fr
if tc, ok := c.(connectionStater); ok {
@@ -492,7 +526,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
// So for now, do nothing here again.
}
- if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
+ if !conf.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
// "Endpoints MAY choose to generate a connection error
// (Section 5.4.1) of type INADEQUATE_SECURITY if one of
// the prohibited cipher suites are negotiated."
@@ -529,7 +563,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
opts.UpgradeRequest = nil
}
- sc.serve()
+ sc.serve(conf)
}
func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) {
@@ -569,6 +603,7 @@ type serverConn struct {
tlsState *tls.ConnectionState // shared by all handlers, like net/http
remoteAddrStr string
writeSched WriteScheduler
+ countErrorFunc func(errType string)
// Everything following is owned by the serve loop; use serveG.check():
serveG goroutineLock // used to verify funcs are on serve()
@@ -588,6 +623,7 @@ type serverConn struct {
streams map[uint32]*stream
unstartedHandlers []unstartedHandler
initialStreamSendWindowSize int32
+ initialStreamRecvWindowSize int32
maxFrameSize int32
peerMaxHeaderListSize uint32 // zero means unknown (default)
canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
@@ -598,9 +634,14 @@ type serverConn struct {
inGoAway bool // we've started to or sent GOAWAY
inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
needToSendGoAway bool // we need to schedule a GOAWAY frame write
+ pingSent bool
+ sentPingData [8]byte
goAwayCode ErrCode
- shutdownTimer *time.Timer // nil until used
- idleTimer *time.Timer // nil if unused
+ shutdownTimer timer // nil until used
+ idleTimer timer // nil if unused
+ readIdleTimeout time.Duration
+ pingTimeout time.Duration
+ readIdleTimer timer // nil if unused
// Owned by the writeFrameAsync goroutine:
headerWriteBuf bytes.Buffer
@@ -615,11 +656,7 @@ func (sc *serverConn) maxHeaderListSize() uint32 {
if n <= 0 {
n = http.DefaultMaxHeaderBytes
}
- // http2's count is in a slightly different unit and includes 32 bytes per pair.
- // So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
- const perFieldOverhead = 32 // per http2 spec
- const typicalHeaders = 10 // conservative
- return uint32(n + typicalHeaders*perFieldOverhead)
+ return uint32(adjustHTTP1MaxHeaderSize(int64(n)))
}
func (sc *serverConn) curOpenStreams() uint32 {
@@ -649,12 +686,12 @@ type stream struct {
flow outflow // limits writing from Handler to client
inflow inflow // what the client is allowed to POST/etc to us
state streamState
- resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
- gotTrailerHeader bool // HEADER frame for trailers was seen
- wroteHeaders bool // whether we wrote headers (not status 100)
- readDeadline *time.Timer // nil if unused
- writeDeadline *time.Timer // nil if unused
- closeErr error // set before cw is closed
+ resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
+ gotTrailerHeader bool // HEADER frame for trailers was seen
+ wroteHeaders bool // whether we wrote headers (not status 100)
+ readDeadline timer // nil if unused
+ writeDeadline timer // nil if unused
+ closeErr error // set before cw is closed
trailer http.Header // accumulated trailers
reqTrailer http.Header // handler's Request.Trailer
@@ -732,11 +769,7 @@ func isClosedConnError(err error) bool {
return false
}
- // TODO: remove this string search and be more like the Windows
- // case below. That might involve modifying the standard library
- // to return better error types.
- str := err.Error()
- if strings.Contains(str, "use of closed network connection") {
+ if errors.Is(err, net.ErrClosed) {
return true
}
@@ -815,8 +848,9 @@ type readFrameResult struct {
// consumer is done with the frame.
// It's run on its own goroutine.
func (sc *serverConn) readFrames() {
- gate := make(gate)
- gateDone := gate.Done
+ sc.srv.markNewGoroutine()
+ gate := make(chan struct{})
+ gateDone := func() { gate <- struct{}{} }
for {
f, err := sc.framer.ReadFrame()
select {
@@ -847,6 +881,7 @@ type frameWriteResult struct {
// At most one goroutine can be running writeFrameAsync at a time per
// serverConn.
func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) {
+ sc.srv.markNewGoroutine()
var err error
if wd == nil {
err = wr.write.writeFrame(sc)
@@ -885,7 +920,7 @@ func (sc *serverConn) notePanic() {
}
}
-func (sc *serverConn) serve() {
+func (sc *serverConn) serve(conf http2Config) {
sc.serveG.check()
defer sc.notePanic()
defer sc.conn.Close()
@@ -897,20 +932,24 @@ func (sc *serverConn) serve() {
sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
}
+ settings := writeSettings{
+ {SettingMaxFrameSize, conf.MaxReadFrameSize},
+ {SettingMaxConcurrentStreams, sc.advMaxStreams},
+ {SettingMaxHeaderListSize, sc.maxHeaderListSize()},
+ {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize},
+ {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)},
+ }
+ if !disableExtendedConnectProtocol {
+ settings = append(settings, Setting{SettingEnableConnectProtocol, 1})
+ }
sc.writeFrame(FrameWriteRequest{
- write: writeSettings{
- {SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
- {SettingMaxConcurrentStreams, sc.advMaxStreams},
- {SettingMaxHeaderListSize, sc.maxHeaderListSize()},
- {SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()},
- {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())},
- },
+ write: settings,
})
sc.unackedSettings++
// Each connection starts with initialWindowSize inflow tokens.
// If a higher value is configured, we add more tokens.
- if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 {
+ if diff := conf.MaxUploadBufferPerConnection - initialWindowSize; diff > 0 {
sc.sendWindowUpdate(nil, int(diff))
}
@@ -926,15 +965,22 @@ func (sc *serverConn) serve() {
sc.setConnState(http.StateIdle)
if sc.srv.IdleTimeout > 0 {
- sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
+ sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
defer sc.idleTimer.Stop()
}
+ if conf.SendPingTimeout > 0 {
+ sc.readIdleTimeout = conf.SendPingTimeout
+ sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer)
+ defer sc.readIdleTimer.Stop()
+ }
+
go sc.readFrames() // closed by defer sc.conn.Close above
- settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer)
+ settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer)
defer settingsTimer.Stop()
+ lastFrameTime := sc.srv.now()
loopNum := 0
for {
loopNum++
@@ -948,6 +994,7 @@ func (sc *serverConn) serve() {
case res := <-sc.wroteFrameCh:
sc.wroteFrame(res)
case res := <-sc.readFrameCh:
+ lastFrameTime = sc.srv.now()
// Process any written frames before reading new frames from the client since a
// written frame could have triggered a new stream to be started.
if sc.writingFrameAsync {
@@ -979,6 +1026,8 @@ func (sc *serverConn) serve() {
case idleTimerMsg:
sc.vlogf("connection is idle")
sc.goAway(ErrCodeNo)
+ case readIdleTimerMsg:
+ sc.handlePingTimer(lastFrameTime)
case shutdownTimerMsg:
sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
return
@@ -1001,7 +1050,7 @@ func (sc *serverConn) serve() {
// If the peer is causing us to generate a lot of control frames,
// but not reading them from us, assume they are trying to make us
// run out of memory.
- if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() {
+ if sc.queuedControlFrames > maxQueuedControlFrames {
sc.vlogf("http2: too many control frames in send queue, closing connection")
return
}
@@ -1017,12 +1066,39 @@ func (sc *serverConn) serve() {
}
}
+func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) {
+ if sc.pingSent {
+ sc.vlogf("timeout waiting for PING response")
+ sc.conn.Close()
+ return
+ }
+
+ pingAt := lastFrameReadTime.Add(sc.readIdleTimeout)
+ now := sc.srv.now()
+ if pingAt.After(now) {
+ // We received frames since arming the ping timer.
+ // Reset it for the next possible timeout.
+ sc.readIdleTimer.Reset(pingAt.Sub(now))
+ return
+ }
+
+ sc.pingSent = true
+	// Ignore crypto/rand.Read errors: it generally can't fail, and the worst case if it does
+ // is we send a PING frame containing 0s.
+ _, _ = rand.Read(sc.sentPingData[:])
+ sc.writeFrame(FrameWriteRequest{
+ write: &writePing{data: sc.sentPingData},
+ })
+ sc.readIdleTimer.Reset(sc.pingTimeout)
+}
+
type serverMessage int
// Message values sent to serveMsgCh.
var (
settingsTimerMsg = new(serverMessage)
idleTimerMsg = new(serverMessage)
+ readIdleTimerMsg = new(serverMessage)
shutdownTimerMsg = new(serverMessage)
gracefulShutdownMsg = new(serverMessage)
handlerDoneMsg = new(serverMessage)
@@ -1030,6 +1106,7 @@ var (
func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) }
+func (sc *serverConn) onReadIdleTimer() { sc.sendServeMsg(readIdleTimerMsg) }
func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) }
func (sc *serverConn) sendServeMsg(msg interface{}) {
@@ -1061,10 +1138,10 @@ func (sc *serverConn) readPreface() error {
errc <- nil
}
}()
- timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
+ timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server?
defer timer.Stop()
select {
- case <-timer.C:
+ case <-timer.C():
return errPrefaceTimeout
case err := <-errc:
if err == nil {
@@ -1282,6 +1359,10 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) {
sc.writingFrame = false
sc.writingFrameAsync = false
+ if res.err != nil {
+ sc.conn.Close()
+ }
+
wr := res.wr
if writeEndsStream(wr.write) {
@@ -1429,7 +1510,7 @@ func (sc *serverConn) goAway(code ErrCode) {
func (sc *serverConn) shutDownIn(d time.Duration) {
sc.serveG.check()
- sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
+ sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer)
}
func (sc *serverConn) resetStream(se StreamError) {
@@ -1482,6 +1563,11 @@ func (sc *serverConn) processFrameFromReader(res readFrameResult) bool {
sc.goAway(ErrCodeFlowControl)
return true
case ConnectionError:
+ if res.f != nil {
+ if id := res.f.Header().StreamID; id > sc.maxClientStreamID {
+ sc.maxClientStreamID = id
+ }
+ }
sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev)
sc.goAway(ErrCode(ev))
return true // goAway will handle shutdown
@@ -1551,6 +1637,11 @@ func (sc *serverConn) processFrame(f Frame) error {
func (sc *serverConn) processPing(f *PingFrame) error {
sc.serveG.check()
if f.IsAck() {
+ if sc.pingSent && sc.sentPingData == f.Data {
+ // This is a response to a PING we sent.
+ sc.pingSent = false
+ sc.readIdleTimer.Reset(sc.readIdleTimeout)
+ }
// 6.7 PING: " An endpoint MUST NOT respond to PING frames
// containing this flag."
return nil
@@ -1638,7 +1729,7 @@ func (sc *serverConn) closeStream(st *stream, err error) {
delete(sc.streams, st.id)
if len(sc.streams) == 0 {
sc.setConnState(http.StateIdle)
- if sc.srv.IdleTimeout > 0 {
+ if sc.srv.IdleTimeout > 0 && sc.idleTimer != nil {
sc.idleTimer.Reset(sc.srv.IdleTimeout)
}
if h1ServerKeepAlivesDisabled(sc.hs) {
@@ -1660,6 +1751,7 @@ func (sc *serverConn) closeStream(st *stream, err error) {
}
}
st.closeErr = err
+ st.cancelCtx()
st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
sc.writeSched.CloseStream(st.id)
}
@@ -1713,6 +1805,9 @@ func (sc *serverConn) processSetting(s Setting) error {
sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31
case SettingMaxHeaderListSize:
sc.peerMaxHeaderListSize = s.Val
+ case SettingEnableConnectProtocol:
+ // Receipt of this parameter by a server does not
+ // have any impact
default:
// Unknown setting: "An endpoint that receives a SETTINGS
// frame with any unknown or unsupported identifier MUST
@@ -2020,7 +2115,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
// (in Go 1.8), though. That's a more sane option anyway.
if sc.hs.ReadTimeout > 0 {
sc.conn.SetReadDeadline(time.Time{})
- st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
+ st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
}
return sc.scheduleHandler(id, rw, req, handler)
@@ -2116,9 +2211,9 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
st.cw.Init()
st.flow.conn = &sc.flow // link to conn-level counter
st.flow.add(sc.initialStreamSendWindowSize)
- st.inflow.init(sc.srv.initialStreamRecvWindowSize())
+ st.inflow.init(sc.initialStreamRecvWindowSize)
if sc.hs.WriteTimeout > 0 {
- st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
+ st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
}
sc.streams[id] = st
@@ -2143,11 +2238,17 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
scheme: f.PseudoValue("scheme"),
authority: f.PseudoValue("authority"),
path: f.PseudoValue("path"),
+ protocol: f.PseudoValue("protocol"),
+ }
+
+ // extended connect is disabled, so we should not see :protocol
+ if disableExtendedConnectProtocol && rp.protocol != "" {
+ return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
}
isConnect := rp.method == "CONNECT"
if isConnect {
- if rp.path != "" || rp.scheme != "" || rp.authority == "" {
+ if rp.protocol == "" && (rp.path != "" || rp.scheme != "" || rp.authority == "") {
return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
}
} else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
@@ -2171,6 +2272,9 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
if rp.authority == "" {
rp.authority = rp.header.Get("Host")
}
+ if rp.protocol != "" {
+ rp.header.Set(":protocol", rp.protocol)
+ }
rw, req, err := sc.newWriterAndRequestNoBody(st, rp)
if err != nil {
@@ -2197,6 +2301,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
type requestParam struct {
method string
scheme, authority, path string
+ protocol string
header http.Header
}
@@ -2238,7 +2343,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r
var url_ *url.URL
var requestURI string
- if rp.method == "CONNECT" {
+ if rp.method == "CONNECT" && rp.protocol == "" {
url_ = &url.URL{Host: rp.authority}
requestURI = rp.authority // mimic HTTP/1 server behavior
} else {
@@ -2342,6 +2447,7 @@ func (sc *serverConn) handlerDone() {
// Run on its own goroutine.
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
+ sc.srv.markNewGoroutine()
defer sc.sendServeMsg(handlerDoneMsg)
didPanic := true
defer func() {
@@ -2638,7 +2744,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
var date string
if _, ok := rws.snapHeader["Date"]; !ok {
// TODO(bradfitz): be faster here, like net/http? measure.
- date = time.Now().UTC().Format(http.TimeFormat)
+ date = rws.conn.srv.now().UTC().Format(http.TimeFormat)
}
for _, v := range rws.snapHeader["Trailer"] {
@@ -2760,7 +2866,7 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() {
func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
st := w.rws.stream
- if !deadline.IsZero() && deadline.Before(time.Now()) {
+ if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
// If we're setting a deadline in the past, reset the stream immediately
// so writes after SetWriteDeadline returns will fail.
st.onReadTimeout()
@@ -2776,9 +2882,9 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
if deadline.IsZero() {
st.readDeadline = nil
} else if st.readDeadline == nil {
- st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout)
+ st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout)
} else {
- st.readDeadline.Reset(deadline.Sub(time.Now()))
+ st.readDeadline.Reset(deadline.Sub(sc.srv.now()))
}
})
return nil
@@ -2786,7 +2892,7 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
st := w.rws.stream
- if !deadline.IsZero() && deadline.Before(time.Now()) {
+ if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
// If we're setting a deadline in the past, reset the stream immediately
// so writes after SetWriteDeadline returns will fail.
st.onWriteTimeout()
@@ -2802,14 +2908,19 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
if deadline.IsZero() {
st.writeDeadline = nil
} else if st.writeDeadline == nil {
- st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout)
+ st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout)
} else {
- st.writeDeadline.Reset(deadline.Sub(time.Now()))
+ st.writeDeadline.Reset(deadline.Sub(sc.srv.now()))
}
})
return nil
}
+func (w *responseWriter) EnableFullDuplex() error {
+ // We always support full duplex responses, so this is a no-op.
+ return nil
+}
+
func (w *responseWriter) Flush() {
w.FlushError()
}
@@ -3256,7 +3367,7 @@ func (sc *serverConn) countError(name string, err error) error {
if sc == nil || sc.srv == nil {
return err
}
- f := sc.srv.CountError
+ f := sc.countErrorFunc
if f == nil {
return err
}
diff --git a/http2/server_push_test.go b/http2/server_push_test.go
index cda8f43367..69e4c3b12d 100644
--- a/http2/server_push_test.go
+++ b/http2/server_push_test.go
@@ -8,7 +8,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"net/http"
"reflect"
"runtime"
@@ -40,7 +39,7 @@ func TestServer_Push_Success(t *testing.T) {
if r.Body == nil {
return fmt.Errorf("nil Body")
}
- if buf, err := ioutil.ReadAll(r.Body); err != nil || len(buf) != 0 {
+ if buf, err := io.ReadAll(r.Body); err != nil || len(buf) != 0 {
return fmt.Errorf("ReadAll(Body)=%q,%v, want '',nil", buf, err)
}
return nil
@@ -106,7 +105,7 @@ func TestServer_Push_Success(t *testing.T) {
errc <- fmt.Errorf("unknown RequestURL %q", r.URL.RequestURI())
}
})
- stURL = st.ts.URL
+ stURL = "https://" + st.authority()
// Send one request, which should push two responses.
st.greet()
@@ -170,7 +169,7 @@ func TestServer_Push_Success(t *testing.T) {
return checkPushPromise(f, 2, [][2]string{
{":method", "GET"},
{":scheme", "https"},
- {":authority", st.ts.Listener.Addr().String()},
+ {":authority", st.authority()},
{":path", "/pushed?get"},
{"user-agent", userAgent},
})
@@ -179,7 +178,7 @@ func TestServer_Push_Success(t *testing.T) {
return checkPushPromise(f, 4, [][2]string{
{":method", "HEAD"},
{":scheme", "https"},
- {":authority", st.ts.Listener.Addr().String()},
+ {":authority", st.authority()},
{":path", "/pushed?head"},
{"cookie", cookie},
{"user-agent", userAgent},
@@ -219,12 +218,12 @@ func TestServer_Push_Success(t *testing.T) {
consumed := map[uint32]int{}
for k := 0; len(expected) > 0; k++ {
- f, err := st.readFrame()
- if err != nil {
+ f := st.readFrame()
+ if f == nil {
for id, left := range expected {
t.Errorf("stream %d: missing %d frames", id, len(left))
}
- t.Fatalf("readFrame %d: %v", k, err)
+ break
}
id := f.Header().StreamID
label := fmt.Sprintf("stream %d, frame %d", id, consumed[id])
@@ -340,10 +339,10 @@ func testServer_Push_RejectSingleRequest(t *testing.T, doPush func(http.Pusher,
t.Error(err)
}
// Should not get a PUSH_PROMISE frame.
- hf := st.wantHeaders()
- if !hf.StreamEnded() {
- t.Error("stream should end after headers")
- }
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: true,
+ })
}
func TestServer_Push_RejectIfDisabled(t *testing.T) {
@@ -460,7 +459,7 @@ func TestServer_Push_StateTransitions(t *testing.T) {
}
getSlash(st)
// After the PUSH_PROMISE is sent, the stream should be stateHalfClosedRemote.
- st.wantPushPromise()
+ _ = readFrame[*PushPromiseFrame](t, st)
if got, want := st.streamState(2), stateHalfClosedRemote; got != want {
t.Fatalf("streamState(2)=%v, want %v", got, want)
}
@@ -469,10 +468,10 @@ func TestServer_Push_StateTransitions(t *testing.T) {
// the stream before we check st.streamState(2) -- should that happen, we'll
// see stateClosed and fail the above check.
close(gotPromise)
- st.wantHeaders()
- if df := st.wantData(); !df.StreamEnded() {
- t.Fatal("expected END_STREAM flag on DATA")
- }
+ st.wantHeaders(wantHeader{
+ streamID: 2,
+ endStream: false,
+ })
if got, want := st.streamState(2), stateClosed; got != want {
t.Fatalf("streamState(2)=%v, want %v", got, want)
}
@@ -555,9 +554,9 @@ func TestServer_Push_Underflow(t *testing.T) {
numPushPromises := 0
numHeaders := 0
for numHeaders < numRequests*2 || numPushPromises < numRequests {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatal(err)
+ f := st.readFrame()
+ if f == nil {
+ st.t.Fatal("conn is idle, want frame")
}
switch f := f.(type) {
case *HeadersFrame:
diff --git a/http2/server_test.go b/http2/server_test.go
index a931a06e57..201cf0d00e 100644
--- a/http2/server_test.go
+++ b/http2/server_test.go
@@ -14,8 +14,8 @@ import (
"flag"
"fmt"
"io"
- "io/ioutil"
"log"
+ "math"
"net"
"net/http"
"net/http/httptest"
@@ -38,7 +38,7 @@ func stderrv() io.Writer {
return os.Stderr
}
- return ioutil.Discard
+ return io.Discard
}
type safeBuffer struct {
@@ -65,16 +65,16 @@ func (sb *safeBuffer) Len() int {
}
type serverTester struct {
- cc net.Conn // client conn
- t testing.TB
- ts *httptest.Server
- fr *Framer
- serverLogBuf safeBuffer // logger for httptest.Server
- logFilter []string // substrings to filter out
- scMu sync.Mutex // guards sc
- sc *serverConn
- hpackDec *hpack.Decoder
- decodedHeaders [][2]string
+ cc net.Conn // client conn
+ t testing.TB
+ group *synctestGroup
+ h1server *http.Server
+ h2server *Server
+ serverLogBuf safeBuffer // logger for httptest.Server
+ logFilter []string // substrings to filter out
+ scMu sync.Mutex // guards sc
+ sc *serverConn
+ testConnFramer
// If http2debug!=2, then we capture Frame debug logs that will be written
// to t.Log after a test fails. The read and write logs use separate locks
@@ -101,23 +101,153 @@ func resetHooks() {
testHookOnPanicMu.Unlock()
}
+func newTestServer(t testing.TB, handler http.HandlerFunc, opts ...interface{}) *httptest.Server {
+ ts := httptest.NewUnstartedServer(handler)
+ ts.EnableHTTP2 = true
+ ts.Config.ErrorLog = log.New(twriter{t: t}, "", log.LstdFlags)
+ h2server := new(Server)
+ for _, opt := range opts {
+ switch v := opt.(type) {
+ case func(*httptest.Server):
+ v(ts)
+ case func(*http.Server):
+ v(ts.Config)
+ case func(*Server):
+ v(h2server)
+ default:
+ t.Fatalf("unknown newTestServer option type %T", v)
+ }
+ }
+ ConfigureServer(ts.Config, h2server)
+
+ // ConfigureServer populates ts.Config.TLSConfig.
+ // Copy it to ts.TLS as well.
+ ts.TLS = ts.Config.TLSConfig
+
+	// Go 1.22 changes the default minimum TLS version to TLS 1.2.
+	// In order to properly test cases where we want to reject low
+ // TLS versions, we need to explicitly configure the minimum
+ // version here.
+ ts.Config.TLSConfig.MinVersion = tls.VersionTLS10
+
+ ts.StartTLS()
+ t.Cleanup(func() {
+ ts.CloseClientConnections()
+ ts.Close()
+ })
+
+ return ts
+}
+
type serverTesterOpt string
-var optOnlyServer = serverTesterOpt("only_server")
-var optQuiet = serverTesterOpt("quiet_logging")
var optFramerReuseFrames = serverTesterOpt("frame_reuse_frames")
+var optQuiet = func(server *http.Server) {
+ server.ErrorLog = log.New(io.Discard, "", 0)
+}
+
func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{}) *serverTester {
+ t.Helper()
+ g := newSynctest(time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC))
+ t.Cleanup(func() {
+ g.Close(t)
+ })
+
+ h1server := &http.Server{}
+ h2server := &Server{
+ group: g,
+ }
+ tlsState := tls.ConnectionState{
+ Version: tls.VersionTLS13,
+ ServerName: "go.dev",
+ CipherSuite: tls.TLS_AES_128_GCM_SHA256,
+ }
+ for _, opt := range opts {
+ switch v := opt.(type) {
+ case func(*Server):
+ v(h2server)
+ case func(*http.Server):
+ v(h1server)
+ case func(*tls.ConnectionState):
+ v(&tlsState)
+ default:
+ t.Fatalf("unknown newServerTester option type %T", v)
+ }
+ }
+ ConfigureServer(h1server, h2server)
+
+ cli, srv := synctestNetPipe(g)
+ cli.SetReadDeadline(g.Now())
+ cli.autoWait = true
+
+ st := &serverTester{
+ t: t,
+ cc: cli,
+ group: g,
+ h1server: h1server,
+ h2server: h2server,
+ }
+ st.hpackEnc = hpack.NewEncoder(&st.headerBuf)
+ if h1server.ErrorLog == nil {
+ h1server.ErrorLog = log.New(io.MultiWriter(stderrv(), twriter{t: t, st: st}, &st.serverLogBuf), "", log.LstdFlags)
+ }
+
+ t.Cleanup(func() {
+ st.Close()
+ g.AdvanceTime(goAwayTimeout) // give server time to shut down
+ })
+
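+ // Run the server's connection handler on the server end of the in-memory
+ // pipe, capturing its *serverConn so tests can inspect server state.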
+ connc := make(chan *serverConn)
+ go func() {
+ g.Join()
+ h2server.serveConn(&netConnWithConnectionState{
+ Conn: srv,
+ state: tlsState,
+ }, &ServeConnOpts{
+ Handler: handler,
+ BaseConfig: h1server,
+ }, func(sc *serverConn) {
+ connc <- sc
+ })
+ }()
+ st.sc = <-connc
+
+ st.fr = NewFramer(st.cc, st.cc)
+ st.testConnFramer = testConnFramer{
+ t: t,
+ fr: NewFramer(st.cc, st.cc),
+ dec: hpack.NewDecoder(initialHeaderTableSize, nil),
+ }
+ g.Wait()
+ return st
+}
+
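+// netConnWithConnectionState wraps a net.Conn and reports a fixed TLS
+// connection state, letting tests present the in-memory connection to the
+// server as if it were a TLS connection.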
+type netConnWithConnectionState struct {
+ net.Conn
+ state tls.ConnectionState
+}
+
+func (c *netConnWithConnectionState) ConnectionState() tls.ConnectionState {
+ return c.state
+}
+
+// newServerTesterWithRealConn creates a test server listening on a localhost port.
+// Mostly superseded by newServerTester, which creates a test server using a fake
+// net.Conn and synthetic time. This function is still around because some benchmarks
+// rely on it; new tests should use newServerTester.
+func newServerTesterWithRealConn(t testing.TB, handler http.HandlerFunc, opts ...interface{}) *serverTester {
resetHooks()
ts := httptest.NewUnstartedServer(handler)
+ t.Cleanup(ts.Close)
tlsConfig := &tls.Config{
InsecureSkipVerify: true,
NextProtos: []string{NextProtoTLS},
}
- var onlyServer, quiet, framerReuseFrames bool
+ var framerReuseFrames bool
h2server := new(Server)
for _, opt := range opts {
switch v := opt.(type) {
@@ -125,14 +255,12 @@ func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{}
v(tlsConfig)
case func(*httptest.Server):
v(ts)
+ case func(*http.Server):
+ v(ts.Config)
case func(*Server):
v(h2server)
case serverTesterOpt:
switch v {
- case optOnlyServer:
- onlyServer = true
- case optQuiet:
- quiet = true
case optFramerReuseFrames:
framerReuseFrames = true
}
@@ -152,16 +280,12 @@ func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{}
ts.Config.TLSConfig.MinVersion = tls.VersionTLS10
st := &serverTester{
- t: t,
- ts: ts,
+ t: t,
}
st.hpackEnc = hpack.NewEncoder(&st.headerBuf)
- st.hpackDec = hpack.NewDecoder(initialHeaderTableSize, st.onHeaderField)
ts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config
- if quiet {
- ts.Config.ErrorLog = log.New(ioutil.Discard, "", 0)
- } else {
+ if ts.Config.ErrorLog == nil {
ts.Config.ErrorLog = log.New(io.MultiWriter(stderrv(), twriter{t: t, st: st}, &st.serverLogBuf), "", log.LstdFlags)
}
ts.StartTLS()
@@ -175,36 +299,54 @@ func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{}
st.sc = v
}
log.SetOutput(io.MultiWriter(stderrv(), twriter{t: t, st: st}))
- if !onlyServer {
- cc, err := tls.Dial("tcp", ts.Listener.Addr().String(), tlsConfig)
- if err != nil {
- t.Fatal(err)
+ cc, err := tls.Dial("tcp", ts.Listener.Addr().String(), tlsConfig)
+ if err != nil {
+ t.Fatal(err)
+ }
+ st.cc = cc
+ st.testConnFramer = testConnFramer{
+ t: t,
+ fr: NewFramer(st.cc, st.cc),
+ dec: hpack.NewDecoder(initialHeaderTableSize, nil),
+ }
+ if framerReuseFrames {
+ st.fr.SetReuseFrames()
+ }
+ if !logFrameReads && !logFrameWrites {
+ st.fr.debugReadLoggerf = func(m string, v ...interface{}) {
+ m = time.Now().Format("2006-01-02 15:04:05.999999999 ") + strings.TrimPrefix(m, "http2: ") + "\n"
+ st.frameReadLogMu.Lock()
+ fmt.Fprintf(&st.frameReadLogBuf, m, v...)
+ st.frameReadLogMu.Unlock()
}
- st.cc = cc
- st.fr = NewFramer(cc, cc)
- if framerReuseFrames {
- st.fr.SetReuseFrames()
- }
- if !logFrameReads && !logFrameWrites {
- st.fr.debugReadLoggerf = func(m string, v ...interface{}) {
- m = time.Now().Format("2006-01-02 15:04:05.999999999 ") + strings.TrimPrefix(m, "http2: ") + "\n"
- st.frameReadLogMu.Lock()
- fmt.Fprintf(&st.frameReadLogBuf, m, v...)
- st.frameReadLogMu.Unlock()
- }
- st.fr.debugWriteLoggerf = func(m string, v ...interface{}) {
- m = time.Now().Format("2006-01-02 15:04:05.999999999 ") + strings.TrimPrefix(m, "http2: ") + "\n"
- st.frameWriteLogMu.Lock()
- fmt.Fprintf(&st.frameWriteLogBuf, m, v...)
- st.frameWriteLogMu.Unlock()
- }
- st.fr.logReads = true
- st.fr.logWrites = true
+ st.fr.debugWriteLoggerf = func(m string, v ...interface{}) {
+ m = time.Now().Format("2006-01-02 15:04:05.999999999 ") + strings.TrimPrefix(m, "http2: ") + "\n"
+ st.frameWriteLogMu.Lock()
+ fmt.Fprintf(&st.frameWriteLogBuf, m, v...)
+ st.frameWriteLogMu.Unlock()
}
+ st.fr.logReads = true
+ st.fr.logWrites = true
}
return st
}
+// sync waits for all goroutines to idle.
+func (st *serverTester) sync() {
+ if st.group != nil {
+ st.group.Wait()
+ }
+}
+
+// advance advances synthetic time by a duration.
+func (st *serverTester) advance(d time.Duration) {
+ st.group.AdvanceTime(d)
+}
+
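+// authority returns the default :authority value used when encoding test
+// request headers.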
+func (st *serverTester) authority() string {
+ return "dummy.tld"
+}
+
func (st *serverTester) closeConn() {
st.scMu.Lock()
defer st.scMu.Unlock()
@@ -280,7 +422,6 @@ func (st *serverTester) Close() {
st.cc.Close()
}
}
- st.ts.Close()
if st.cc != nil {
st.cc.Close()
}
@@ -290,13 +431,16 @@ func (st *serverTester) Close() {
// greet initiates the client's HTTP/2 connection into a state where
// frames may be sent.
func (st *serverTester) greet() {
+ st.t.Helper()
st.greetAndCheckSettings(func(Setting) error { return nil })
}
func (st *serverTester) greetAndCheckSettings(checkSetting func(s Setting) error) {
+ st.t.Helper()
st.writePreface()
- st.writeInitialSettings()
- st.wantSettings().ForeachSetting(checkSetting)
+ st.writeSettings()
+ st.sync()
+ readFrame[*SettingsFrame](st.t, st).ForeachSetting(checkSetting)
st.writeSettingsAck()
// The initial WINDOW_UPDATE and SETTINGS ACK can come in any order.
@@ -304,9 +448,9 @@ func (st *serverTester) greetAndCheckSettings(checkSetting func(s Setting) error
var gotWindowUpdate bool
for i := 0; i < 2; i++ {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatal(err)
+ f := st.readFrame()
+ if f == nil {
+ st.t.Fatal("wanted a settings ACK and window update, got none")
}
switch f := f.(type) {
case *SettingsFrame:
@@ -319,7 +463,8 @@ func (st *serverTester) greetAndCheckSettings(checkSetting func(s Setting) error
if f.FrameHeader.StreamID != 0 {
st.t.Fatalf("WindowUpdate StreamID = %d; want 0", f.FrameHeader.StreamID)
}
- incr := uint32(st.sc.srv.initialConnRecvWindowSize() - initialWindowSize)
+ conf := configFromServer(st.sc.hs, st.sc.srv)
+ incr := uint32(conf.MaxUploadBufferPerConnection - initialWindowSize)
if f.Increment != incr {
st.t.Fatalf("WindowUpdate increment = %d; want %d", f.Increment, incr)
}
@@ -348,34 +493,6 @@ func (st *serverTester) writePreface() {
}
}
-func (st *serverTester) writeInitialSettings() {
- if err := st.fr.WriteSettings(); err != nil {
- if runtime.GOOS == "openbsd" && strings.HasSuffix(err.Error(), "write: broken pipe") {
- st.t.Logf("Error writing initial SETTINGS frame from client to server: %v", err)
- st.t.Skipf("Skipping test with known OpenBSD failure mode. (See https://go.dev/issue/52208.)")
- }
- st.t.Fatalf("Error writing initial SETTINGS frame from client to server: %v", err)
- }
-}
-
-func (st *serverTester) writeSettingsAck() {
- if err := st.fr.WriteSettingsAck(); err != nil {
- st.t.Fatalf("Error writing ACK of server's SETTINGS: %v", err)
- }
-}
-
-func (st *serverTester) writeHeaders(p HeadersFrameParam) {
- if err := st.fr.WriteHeaders(p); err != nil {
- st.t.Fatalf("Error writing HEADERS: %v", err)
- }
-}
-
-func (st *serverTester) writePriority(id uint32, p PriorityParam) {
- if err := st.fr.WritePriority(id, p); err != nil {
- st.t.Fatalf("Error writing PRIORITY: %v", err)
- }
-}
-
func (st *serverTester) encodeHeaderField(k, v string) {
err := st.hpackEnc.WriteField(hpack.HeaderField{Name: k, Value: v})
if err != nil {
@@ -409,7 +526,7 @@ func (st *serverTester) encodeHeader(headers ...string) []byte {
}
st.headerBuf.Reset()
- defaultAuthority := st.ts.Listener.Addr().String()
+ defaultAuthority := st.authority()
if len(headers) == 0 {
// Fast path, mostly for benchmarks, so test code doesn't pollute
@@ -474,150 +591,13 @@ func (st *serverTester) bodylessReq1(headers ...string) {
})
}
-func (st *serverTester) writeData(streamID uint32, endStream bool, data []byte) {
- if err := st.fr.WriteData(streamID, endStream, data); err != nil {
- st.t.Fatalf("Error writing DATA: %v", err)
- }
-}
-
-func (st *serverTester) writeDataPadded(streamID uint32, endStream bool, data, pad []byte) {
- if err := st.fr.WriteDataPadded(streamID, endStream, data, pad); err != nil {
- st.t.Fatalf("Error writing DATA: %v", err)
- }
-}
-
-// writeReadPing sends a PING and immediately reads the PING ACK.
-// It will fail if any other unread data was pending on the connection.
-func (st *serverTester) writeReadPing() {
- data := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
- if err := st.fr.WritePing(false, data); err != nil {
- st.t.Fatalf("Error writing PING: %v", err)
- }
- p := st.wantPing()
- if p.Flags&FlagPingAck == 0 {
- st.t.Fatalf("got a PING, want a PING ACK")
- }
- if p.Data != data {
- st.t.Fatalf("got PING data = %x, want %x", p.Data, data)
- }
-}
-
-func (st *serverTester) readFrame() (Frame, error) {
- return st.fr.ReadFrame()
-}
-
-func (st *serverTester) wantHeaders() *HeadersFrame {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatalf("Error while expecting a HEADERS frame: %v", err)
- }
- hf, ok := f.(*HeadersFrame)
- if !ok {
- st.t.Fatalf("got a %T; want *HeadersFrame", f)
- }
- return hf
-}
-
-func (st *serverTester) wantContinuation() *ContinuationFrame {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatalf("Error while expecting a CONTINUATION frame: %v", err)
- }
- cf, ok := f.(*ContinuationFrame)
- if !ok {
- st.t.Fatalf("got a %T; want *ContinuationFrame", f)
- }
- return cf
-}
-
-func (st *serverTester) wantData() *DataFrame {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatalf("Error while expecting a DATA frame: %v", err)
- }
- df, ok := f.(*DataFrame)
- if !ok {
- st.t.Fatalf("got a %T; want *DataFrame", f)
- }
- return df
-}
-
-func (st *serverTester) wantSettings() *SettingsFrame {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatalf("Error while expecting a SETTINGS frame: %v", err)
- }
- sf, ok := f.(*SettingsFrame)
- if !ok {
- st.t.Fatalf("got a %T; want *SettingsFrame", f)
- }
- return sf
-}
-
-func (st *serverTester) wantPing() *PingFrame {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatalf("Error while expecting a PING frame: %v", err)
- }
- pf, ok := f.(*PingFrame)
- if !ok {
- st.t.Fatalf("got a %T; want *PingFrame", f)
- }
- return pf
-}
-
-func (st *serverTester) wantGoAway() *GoAwayFrame {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatalf("Error while expecting a GOAWAY frame: %v", err)
- }
- gf, ok := f.(*GoAwayFrame)
- if !ok {
- st.t.Fatalf("got a %T; want *GoAwayFrame", f)
- }
- return gf
-}
-
-func (st *serverTester) wantRSTStream(streamID uint32, errCode ErrCode) {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatalf("Error while expecting an RSTStream frame: %v", err)
- }
- rs, ok := f.(*RSTStreamFrame)
- if !ok {
- st.t.Fatalf("got a %T; want *RSTStreamFrame", f)
- }
- if rs.FrameHeader.StreamID != streamID {
- st.t.Fatalf("RSTStream StreamID = %d; want %d", rs.FrameHeader.StreamID, streamID)
- }
- if rs.ErrCode != errCode {
- st.t.Fatalf("RSTStream ErrCode = %d (%s); want %d (%s)", rs.ErrCode, rs.ErrCode, errCode, errCode)
- }
-}
-
-func (st *serverTester) wantWindowUpdate(streamID, incr uint32) {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatalf("Error while expecting a WINDOW_UPDATE frame: %v", err)
- }
- wu, ok := f.(*WindowUpdateFrame)
- if !ok {
- st.t.Fatalf("got a %T; want *WindowUpdateFrame", f)
- }
- if wu.FrameHeader.StreamID != streamID {
- st.t.Fatalf("WindowUpdate StreamID = %d; want %d", wu.FrameHeader.StreamID, streamID)
- }
- if wu.Increment != incr {
- st.t.Fatalf("WindowUpdate increment = %d; want %d", wu.Increment, incr)
- }
-}
-
func (st *serverTester) wantFlowControlConsumed(streamID, consumed int32) {
+ conf := configFromServer(st.sc.hs, st.sc.srv)
var initial int32
if streamID == 0 {
- initial = st.sc.srv.initialConnRecvWindowSize()
+ initial = conf.MaxUploadBufferPerConnection
} else {
- initial = st.sc.srv.initialStreamRecvWindowSize()
+ initial = conf.MaxUploadBufferPerStream
}
donec := make(chan struct{})
st.sc.sendServeMsg(func(sc *serverConn) {
@@ -634,32 +614,6 @@ func (st *serverTester) wantFlowControlConsumed(streamID, consumed int32) {
<-donec
}
-func (st *serverTester) wantSettingsAck() {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatal(err)
- }
- sf, ok := f.(*SettingsFrame)
- if !ok {
- st.t.Fatalf("Wanting a settings ACK, received a %T", f)
- }
- if !sf.Header().Flags.Has(FlagSettingsAck) {
- st.t.Fatal("Settings Frame didn't have ACK set")
- }
-}
-
-func (st *serverTester) wantPushPromise() *PushPromiseFrame {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatal(err)
- }
- ppf, ok := f.(*PushPromiseFrame)
- if !ok {
- st.t.Fatalf("Wanted PushPromise, received %T", ppf)
- }
- return ppf
-}
-
func TestServer(t *testing.T) {
gotReq := make(chan bool, 1)
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
@@ -668,12 +622,6 @@ func TestServer(t *testing.T) {
})
defer st.Close()
- covers("3.5", `
- The server connection preface consists of a potentially empty
- SETTINGS frame ([SETTINGS]) that MUST be the first frame the
- server sends in the HTTP/2 connection.
- `)
-
st.greet()
st.writeHeaders(HeadersFrameParam{
StreamID: 1, // clients send odd numbers
@@ -866,7 +814,7 @@ func testBodyContents(t *testing.T, wantContentLength int64, wantBody string, wr
if r.ContentLength != wantContentLength {
t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength)
}
- all, err := ioutil.ReadAll(r.Body)
+ all, err := io.ReadAll(r.Body)
if err != nil {
t.Fatal(err)
}
@@ -887,7 +835,7 @@ func testBodyContentsFail(t *testing.T, wantContentLength int64, wantReadError s
if r.ContentLength != wantContentLength {
t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength)
}
- all, err := ioutil.ReadAll(r.Body)
+ all, err := io.ReadAll(r.Body)
if err == nil {
t.Fatalf("expected an error (%q) reading from the body. Successfully read %q instead.",
wantReadError, all)
@@ -1095,37 +1043,32 @@ func testRejectRequest(t *testing.T, send func(*serverTester)) {
st.wantRSTStream(1, ErrCodeProtocol)
}
-func testRejectRequestWithProtocolError(t *testing.T, send func(*serverTester)) {
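+// newServerTesterForError returns a greeted serverTester whose handler fails
+// the test if it is ever invoked; it is used by tests that expect the server
+// to reject the connection before a request reaches the handler.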
+func newServerTesterForError(t *testing.T) *serverTester {
+ t.Helper()
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
t.Error("server request made it to handler; should've been rejected")
}, optQuiet)
- defer st.Close()
-
st.greet()
- send(st)
- gf := st.wantGoAway()
- if gf.ErrCode != ErrCodeProtocol {
- t.Errorf("err code = %v; want %v", gf.ErrCode, ErrCodeProtocol)
- }
+ return st
}
// Section 5.1, on idle connections: "Receiving any frame other than
// HEADERS or PRIORITY on a stream in this state MUST be treated as a
// connection error (Section 5.4.1) of type PROTOCOL_ERROR."
func TestRejectFrameOnIdle_WindowUpdate(t *testing.T) {
- testRejectRequestWithProtocolError(t, func(st *serverTester) {
- st.fr.WriteWindowUpdate(123, 456)
- })
+ st := newServerTesterForError(t)
+ st.fr.WriteWindowUpdate(123, 456)
+ st.wantGoAway(123, ErrCodeProtocol)
}
func TestRejectFrameOnIdle_Data(t *testing.T) {
- testRejectRequestWithProtocolError(t, func(st *serverTester) {
- st.fr.WriteData(123, true, nil)
- })
+ st := newServerTesterForError(t)
+ st.fr.WriteData(123, true, nil)
+ st.wantGoAway(123, ErrCodeProtocol)
}
func TestRejectFrameOnIdle_RSTStream(t *testing.T) {
- testRejectRequestWithProtocolError(t, func(st *serverTester) {
- st.fr.WriteRSTStream(123, ErrCodeCancel)
- })
+ st := newServerTesterForError(t)
+ st.fr.WriteRSTStream(123, ErrCodeCancel)
+ st.wantGoAway(123, ErrCodeProtocol)
}
func TestServer_Request_Connect(t *testing.T) {
@@ -1199,7 +1142,7 @@ func TestServer_Ping(t *testing.T) {
t.Fatal(err)
}
- pf := st.wantPing()
+ pf := readFrame[*PingFrame](t, st)
if !pf.Flags.Has(FlagPingAck) {
t.Error("response ping doesn't have ACK set")
}
@@ -1222,38 +1165,36 @@ func (l *filterListener) Accept() (net.Conn, error) {
}
func TestServer_MaxQueuedControlFrames(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping in short mode")
- }
+ // Goroutine debugging makes this test very slow.
+ disableGoroutineTracking(t)
- st := newServerTester(t, nil, func(ts *httptest.Server) {
- // TCP buffer sizes on test systems aren't under our control and can be large.
- // Create a conn that blocks after 10000 bytes written.
- ts.Listener = &filterListener{
- Listener: ts.Listener,
- accept: func(conn net.Conn) (net.Conn, error) {
- return newBlockingWriteConn(conn, 10000), nil
- },
- }
- })
- defer st.Close()
+ st := newServerTester(t, nil)
st.greet()
- const extraPings = 500000 // enough to fill the TCP buffers
+ st.cc.(*synctestNetConn).SetReadBufferSize(0) // all writes block
+ st.cc.(*synctestNetConn).autoWait = false // don't sync after every write
+ // Send maxQueuedControlFrames pings, plus a few extra
+ // to account for ones that enter the server's write buffer.
+ const extraPings = 2
for i := 0; i < maxQueuedControlFrames+extraPings; i++ {
pingData := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
- if err := st.fr.WritePing(false, pingData); err != nil {
- if i == 0 {
- t.Fatal(err)
- }
- // We expect the connection to get closed by the server when the TCP
- // buffer fills up and the write queue reaches MaxQueuedControlFrames.
- t.Logf("sent %d PING frames", i)
- return
+ st.fr.WritePing(false, pingData)
+ }
+ st.group.Wait()
+
+ // Unblock the server.
+ // It should have closed the connection after exceeding the control frame limit.
+ st.cc.(*synctestNetConn).SetReadBufferSize(math.MaxInt)
+
+ st.advance(goAwayTimeout)
+ // Some frames may have persisted in the server's buffers.
+ for i := 0; i < 10; i++ {
+ if st.readFrame() == nil {
+ break
}
}
- t.Errorf("unexpected success sending all PING frames")
+ st.wantClosed()
}
func TestServer_RejectsLargeFrames(t *testing.T) {
@@ -1269,15 +1210,9 @@ func TestServer_RejectsLargeFrames(t *testing.T) {
 // will only read the first 9 bytes (the header) and then disconnect.
st.fr.WriteRawFrame(0xff, 0, 0, make([]byte, defaultMaxReadFrameSize+1))
- gf := st.wantGoAway()
- if gf.ErrCode != ErrCodeFrameSize {
- t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFrameSize)
- }
- if st.serverLogBuf.Len() != 0 {
- // Previously we spun here for a bit until the GOAWAY disconnect
- // timer fired, logging while we fired.
- t.Errorf("unexpected server output: %.500s\n", st.serverLogBuf.Bytes())
- }
+ st.wantGoAway(0, ErrCodeFrameSize)
+ st.advance(goAwayTimeout)
+ st.wantClosed()
}
func TestServer_Handler_Sends_WindowUpdate(t *testing.T) {
@@ -1303,7 +1238,6 @@ func TestServer_Handler_Sends_WindowUpdate(t *testing.T) {
EndStream: false, // data coming
EndHeaders: true,
})
- st.writeReadPing()
// Write less than half the max window of data and consume it.
// The server doesn't return flow control yet, buffering the 1024 bytes to
@@ -1311,20 +1245,17 @@ func TestServer_Handler_Sends_WindowUpdate(t *testing.T) {
data := make([]byte, windowSize)
st.writeData(1, false, data[:1024])
puppet.do(readBodyHandler(t, string(data[:1024])))
- st.writeReadPing()
// Write up to the window limit.
// The server returns the buffered credit.
st.writeData(1, false, data[1024:])
st.wantWindowUpdate(0, 1024)
st.wantWindowUpdate(1, 1024)
- st.writeReadPing()
// The handler consumes the data and the server returns credit.
puppet.do(readBodyHandler(t, string(data[1024:])))
st.wantWindowUpdate(0, windowSize-1024)
st.wantWindowUpdate(1, windowSize-1024)
- st.writeReadPing()
}
// the version of the TestServer_Handler_Sends_WindowUpdate with padding.
@@ -1348,7 +1279,6 @@ func TestServer_Handler_Sends_WindowUpdate_Padding(t *testing.T) {
EndStream: false,
EndHeaders: true,
})
- st.writeReadPing()
// Write half a window of data, with some padding.
// The server doesn't return the padding yet, buffering the 5 bytes to combine
@@ -1356,7 +1286,6 @@ func TestServer_Handler_Sends_WindowUpdate_Padding(t *testing.T) {
data := make([]byte, windowSize/2)
pad := make([]byte, 4)
st.writeDataPadded(1, false, data, pad)
- st.writeReadPing()
// The handler consumes the body.
// The server returns flow control for the body and padding
@@ -1373,13 +1302,7 @@ func TestServer_Send_GoAway_After_Bogus_WindowUpdate(t *testing.T) {
if err := st.fr.WriteWindowUpdate(0, 1<<31-1); err != nil {
t.Fatal(err)
}
- gf := st.wantGoAway()
- if gf.ErrCode != ErrCodeFlowControl {
- t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFlowControl)
- }
- if gf.LastStreamID != 0 {
- t.Errorf("GOAWAY last stream ID = %v; want %v", gf.LastStreamID, 0)
- }
+ st.wantGoAway(0, ErrCodeFlowControl)
}
func TestServer_Send_RstStream_After_Bogus_WindowUpdate(t *testing.T) {
@@ -1586,10 +1509,10 @@ func TestServer_StateTransitions(t *testing.T) {
st.writeData(1, true, nil)
leaveHandler <- true
- hf := st.wantHeaders()
- if !hf.StreamEnded() {
- t.Fatal("expected END_STREAM flag")
- }
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: true,
+ })
if got, want := st.streamState(1), stateClosed; got != want {
t.Errorf("at end, state is %v; want %v", got, want)
@@ -1601,97 +1524,101 @@ func TestServer_StateTransitions(t *testing.T) {
// test HEADERS w/o EndHeaders + another HEADERS (should get rejected)
func TestServer_Rejects_HeadersNoEnd_Then_Headers(t *testing.T) {
- testServerRejectsConn(t, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1,
- BlockFragment: st.encodeHeader(),
- EndStream: true,
- EndHeaders: false,
- })
- st.writeHeaders(HeadersFrameParam{ // Not a continuation.
- StreamID: 3, // different stream.
- BlockFragment: st.encodeHeader(),
- EndStream: true,
- EndHeaders: true,
- })
+ st := newServerTesterForError(t)
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: st.encodeHeader(),
+ EndStream: true,
+ EndHeaders: false,
+ })
+ st.writeHeaders(HeadersFrameParam{ // Not a continuation.
+ StreamID: 3, // different stream.
+ BlockFragment: st.encodeHeader(),
+ EndStream: true,
+ EndHeaders: true,
})
+ st.wantGoAway(0, ErrCodeProtocol)
}
// test HEADERS w/o EndHeaders + PING (should get rejected)
func TestServer_Rejects_HeadersNoEnd_Then_Ping(t *testing.T) {
- testServerRejectsConn(t, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1,
- BlockFragment: st.encodeHeader(),
- EndStream: true,
- EndHeaders: false,
- })
- if err := st.fr.WritePing(false, [8]byte{}); err != nil {
- t.Fatal(err)
- }
+ st := newServerTesterForError(t)
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: st.encodeHeader(),
+ EndStream: true,
+ EndHeaders: false,
})
+ if err := st.fr.WritePing(false, [8]byte{}); err != nil {
+ t.Fatal(err)
+ }
+ st.wantGoAway(0, ErrCodeProtocol)
}
// test HEADERS w/ EndHeaders + a continuation HEADERS (should get rejected)
func TestServer_Rejects_HeadersEnd_Then_Continuation(t *testing.T) {
- testServerRejectsConn(t, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1,
- BlockFragment: st.encodeHeader(),
- EndStream: true,
- EndHeaders: true,
- })
- st.wantHeaders()
- if err := st.fr.WriteContinuation(1, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil {
- t.Fatal(err)
- }
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}, optQuiet)
+ st.greet()
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: st.encodeHeader(),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: true,
})
+ if err := st.fr.WriteContinuation(1, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil {
+ t.Fatal(err)
+ }
+ st.wantGoAway(1, ErrCodeProtocol)
}
// test HEADERS w/o EndHeaders + a continuation HEADERS on wrong stream ID
func TestServer_Rejects_HeadersNoEnd_Then_ContinuationWrongStream(t *testing.T) {
- testServerRejectsConn(t, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1,
- BlockFragment: st.encodeHeader(),
- EndStream: true,
- EndHeaders: false,
- })
- if err := st.fr.WriteContinuation(3, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil {
- t.Fatal(err)
- }
+ st := newServerTesterForError(t)
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: st.encodeHeader(),
+ EndStream: true,
+ EndHeaders: false,
})
+ if err := st.fr.WriteContinuation(3, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil {
+ t.Fatal(err)
+ }
+ st.wantGoAway(0, ErrCodeProtocol)
}
// No HEADERS on stream 0.
func TestServer_Rejects_Headers0(t *testing.T) {
- testServerRejectsConn(t, func(st *serverTester) {
- st.fr.AllowIllegalWrites = true
- st.writeHeaders(HeadersFrameParam{
- StreamID: 0,
- BlockFragment: st.encodeHeader(),
- EndStream: true,
- EndHeaders: true,
- })
+ st := newServerTesterForError(t)
+ st.fr.AllowIllegalWrites = true
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 0,
+ BlockFragment: st.encodeHeader(),
+ EndStream: true,
+ EndHeaders: true,
})
+ st.wantGoAway(0, ErrCodeProtocol)
}
// No CONTINUATION on stream 0.
func TestServer_Rejects_Continuation0(t *testing.T) {
- testServerRejectsConn(t, func(st *serverTester) {
- st.fr.AllowIllegalWrites = true
- if err := st.fr.WriteContinuation(0, true, st.encodeHeader()); err != nil {
- t.Fatal(err)
- }
- })
+ st := newServerTesterForError(t)
+ st.fr.AllowIllegalWrites = true
+ if err := st.fr.WriteContinuation(0, true, st.encodeHeader()); err != nil {
+ t.Fatal(err)
+ }
+ st.wantGoAway(0, ErrCodeProtocol)
}
// No PRIORITY on stream 0.
func TestServer_Rejects_Priority0(t *testing.T) {
- testServerRejectsConn(t, func(st *serverTester) {
- st.fr.AllowIllegalWrites = true
- st.writePriority(0, PriorityParam{StreamDep: 1})
- })
+ st := newServerTesterForError(t)
+ st.fr.AllowIllegalWrites = true
+ st.writePriority(0, PriorityParam{StreamDep: 1})
+ st.wantGoAway(0, ErrCodeProtocol)
}
// No HEADERS frame with a self-dependence.
@@ -1717,36 +1644,15 @@ func TestServer_Rejects_PrioritySelfDependence(t *testing.T) {
}
func TestServer_Rejects_PushPromise(t *testing.T) {
- testServerRejectsConn(t, func(st *serverTester) {
- pp := PushPromiseParam{
- StreamID: 1,
- PromiseID: 3,
- }
- if err := st.fr.WritePushPromise(pp); err != nil {
- t.Fatal(err)
- }
- })
-}
-
-// testServerRejectsConn tests that the server hangs up with a GOAWAY
-// frame and a server close after the client does something
-// deserving a CONNECTION_ERROR.
-func testServerRejectsConn(t *testing.T, writeReq func(*serverTester)) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {})
- st.addLogFilter("connection error: PROTOCOL_ERROR")
- defer st.Close()
- st.greet()
- writeReq(st)
-
- st.wantGoAway()
-
- fr, err := st.fr.ReadFrame()
- if err == nil {
- t.Errorf("ReadFrame got frame of type %T; want io.EOF", fr)
+ st := newServerTesterForError(t)
+ pp := PushPromiseParam{
+ StreamID: 1,
+ PromiseID: 3,
}
- if err != io.EOF {
- t.Errorf("ReadFrame = %v; want io.EOF", err)
+ if err := st.fr.WritePushPromise(pp); err != nil {
+ t.Fatal(err)
}
+ st.wantGoAway(1, ErrCodeProtocol)
}
// testServerRejectsStream tests that the server sends a RST_STREAM with the provided
@@ -1786,13 +1692,10 @@ func TestServer_Response_NoData(t *testing.T) {
return nil
}, func(st *serverTester) {
getSlash(st)
- hf := st.wantHeaders()
- if !hf.StreamEnded() {
- t.Fatal("want END_STREAM flag")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: true,
+ })
})
}
@@ -1802,22 +1705,15 @@ func TestServer_Response_NoData_Header_FooBar(t *testing.T) {
return nil
}, func(st *serverTester) {
getSlash(st)
- hf := st.wantHeaders()
- if !hf.StreamEnded() {
- t.Fatal("want END_STREAM flag")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- goth := st.decodeHeader(hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "200"},
- {"foo-bar", "some-value"},
- {"content-length", "0"},
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got headers %v; want %v", goth, wanth)
- }
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: true,
+ header: http.Header{
+ ":status": []string{"200"},
+ "foo-bar": []string{"some-value"},
+ "content-length": []string{"0"},
+ },
+ })
})
}
@@ -1862,15 +1758,14 @@ func TestServerIgnoresContentLengthSignWhenWritingChunks(t *testing.T) {
return nil
}, func(st *serverTester) {
getSlash(st)
- hf := st.wantHeaders()
- goth := st.decodeHeader(hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "200"},
- {"content-length", tt.wantCL},
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("For case %q, value %q, got = %q; want %q", tt.name, tt.cl, goth, wanth)
- }
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: true,
+ header: http.Header{
+ ":status": []string{"200"},
+ "content-length": []string{tt.wantCL},
+ },
+ })
})
}
}
@@ -1940,29 +1835,20 @@ func TestServer_Response_Data_Sniff_DoesntOverride(t *testing.T) {
return nil
}, func(st *serverTester) {
getSlash(st)
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("don't want END_STREAM, expecting data")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- goth := st.decodeHeader(hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "200"},
- {"content-type", "foo/bar"},
- {"content-length", strconv.Itoa(len(msg))},
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got headers %v; want %v", goth, wanth)
- }
- df := st.wantData()
- if !df.StreamEnded() {
- t.Error("expected DATA to have END_STREAM flag")
- }
- if got := string(df.Data()); got != msg {
- t.Errorf("got DATA %q; want %q", got, msg)
- }
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: false,
+ header: http.Header{
+ ":status": []string{"200"},
+ "content-type": []string{"foo/bar"},
+ "content-length": []string{strconv.Itoa(len(msg))},
+ },
+ })
+ st.wantData(wantData{
+ streamID: 1,
+ endStream: true,
+ data: []byte(msg),
+ })
})
}
@@ -1974,16 +1860,15 @@ func TestServer_Response_TransferEncoding_chunked(t *testing.T) {
return nil
}, func(st *serverTester) {
getSlash(st)
- hf := st.wantHeaders()
- goth := st.decodeHeader(hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "200"},
- {"content-type", "text/plain; charset=utf-8"},
- {"content-length", strconv.Itoa(len(msg))},
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got headers %v; want %v", goth, wanth)
- }
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: false,
+ header: http.Header{
+ ":status": []string{"200"},
+ "content-type": []string{"text/plain; charset=utf-8"},
+ "content-length": []string{strconv.Itoa(len(msg))},
+ },
+ })
})
}
@@ -1996,22 +1881,15 @@ func TestServer_Response_Data_IgnoreHeaderAfterWrite_After(t *testing.T) {
return nil
}, func(st *serverTester) {
getSlash(st)
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("unexpected END_STREAM")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- goth := st.decodeHeader(hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "200"},
- {"content-type", "text/html; charset=utf-8"},
- {"content-length", strconv.Itoa(len(msg))},
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got headers %v; want %v", goth, wanth)
- }
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: false,
+ header: http.Header{
+ ":status": []string{"200"},
+ "content-type": []string{"text/html; charset=utf-8"},
+ "content-length": []string{strconv.Itoa(len(msg))},
+ },
+ })
})
}
@@ -2025,23 +1903,16 @@ func TestServer_Response_Data_IgnoreHeaderAfterWrite_Overwrite(t *testing.T) {
return nil
}, func(st *serverTester) {
getSlash(st)
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("unexpected END_STREAM")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- goth := st.decodeHeader(hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "200"},
- {"foo", "proper value"},
- {"content-type", "text/html; charset=utf-8"},
- {"content-length", strconv.Itoa(len(msg))},
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got headers %v; want %v", goth, wanth)
- }
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: false,
+ header: http.Header{
+ ":status": []string{"200"},
+ "foo": []string{"proper value"},
+ "content-type": []string{"text/html; charset=utf-8"},
+ "content-length": []string{strconv.Itoa(len(msg))},
+ },
+ })
})
}
@@ -2052,29 +1923,20 @@ func TestServer_Response_Data_SniffLenType(t *testing.T) {
return nil
}, func(st *serverTester) {
getSlash(st)
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("don't want END_STREAM, expecting data")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- goth := st.decodeHeader(hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "200"},
- {"content-type", "text/html; charset=utf-8"},
- {"content-length", strconv.Itoa(len(msg))},
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got headers %v; want %v", goth, wanth)
- }
- df := st.wantData()
- if !df.StreamEnded() {
- t.Error("expected DATA to have END_STREAM flag")
- }
- if got := string(df.Data()); got != msg {
- t.Errorf("got DATA %q; want %q", got, msg)
- }
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: false,
+ header: http.Header{
+ ":status": []string{"200"},
+ "content-type": []string{"text/html; charset=utf-8"},
+ "content-length": []string{strconv.Itoa(len(msg))},
+ },
+ })
+ st.wantData(wantData{
+ streamID: 1,
+ endStream: true,
+ data: []byte(msg),
+ })
})
}
@@ -2088,40 +1950,25 @@ func TestServer_Response_Header_Flush_MidWrite(t *testing.T) {
return nil
}, func(st *serverTester) {
getSlash(st)
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("unexpected END_STREAM flag")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- goth := st.decodeHeader(hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "200"},
- {"content-type", "text/html; charset=utf-8"}, // sniffed
- // and no content-length
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got headers %v; want %v", goth, wanth)
- }
- {
- df := st.wantData()
- if df.StreamEnded() {
- t.Error("unexpected END_STREAM flag")
- }
- if got := string(df.Data()); got != msg {
- t.Errorf("got DATA %q; want %q", got, msg)
- }
- }
- {
- df := st.wantData()
- if !df.StreamEnded() {
- t.Error("wanted END_STREAM flag on last data chunk")
- }
- if got := string(df.Data()); got != msg2 {
- t.Errorf("got DATA %q; want %q", got, msg2)
- }
- }
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: false,
+ header: http.Header{
+ ":status": []string{"200"},
+ "content-type": []string{"text/html; charset=utf-8"}, // sniffed
+ // and no content-length
+ },
+ })
+ st.wantData(wantData{
+ streamID: 1,
+ endStream: false,
+ data: []byte(msg),
+ })
+ st.wantData(wantData{
+ streamID: 1,
+ endStream: true,
+ data: []byte(msg2),
+ })
})
}
@@ -2157,25 +2004,18 @@ func TestServer_Response_LargeWrite(t *testing.T) {
if err := st.fr.WriteWindowUpdate(0, size); err != nil {
t.Fatal(err)
}
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("unexpected END_STREAM flag")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- goth := st.decodeHeader(hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "200"},
- {"content-type", "text/plain; charset=utf-8"}, // sniffed
- // and no content-length
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got headers %v; want %v", goth, wanth)
- }
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: false,
+ header: http.Header{
+ ":status": []string{"200"},
+ "content-type": []string{"text/plain; charset=utf-8"}, // sniffed
+ // and no content-length
+ },
+ })
var bytes, frames int
for {
- df := st.wantData()
+ df := readFrame[*DataFrame](t, st)
bytes += len(df.Data())
frames++
for _, b := range df.Data() {
@@ -2226,27 +2066,26 @@ func TestServer_Response_LargeWrite_FlowControlled(t *testing.T) {
getSlash(st) // make the single request
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("unexpected END_STREAM flag")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: false,
+ })
- df := st.wantData()
- if got := len(df.Data()); got != reads[0] {
- t.Fatalf("Initial window size = %d but got DATA with %d bytes", reads[0], got)
- }
+ st.wantData(wantData{
+ streamID: 1,
+ endStream: false,
+ size: reads[0],
+ })
- for _, quota := range reads[1:] {
+ for i, quota := range reads[1:] {
if err := st.fr.WriteWindowUpdate(1, uint32(quota)); err != nil {
t.Fatal(err)
}
- df := st.wantData()
- if int(quota) != len(df.Data()) {
- t.Fatalf("read %d bytes after giving %d quota", len(df.Data()), quota)
- }
+ st.wantData(wantData{
+ streamID: 1,
+ endStream: i == len(reads[1:])-1,
+ size: quota,
+ })
}
})
}
@@ -2273,13 +2112,10 @@ func TestServer_Response_RST_Unblocks_LargeWrite(t *testing.T) {
getSlash(st) // make the single request
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("unexpected END_STREAM flag")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: false,
+ })
if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil {
t.Fatal(err)
@@ -2301,21 +2137,16 @@ func TestServer_Response_Empty_Data_Not_FlowControlled(t *testing.T) {
getSlash(st) // make the single request
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("unexpected END_STREAM flag")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: false,
+ })
- df := st.wantData()
- if got := len(df.Data()); got != 0 {
- t.Fatalf("unexpected %d DATA bytes; want 0", got)
- }
- if !df.StreamEnded() {
- t.Fatal("DATA didn't have END_STREAM")
- }
+ st.wantData(wantData{
+ streamID: 1,
+ endStream: true,
+ size: 0,
+ })
})
}
@@ -2340,49 +2171,33 @@ func TestServer_Response_Automatic100Continue(t *testing.T) {
EndStream: false,
EndHeaders: true,
})
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("unexpected END_STREAM flag")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- goth := st.decodeHeader(hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "100"},
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Fatalf("Got headers %v; want %v", goth, wanth)
- }
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: false,
+ header: http.Header{
+ ":status": []string{"100"},
+ },
+ })
// Okay, they sent status 100, so we can send our
// gigantic and/or sensitive "foo" payload now.
st.writeData(1, true, []byte(msg))
- hf = st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("expected data to follow")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- goth = st.decodeHeader(hf.HeaderBlockFragment())
- wanth = [][2]string{
- {":status", "200"},
- {"content-type", "text/plain; charset=utf-8"},
- {"content-length", strconv.Itoa(len(reply))},
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got headers %v; want %v", goth, wanth)
- }
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: false,
+ header: http.Header{
+ ":status": []string{"200"},
+ "content-type": []string{"text/plain; charset=utf-8"},
+ "content-length": []string{strconv.Itoa(len(reply))},
+ },
+ })
- df := st.wantData()
- if string(df.Data()) != reply {
- t.Errorf("Client read %q; want %q", df.Data(), reply)
- }
- if !df.StreamEnded() {
- t.Errorf("expect data stream end")
- }
+ st.wantData(wantData{
+ streamID: 1,
+ endStream: true,
+ data: []byte(reply),
+ })
})
}
@@ -2404,13 +2219,10 @@ func TestServer_HandlerWriteErrorOnDisconnect(t *testing.T) {
EndStream: false,
EndHeaders: true,
})
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("unexpected END_STREAM flag")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: false,
+ })
// Close the connection and wait for the handler to (hopefully) notice.
st.cc.Close()
_ = <-errc
@@ -2431,6 +2243,11 @@ func TestServer_Rejects_Too_Many_Streams(t *testing.T) {
<-leaveHandler
})
defer st.Close()
+
+ // Automatically syncing after every write / before every read
+ // slows this test down substantially.
+ st.cc.(*synctestNetConn).autoWait = false
+
st.greet()
nextStreamID := uint32(1)
streamID := func() uint32 {
@@ -2470,11 +2287,16 @@ func TestServer_Rejects_Too_Many_Streams(t *testing.T) {
if err := st.fr.WriteContinuation(rejectID, true, frag2); err != nil {
t.Fatal(err)
}
+ st.sync()
st.wantRSTStream(rejectID, ErrCodeProtocol)
// But let a handler finish:
leaveHandler <- true
- st.wantHeaders()
+ st.sync()
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: true,
+ })
// And now another stream should be able to start:
goodID := streamID()
@@ -2494,14 +2316,14 @@ func TestServer_Response_ManyHeaders_With_Continuation(t *testing.T) {
return nil
}, func(st *serverTester) {
getSlash(st)
- hf := st.wantHeaders()
+ hf := readFrame[*HeadersFrame](t, st)
if hf.HeadersEnded() {
t.Fatal("got unwanted END_HEADERS flag")
}
n := 0
for {
n++
- cf := st.wantContinuation()
+ cf := readFrame[*ContinuationFrame](t, st)
if cf.HeadersEnded() {
break
}
@@ -2530,10 +2352,10 @@ func TestServer_NoCrash_HandlerClose_Then_ClientClose(t *testing.T) {
EndStream: false, // DATA is coming
EndHeaders: true,
})
- hf := st.wantHeaders()
- if !hf.HeadersEnded() || !hf.StreamEnded() {
- t.Fatalf("want END_HEADERS+END_STREAM, got %v", hf)
- }
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: true,
+ })
 // Sent when a Handler closes while a client has
// indicated it's still sending DATA:
@@ -2588,79 +2410,51 @@ func TestServer_NoCrash_HandlerClose_Then_ClientClose(t *testing.T) {
func TestServer_Rejects_TLS10(t *testing.T) { testRejectTLS(t, tls.VersionTLS10) }
func TestServer_Rejects_TLS11(t *testing.T) { testRejectTLS(t, tls.VersionTLS11) }
-func testRejectTLS(t *testing.T, max uint16) {
- st := newServerTester(t, nil, func(c *tls.Config) {
+func testRejectTLS(t *testing.T, version uint16) {
+ st := newServerTester(t, nil, func(state *tls.ConnectionState) {
// As of 1.18 the default minimum Go TLS version is
// 1.2. In order to test rejection of lower versions,
- // manually set the minimum version to 1.0
- c.MinVersion = tls.VersionTLS10
- c.MaxVersion = max
+ // manually set the reported version to the one under test.
+ state.Version = version
})
defer st.Close()
- gf := st.wantGoAway()
- if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want {
- t.Errorf("Got error code %v; want %v", got, want)
- }
+ st.wantGoAway(0, ErrCodeInadequateSecurity)
}
func TestServer_Rejects_TLSBadCipher(t *testing.T) {
- st := newServerTester(t, nil, func(c *tls.Config) {
- // All TLS 1.3 ciphers are good. Test with TLS 1.2.
- c.MaxVersion = tls.VersionTLS12
- // Only list bad ones:
- c.CipherSuites = []uint16{
- tls.TLS_RSA_WITH_RC4_128_SHA,
- tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
- tls.TLS_RSA_WITH_AES_128_CBC_SHA,
- tls.TLS_RSA_WITH_AES_256_CBC_SHA,
- tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
- tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
- cipher_TLS_RSA_WITH_AES_128_CBC_SHA256,
- }
+ st := newServerTester(t, nil, func(state *tls.ConnectionState) {
+ state.Version = tls.VersionTLS12
+ state.CipherSuite = tls.TLS_RSA_WITH_RC4_128_SHA
})
defer st.Close()
- gf := st.wantGoAway()
- if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want {
- t.Errorf("Got error code %v; want %v", got, want)
- }
+ st.wantGoAway(0, ErrCodeInadequateSecurity)
}
func TestServer_Advertises_Common_Cipher(t *testing.T) {
- const requiredSuite = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
- st := newServerTester(t, nil, func(c *tls.Config) {
- // Have the client only support the one required by the spec.
- c.CipherSuites = []uint16{requiredSuite}
- }, func(ts *httptest.Server) {
- var srv *http.Server = ts.Config
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
+ }, func(srv *http.Server) {
// Have the server configured with no specific cipher suites.
// This tests that Go's defaults include the required one.
srv.TLSConfig = nil
})
- defer st.Close()
- st.greet()
-}
-func (st *serverTester) onHeaderField(f hpack.HeaderField) {
- if f.Name == "date" {
- return
- }
- st.decodedHeaders = append(st.decodedHeaders, [2]string{f.Name, f.Value})
-}
+ // Have the client only support the one required by the spec.
+ const requiredSuite = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+ tlsConfig := tlsConfigInsecure.Clone()
+ tlsConfig.MaxVersion = tls.VersionTLS12
+ tlsConfig.CipherSuites = []uint16{requiredSuite}
+ tr := &Transport{TLSClientConfig: tlsConfig}
+ defer tr.CloseIdleConnections()
-func (st *serverTester) decodeHeader(headerBlock []byte) (pairs [][2]string) {
- st.decodedHeaders = nil
- if _, err := st.hpackDec.Write(headerBlock); err != nil {
- st.t.Fatalf("hpack decoding error: %v", err)
+ req, err := http.NewRequest("GET", ts.URL, nil)
+ if err != nil {
+ t.Fatal(err)
}
- if err := st.hpackDec.Close(); err != nil {
- st.t.Fatalf("hpack decoding error: %v", err)
+ res, err := tr.RoundTrip(req)
+ if err != nil {
+ t.Fatal(err)
}
- return st.decodedHeaders
+ res.Body.Close()
}
// testServerResponse sets up an idle HTTP/2 connection. The client function should
@@ -2801,22 +2595,18 @@ func TestServerDoS_MaxHeaderListSize(t *testing.T) {
chunk = chunk[:frameSize]
}
b = b[len(chunk):]
- st.fr.WriteContinuation(1, len(b) == 0, chunk)
- }
-
- h := st.wantHeaders()
- if !h.HeadersEnded() {
- t.Fatalf("Got HEADERS without END_HEADERS set: %v", h)
- }
- headers := st.decodeHeader(h.HeaderBlockFragment())
- want := [][2]string{
- {":status", "431"},
- {"content-type", "text/html; charset=utf-8"},
- {"content-length", "63"},
- }
- if !reflect.DeepEqual(headers, want) {
- t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want)
+ st.fr.WriteContinuation(1, len(b) == 0, chunk)
}
+
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: false,
+ header: http.Header{
+ ":status": []string{"431"},
+ "content-type": []string{"text/html; charset=utf-8"},
+ "content-length": []string{"63"},
+ },
+ })
}
func TestServer_Response_Stream_With_Missing_Trailer(t *testing.T) {
@@ -2825,17 +2615,15 @@ func TestServer_Response_Stream_With_Missing_Trailer(t *testing.T) {
return nil
}, func(st *serverTester) {
getSlash(st)
- hf := st.wantHeaders()
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- df := st.wantData()
- if len(df.data) != 0 {
- t.Fatal("did not want data")
- }
- if !df.StreamEnded() {
- t.Fatal("want END_STREAM flag")
- }
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: false,
+ })
+ st.wantData(wantData{
+ streamID: 1,
+ endStream: true,
+ size: 0,
+ })
})
}
@@ -2844,8 +2632,8 @@ func TestCompressionErrorOnWrite(t *testing.T) {
var serverConfig *http.Server
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
// No response body.
- }, func(ts *httptest.Server) {
- serverConfig = ts.Config
+ }, func(s *http.Server) {
+ serverConfig = s
serverConfig.MaxHeaderBytes = maxStrLen
})
st.addLogFilter("connection error: COMPRESSION_ERROR")
@@ -2873,20 +2661,16 @@ func TestCompressionErrorOnWrite(t *testing.T) {
EndStream: true,
EndHeaders: true,
})
- h := st.wantHeaders()
- if !h.HeadersEnded() {
- t.Fatalf("Got HEADERS without END_HEADERS set: %v", h)
- }
- headers := st.decodeHeader(h.HeaderBlockFragment())
- want := [][2]string{
- {":status", "431"},
- {"content-type", "text/html; charset=utf-8"},
- {"content-length", "63"},
- }
- if !reflect.DeepEqual(headers, want) {
- t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want)
- }
- df := st.wantData()
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: false,
+ header: http.Header{
+ ":status": []string{"431"},
+ "content-type": []string{"text/html; charset=utf-8"},
+ "content-length": []string{"63"},
+ },
+ })
+ df := readFrame[*DataFrame](t, st)
if !strings.Contains(string(df.Data()), "HTTP Error 431") {
t.Errorf("Unexpected data body: %q", df.Data())
}
@@ -2902,10 +2686,7 @@ func TestCompressionErrorOnWrite(t *testing.T) {
EndStream: true,
EndHeaders: true,
})
- ga := st.wantGoAway()
- if ga.ErrCode != ErrCodeCompression {
- t.Errorf("GOAWAY err = %v; want ErrCodeCompression", ga.ErrCode)
- }
+ st.wantGoAway(3, ErrCodeCompression)
}
func TestCompressionErrorOnClose(t *testing.T) {
@@ -2924,10 +2705,7 @@ func TestCompressionErrorOnClose(t *testing.T) {
EndStream: true,
EndHeaders: true,
})
- ga := st.wantGoAway()
- if ga.ErrCode != ErrCodeCompression {
- t.Errorf("GOAWAY err = %v; want ErrCodeCompression", ga.ErrCode)
- }
+ st.wantGoAway(1, ErrCodeCompression)
}
// test that a server handler can read trailers from a client
@@ -2962,7 +2740,7 @@ func TestServerReadsTrailers(t *testing.T) {
if !reflect.DeepEqual(r.Trailer, wantTrailer) {
t.Errorf("initial Trailer = %v; want %v", r.Trailer, wantTrailer)
}
- slurp, err := ioutil.ReadAll(r.Body)
+ slurp, err := io.ReadAll(r.Body)
if string(slurp) != testBody {
t.Errorf("read body %q; want %q", slurp, testBody)
}
@@ -3017,66 +2795,51 @@ func testServerWritesTrailers(t *testing.T, withFlush bool) {
return nil
}, func(st *serverTester) {
getSlash(st)
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("response HEADERS had END_STREAM")
- }
- if !hf.HeadersEnded() {
- t.Fatal("response HEADERS didn't have END_HEADERS")
- }
- goth := st.decodeHeader(hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "200"},
- {"foo", "Bar"},
- {"trailer", "Server-Trailer-A, Server-Trailer-B"},
- {"trailer", "Server-Trailer-C"},
- {"trailer", "Transfer-Encoding, Content-Length, Trailer"},
- {"content-type", "text/plain; charset=utf-8"},
- {"content-length", "5"},
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth)
- }
- df := st.wantData()
- if string(df.Data()) != "Hello" {
- t.Fatalf("Client read %q; want Hello", df.Data())
- }
- if df.StreamEnded() {
- t.Fatalf("data frame had STREAM_ENDED")
- }
- tf := st.wantHeaders() // for the trailers
- if !tf.StreamEnded() {
- t.Fatalf("trailers HEADERS lacked END_STREAM")
- }
- if !tf.HeadersEnded() {
- t.Fatalf("trailers HEADERS lacked END_HEADERS")
- }
- wanth = [][2]string{
- {"post-header-trailer", "hi1"},
- {"post-header-trailer2", "hi2"},
- {"server-trailer-a", "valuea"},
- {"server-trailer-c", "valuec"},
- }
- goth = st.decodeHeader(tf.HeaderBlockFragment())
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth)
- }
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: false,
+ header: http.Header{
+ ":status": []string{"200"},
+ "foo": []string{"Bar"},
+ "trailer": []string{
+ "Server-Trailer-A, Server-Trailer-B",
+ "Server-Trailer-C",
+ "Transfer-Encoding, Content-Length, Trailer",
+ },
+ "content-type": []string{"text/plain; charset=utf-8"},
+ "content-length": []string{"5"},
+ },
+ })
+ st.wantData(wantData{
+ streamID: 1,
+ endStream: false,
+ data: []byte("Hello"),
+ })
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: true,
+ header: http.Header{
+ "post-header-trailer": []string{"hi1"},
+ "post-header-trailer2": []string{"hi2"},
+ "server-trailer-a": []string{"valuea"},
+ "server-trailer-c": []string{"valuec"},
+ },
+ })
})
}
func TestServerWritesUndeclaredTrailers(t *testing.T) {
const trailer = "Trailer-Header"
const value = "hi1"
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
w.Header().Set(http.TrailerPrefix+trailer, value)
- }, optOnlyServer)
- defer st.Close()
+ })
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
cl := &http.Client{Transport: tr}
- resp, err := cl.Get(st.ts.URL)
+ resp, err := cl.Get(ts.URL)
if err != nil {
t.Fatal(err)
}
@@ -3099,31 +2862,24 @@ func TestServerDoesntWriteInvalidHeaders(t *testing.T) {
return nil
}, func(st *serverTester) {
getSlash(st)
- hf := st.wantHeaders()
- if !hf.StreamEnded() {
- t.Error("response HEADERS lacked END_STREAM")
- }
- if !hf.HeadersEnded() {
- t.Fatal("response HEADERS didn't have END_HEADERS")
- }
- goth := st.decodeHeader(hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "200"},
- {"ok1", "x"},
- {"content-length", "0"},
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth)
- }
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: true,
+ header: http.Header{
+ ":status": []string{"200"},
+ "ok1": []string{"x"},
+ "content-length": []string{"0"},
+ },
+ })
})
}
func BenchmarkServerGets(b *testing.B) {
- defer disableGoroutineTracking()()
+ disableGoroutineTracking(b)
b.ReportAllocs()
const msg = "Hello, world"
- st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {
+ st := newServerTesterWithRealConn(b, func(w http.ResponseWriter, r *http.Request) {
io.WriteString(w, msg)
})
defer st.Close()
@@ -3142,24 +2898,23 @@ func BenchmarkServerGets(b *testing.B) {
EndStream: true,
EndHeaders: true,
})
- st.wantHeaders()
- df := st.wantData()
- if !df.StreamEnded() {
+ st.wantFrameType(FrameHeaders)
+ if df := readFrame[*DataFrame](b, st); !df.StreamEnded() {
b.Fatalf("DATA didn't have END_STREAM; got %v", df)
}
}
}
func BenchmarkServerPosts(b *testing.B) {
- defer disableGoroutineTracking()()
+ disableGoroutineTracking(b)
b.ReportAllocs()
const msg = "Hello, world"
- st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {
+ st := newServerTesterWithRealConn(b, func(w http.ResponseWriter, r *http.Request) {
 // Consume the (empty) body from the peer before replying, otherwise
 // the server will sometimes (depending on scheduling) send the peer
 // a RST_STREAM with the CANCEL error code.
- if n, err := io.Copy(ioutil.Discard, r.Body); n != 0 || err != nil {
+ if n, err := io.Copy(io.Discard, r.Body); n != 0 || err != nil {
b.Errorf("Copy error; got %v, %v; want 0, nil", n, err)
}
io.WriteString(w, msg)
@@ -3181,9 +2936,8 @@ func BenchmarkServerPosts(b *testing.B) {
EndHeaders: true,
})
st.writeData(id, true, nil)
- st.wantHeaders()
- df := st.wantData()
- if !df.StreamEnded() {
+ st.wantFrameType(FrameHeaders)
+ if df := readFrame[*DataFrame](b, st); !df.StreamEnded() {
b.Fatalf("DATA didn't have END_STREAM; got %v", df)
}
}
@@ -3203,7 +2957,7 @@ func BenchmarkServerToClientStreamReuseFrames(b *testing.B) {
}
func benchmarkServerToClientStream(b *testing.B, newServerOpts ...interface{}) {
- defer disableGoroutineTracking()()
+ disableGoroutineTracking(b)
b.ReportAllocs()
const msgLen = 1
// default window size
@@ -3219,11 +2973,11 @@ func benchmarkServerToClientStream(b *testing.B, newServerOpts ...interface{}) {
return msg
}
- st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {
+ st := newServerTesterWithRealConn(b, func(w http.ResponseWriter, r *http.Request) {
 // Consume the (empty) body from the peer before replying, otherwise
 // the server will sometimes (depending on scheduling) send the peer
 // a RST_STREAM with the CANCEL error code.
- if n, err := io.Copy(ioutil.Discard, r.Body); n != 0 || err != nil {
+ if n, err := io.Copy(io.Discard, r.Body); n != 0 || err != nil {
b.Errorf("Copy error; got %v, %v; want 0, nil", n, err)
}
for i := 0; i < b.N; i += 1 {
@@ -3244,18 +2998,22 @@ func benchmarkServerToClientStream(b *testing.B, newServerOpts ...interface{}) {
})
st.writeData(id, true, nil)
- st.wantHeaders()
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: false,
+ })
var pendingWindowUpdate = uint32(0)
for i := 0; i < b.N; i += 1 {
expected := nextMsg(i)
- df := st.wantData()
- if bytes.Compare(expected, df.data) != 0 {
- b.Fatalf("Bad message received; want %v; got %v", expected, df.data)
- }
+ st.wantData(wantData{
+ streamID: 1,
+ endStream: false,
+ data: expected,
+ })
// try to send infrequent but large window updates so they don't overwhelm the test
- pendingWindowUpdate += uint32(len(df.data))
+ pendingWindowUpdate += uint32(len(expected))
if pendingWindowUpdate >= windowSize/2 {
if err := st.fr.WriteWindowUpdate(0, pendingWindowUpdate); err != nil {
b.Fatal(err)
@@ -3266,10 +3024,10 @@ func benchmarkServerToClientStream(b *testing.B, newServerOpts ...interface{}) {
pendingWindowUpdate = 0
}
}
- df := st.wantData()
- if !df.StreamEnded() {
- b.Fatalf("DATA didn't have END_STREAM; got %v", df)
- }
+ st.wantData(wantData{
+ streamID: 1,
+ endStream: true,
+ })
}
// go-fuzz bug, originally reported at https://github.com/bradfitz/http2/issues/53
@@ -3433,14 +3191,13 @@ func TestServerNoAutoContentLengthOnHead(t *testing.T) {
EndStream: true,
EndHeaders: true,
})
- h := st.wantHeaders()
- headers := st.decodeHeader(h.HeaderBlockFragment())
- want := [][2]string{
- {":status", "200"},
- }
- if !reflect.DeepEqual(headers, want) {
- t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want)
- }
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: true,
+ header: http.Header{
+ ":status": []string{"200"},
+ },
+ })
}
// golang.org/issue/13495
@@ -3457,16 +3214,15 @@ func TestServerNoDuplicateContentType(t *testing.T) {
EndStream: true,
EndHeaders: true,
})
- h := st.wantHeaders()
- headers := st.decodeHeader(h.HeaderBlockFragment())
- want := [][2]string{
- {":status", "200"},
- {"content-type", ""},
- {"content-length", "41"},
- }
- if !reflect.DeepEqual(headers, want) {
- t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want)
- }
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: false,
+ header: http.Header{
+ ":status": []string{"200"},
+ "content-type": []string{""},
+ "content-length": []string{"41"},
+ },
+ })
}
func TestServerContentLengthCanBeDisabled(t *testing.T) {
@@ -3482,29 +3238,28 @@ func TestServerContentLengthCanBeDisabled(t *testing.T) {
EndStream: true,
EndHeaders: true,
})
- h := st.wantHeaders()
- headers := st.decodeHeader(h.HeaderBlockFragment())
- want := [][2]string{
- {":status", "200"},
- {"content-type", "text/plain; charset=utf-8"},
- }
- if !reflect.DeepEqual(headers, want) {
- t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want)
- }
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: false,
+ header: http.Header{
+ ":status": []string{"200"},
+ "content-type": []string{"text/plain; charset=utf-8"},
+ },
+ })
}
-func disableGoroutineTracking() (restore func()) {
+func disableGoroutineTracking(t testing.TB) {
old := DebugGoroutines
DebugGoroutines = false
- return func() { DebugGoroutines = old }
+ t.Cleanup(func() { DebugGoroutines = old })
}
func BenchmarkServer_GetRequest(b *testing.B) {
- defer disableGoroutineTracking()()
+ disableGoroutineTracking(b)
b.ReportAllocs()
const msg = "Hello, world."
- st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {
- n, err := io.Copy(ioutil.Discard, r.Body)
+ st := newServerTesterWithRealConn(b, func(w http.ResponseWriter, r *http.Request) {
+ n, err := io.Copy(io.Discard, r.Body)
if err != nil || n > 0 {
b.Errorf("Read %d bytes, error %v; want 0 bytes.", n, err)
}
@@ -3526,17 +3281,17 @@ func BenchmarkServer_GetRequest(b *testing.B) {
EndStream: true,
EndHeaders: true,
})
- st.wantHeaders()
- st.wantData()
+ st.wantFrameType(FrameHeaders)
+ st.wantFrameType(FrameData)
}
}
func BenchmarkServer_PostRequest(b *testing.B) {
- defer disableGoroutineTracking()()
+ disableGoroutineTracking(b)
b.ReportAllocs()
const msg = "Hello, world."
- st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {
- n, err := io.Copy(ioutil.Discard, r.Body)
+ st := newServerTesterWithRealConn(b, func(w http.ResponseWriter, r *http.Request) {
+ n, err := io.Copy(io.Discard, r.Body)
if err != nil || n > 0 {
b.Errorf("Read %d bytes, error %v; want 0 bytes.", n, err)
}
@@ -3558,8 +3313,8 @@ func BenchmarkServer_PostRequest(b *testing.B) {
EndHeaders: true,
})
st.writeData(streamID, true, nil)
- st.wantHeaders()
- st.wantData()
+ st.wantFrameType(FrameHeaders)
+ st.wantFrameType(FrameData)
}
}
@@ -3610,7 +3365,7 @@ func TestServerHandleCustomConn(t *testing.T) {
EndStream: true,
EndHeaders: true,
})
- go io.Copy(ioutil.Discard, c2)
+ go io.Copy(io.Discard, c2)
<-handlerDone
}()
const testString = "my custom ConnectionState"
@@ -3644,17 +3399,16 @@ func TestServer_Rejects_ConnHeaders(t *testing.T) {
defer st.Close()
st.greet()
st.bodylessReq1("connection", "foo")
- hf := st.wantHeaders()
- goth := st.decodeHeader(hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "400"},
- {"content-type", "text/plain; charset=utf-8"},
- {"x-content-type-options", "nosniff"},
- {"content-length", "51"},
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got headers %v; want %v", goth, wanth)
- }
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: false,
+ header: http.Header{
+ ":status": []string{"400"},
+ "content-type": []string{"text/plain; charset=utf-8"},
+ "x-content-type-options": []string{"nosniff"},
+ "content-length": []string{"51"},
+ },
+ })
}
type hpackEncoder struct {
@@ -3731,7 +3485,7 @@ func TestExpect100ContinueAfterHandlerWrites(t *testing.T) {
doRead := make(chan bool, 1)
defer close(doRead) // fallback cleanup
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
io.WriteString(w, msg)
w.(http.Flusher).Flush()
@@ -3740,14 +3494,12 @@ func TestExpect100ContinueAfterHandlerWrites(t *testing.T) {
r.Body.Read(make([]byte, 10))
io.WriteString(w, msg2)
-
- }, optOnlyServer)
- defer st.Close()
+ })
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
- req, _ := http.NewRequest("POST", st.ts.URL, io.LimitReader(neverEnding('A'), 2<<20))
+ req, _ := http.NewRequest("POST", ts.URL, io.LimitReader(neverEnding('A'), 2<<20))
req.Header.Set("Expect", "100-continue")
res, err := tr.RoundTrip(req)
@@ -3808,14 +3560,13 @@ func TestUnreadFlowControlReturned_Server(t *testing.T) {
unblock := make(chan bool, 1)
defer close(unblock)
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
// Don't read the 16KB request body. Wait until the client's
// done sending it and then return. This should cause the Server
// to then return those 16KB of flow control to the client.
tt.reqFn(r)
<-unblock
- }, optOnlyServer)
- defer st.Close()
+ })
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
@@ -3833,7 +3584,7 @@ func TestUnreadFlowControlReturned_Server(t *testing.T) {
return 0, io.EOF
}),
)
- req, _ := http.NewRequest("POST", st.ts.URL, body)
+ req, _ := http.NewRequest("POST", ts.URL, body)
res, err := tr.RoundTrip(req)
if err != nil {
t.Fatal(tt.name, err)
@@ -3862,12 +3613,18 @@ func TestServerReturnsStreamAndConnFlowControlOnBodyClose(t *testing.T) {
BlockFragment: st.encodeHeader(),
EndHeaders: true,
})
- st.wantHeaders()
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: false,
+ })
const size = inflowMinRefresh // enough to trigger flow control return
st.writeData(1, false, make([]byte, size))
st.wantWindowUpdate(0, size) // conn-level flow control is returned
unblockHandler <- struct{}{}
- st.wantData()
+ st.wantData(wantData{
+ streamID: 1,
+ endStream: true,
+ })
}
func TestServerIdleTimeout(t *testing.T) {
@@ -3882,22 +3639,24 @@ func TestServerIdleTimeout(t *testing.T) {
defer st.Close()
st.greet()
- ga := st.wantGoAway()
- if ga.ErrCode != ErrCodeNo {
- t.Errorf("GOAWAY error = %v; want ErrCodeNo", ga.ErrCode)
- }
+ st.advance(500 * time.Millisecond)
+ st.wantGoAway(0, ErrCodeNo)
}
func TestServerIdleTimeout_AfterRequest(t *testing.T) {
if testing.Short() {
t.Skip("skipping in short mode")
}
- const timeout = 250 * time.Millisecond
+ const (
+ requestTimeout = 2 * time.Second
+ idleTimeout = 1 * time.Second
+ )
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- time.Sleep(timeout * 2)
+ var st *serverTester
+ st = newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ st.group.Sleep(requestTimeout)
}, func(h2s *Server) {
- h2s.IdleTimeout = timeout
+ h2s.IdleTimeout = idleTimeout
})
defer st.Close()
@@ -3906,14 +3665,16 @@ func TestServerIdleTimeout_AfterRequest(t *testing.T) {
// Send a request which takes twice the timeout. Verifies the
// idle timeout doesn't fire while we're in a request:
st.bodylessReq1()
- st.wantHeaders()
+ st.advance(requestTimeout)
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: true,
+ })
// But the idle timeout should be rearmed after the request
// is done:
- ga := st.wantGoAway()
- if ga.ErrCode != ErrCodeNo {
- t.Errorf("GOAWAY error = %v; want ErrCodeNo", ga.ErrCode)
- }
+ st.advance(idleTimeout)
+ st.wantGoAway(1, ErrCodeNo)
}
// grpc-go closes the Request.Body currently with a Read.
@@ -3949,22 +3710,21 @@ func TestIssue20704Race(t *testing.T) {
itemCount = 100
)
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
for i := 0; i < itemCount; i++ {
_, err := w.Write(make([]byte, itemSize))
if err != nil {
return
}
}
- }, optOnlyServer)
- defer st.Close()
+ })
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
cl := &http.Client{Transport: tr}
for i := 0; i < 1000; i++ {
- resp, err := cl.Get(st.ts.URL)
+ resp, err := cl.Get(ts.URL)
if err != nil {
t.Fatal(err)
}
@@ -3976,7 +3736,7 @@ func TestIssue20704Race(t *testing.T) {
func TestServer_Rejects_TooSmall(t *testing.T) {
testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- ioutil.ReadAll(r.Body)
+ io.ReadAll(r.Body)
return nil
}, func(st *serverTester) {
st.writeHeaders(HeadersFrameParam{
@@ -4016,13 +3776,10 @@ func TestServerHandlerConnectionClose(t *testing.T) {
var sawRes bool
var sawWindowUpdate bool
for {
- f, err := st.readFrame()
- if err == io.EOF {
+ f := st.readFrame()
+ if f == nil {
break
}
- if err != nil {
- t.Fatal(err)
- }
switch f := f.(type) {
case *GoAwayFrame:
sawGoAway = true
@@ -4074,6 +3831,8 @@ func TestServerHandlerConnectionClose(t *testing.T) {
}
sawWindowUpdate = true
unblockHandler <- true
+ st.sync()
+ st.advance(goAwayTimeout)
default:
t.Logf("unexpected frame: %v", summarizeFrame(f))
}
@@ -4139,20 +3898,9 @@ func TestServer_Headers_HalfCloseRemote(t *testing.T) {
}
func TestServerGracefulShutdown(t *testing.T) {
- var st *serverTester
handlerDone := make(chan struct{})
- st = newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- defer close(handlerDone)
- go st.ts.Config.Shutdown(context.Background())
-
- ga := st.wantGoAway()
- if ga.ErrCode != ErrCodeNo {
- t.Errorf("GOAWAY error = %v; want ErrCodeNo", ga.ErrCode)
- }
- if ga.LastStreamID != 1 {
- t.Errorf("GOAWAY LastStreamID = %v; want 1", ga.LastStreamID)
- }
-
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ <-handlerDone
w.Header().Set("x-foo", "bar")
})
defer st.Close()
@@ -4160,17 +3908,23 @@ func TestServerGracefulShutdown(t *testing.T) {
st.greet()
st.bodylessReq1()
- <-handlerDone
- hf := st.wantHeaders()
- goth := st.decodeHeader(hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "200"},
- {"x-foo", "bar"},
- {"content-length", "0"},
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got headers %v; want %v", goth, wanth)
- }
+ st.sync()
+ st.h1server.Shutdown(context.Background())
+
+ st.wantGoAway(1, ErrCodeNo)
+
+ close(handlerDone)
+ st.sync()
+
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: true,
+ header: http.Header{
+ ":status": []string{"200"},
+ "x-foo": []string{"bar"},
+ "content-length": []string{"0"},
+ },
+ })
n, err := st.cc.Read([]byte{0})
if n != 0 || err == nil {
@@ -4241,26 +3995,25 @@ func TestContentEncodingNoSniffing(t *testing.T) {
for _, tt := range resps {
t.Run(tt.name, func(t *testing.T) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
if tt.contentEncoding != nil {
w.Header().Set("Content-Encoding", tt.contentEncoding.(string))
}
w.Write(tt.body)
- }, optOnlyServer)
- defer st.Close()
+ })
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
- req, _ := http.NewRequest("GET", st.ts.URL, nil)
+ req, _ := http.NewRequest("GET", ts.URL, nil)
res, err := tr.RoundTrip(req)
if err != nil {
- t.Fatalf("GET %s: %v", st.ts.URL, err)
+ t.Fatalf("GET %s: %v", ts.URL, err)
}
defer res.Body.Close()
g := res.Header.Get("Content-Encoding")
- t.Logf("%s: Content-Encoding: %s", st.ts.URL, g)
+ t.Logf("%s: Content-Encoding: %s", ts.URL, g)
if w := tt.contentEncoding; g != w {
if w != nil { // The case where contentEncoding was set explicitly.
@@ -4274,7 +4027,7 @@ func TestContentEncodingNoSniffing(t *testing.T) {
if w := tt.wantContentType; g != w {
t.Errorf("Content-Type mismatch\n\tgot: %q\n\twant: %q", g, w)
}
- t.Logf("%s: Content-Type: %s", st.ts.URL, g)
+ t.Logf("%s: Content-Type: %s", ts.URL, g)
})
}
}
@@ -4322,13 +4075,10 @@ func TestServerWindowUpdateOnBodyClose(t *testing.T) {
// Wait for flow control credit for the portion of the request written so far.
increments := windowSize / 2
for {
- f, err := st.readFrame()
- if err == io.EOF {
+ f := st.readFrame()
+ if f == nil {
break
}
- if err != nil {
- t.Fatal(err)
- }
if wu, ok := f.(*WindowUpdateFrame); ok && wu.StreamID == 0 {
increments -= int(wu.Increment)
if increments == 0 {
@@ -4362,24 +4112,16 @@ func TestNoErrorLoggedOnPostAfterGOAWAY(t *testing.T) {
EndStream: false,
EndHeaders: true,
})
- st.wantHeaders()
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: true,
+ })
st.sc.startGracefulShutdown()
- for {
- f, err := st.readFrame()
- if err == io.EOF {
- st.t.Fatal("got a EOF; want *GoAwayFrame")
- }
- if err != nil {
- t.Fatal(err)
- }
- if gf, ok := f.(*GoAwayFrame); ok && gf.StreamID == 0 {
- break
- }
- }
+ st.wantRSTStream(1, ErrCodeNo)
+ st.wantGoAway(1, ErrCodeNo)
st.writeData(1, true, []byte(content))
- time.Sleep(200 * time.Millisecond)
st.Close()
if bytes.Contains(st.serverLogBuf.Bytes(), []byte("PROTOCOL_ERROR")) {
@@ -4395,27 +4137,22 @@ func TestServerSendsProcessing(t *testing.T) {
return nil
}, func(st *serverTester) {
getSlash(st)
- hf := st.wantHeaders()
- goth := st.decodeHeader(hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "102"},
- }
-
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got = %q; want %q", goth, wanth)
- }
-
- hf = st.wantHeaders()
- goth = st.decodeHeader(hf.HeaderBlockFragment())
- wanth = [][2]string{
- {":status", "200"},
- {"content-type", "text/plain; charset=utf-8"},
- {"content-length", "5"},
- }
-
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got = %q; want %q", goth, wanth)
- }
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: false,
+ header: http.Header{
+ ":status": []string{"102"},
+ },
+ })
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: false,
+ header: http.Header{
+ ":status": []string{"200"},
+ "content-type": []string{"text/plain; charset=utf-8"},
+ "content-length": []string{"5"},
+ },
+ })
})
}
@@ -4435,45 +4172,43 @@ func TestServerSendsEarlyHints(t *testing.T) {
return nil
}, func(st *serverTester) {
getSlash(st)
- hf := st.wantHeaders()
- goth := st.decodeHeader(hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "103"},
- {"link", "; rel=preload; as=style"},
- {"link", "; rel=preload; as=script"},
- }
-
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got = %q; want %q", goth, wanth)
- }
-
- hf = st.wantHeaders()
- goth = st.decodeHeader(hf.HeaderBlockFragment())
- wanth = [][2]string{
- {":status", "103"},
- {"link", "; rel=preload; as=style"},
- {"link", "; rel=preload; as=script"},
- {"link", "; rel=preload; as=script"},
- }
-
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got = %q; want %q", goth, wanth)
- }
-
- hf = st.wantHeaders()
- goth = st.decodeHeader(hf.HeaderBlockFragment())
- wanth = [][2]string{
- {":status", "200"},
- {"link", "; rel=preload; as=style"},
- {"link", "; rel=preload; as=script"},
- {"link", "; rel=preload; as=script"},
- {"content-type", "text/plain; charset=utf-8"},
- {"content-length", "123"},
- }
-
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got = %q; want %q", goth, wanth)
- }
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: false,
+ header: http.Header{
+ ":status": []string{"103"},
+ "link": []string{
+ "; rel=preload; as=style",
+ "; rel=preload; as=script",
+ },
+ },
+ })
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: false,
+ header: http.Header{
+ ":status": []string{"103"},
+ "link": []string{
+ "; rel=preload; as=style",
+ "; rel=preload; as=script",
+ "; rel=preload; as=script",
+ },
+ },
+ })
+ st.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: false,
+ header: http.Header{
+ ":status": []string{"200"},
+ "link": []string{
+ "; rel=preload; as=style",
+ "; rel=preload; as=script",
+ "; rel=preload; as=script",
+ },
+ "content-type": []string{"text/plain; charset=utf-8"},
+ "content-length": []string{"123"},
+ },
+ })
})
}
@@ -4495,7 +4230,6 @@ func TestProtocolErrorAfterGoAway(t *testing.T) {
EndHeaders: true,
})
st.writeData(1, false, []byte(content[:5]))
- st.writeReadPing()
// Send a GOAWAY with ErrCodeNo, followed by a bogus window update.
// The server should close the connection.
@@ -4506,14 +4240,9 @@ func TestProtocolErrorAfterGoAway(t *testing.T) {
t.Fatal(err)
}
- for {
- if _, err := st.readFrame(); err != nil {
- if err != io.EOF {
- t.Errorf("unexpected readFrame error: %v", err)
- }
- break
- }
- }
+ st.advance(goAwayTimeout)
+ st.wantGoAway(1, ErrCodeNo)
+ st.wantClosed()
}
func TestServerInitialFlowControlWindow(t *testing.T) {
@@ -4534,9 +4263,9 @@ func TestServerInitialFlowControlWindow(t *testing.T) {
}, func(s *Server) {
s.MaxUploadBufferPerConnection = want
})
- defer st.Close()
st.writePreface()
- st.writeInitialSettings()
+ st.writeSettings()
+ _ = readFrame[*SettingsFrame](t, st)
st.writeSettingsAck()
st.writeHeaders(HeadersFrameParam{
StreamID: 1,
@@ -4547,10 +4276,7 @@ func TestServerInitialFlowControlWindow(t *testing.T) {
window := 65535
Frames:
for {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatal(err)
- }
+ f := st.readFrame()
switch f := f.(type) {
case *WindowUpdateFrame:
if f.FrameHeader.StreamID != 0 {
@@ -4560,6 +4286,8 @@ func TestServerInitialFlowControlWindow(t *testing.T) {
window += int(f.Increment)
case *HeadersFrame:
break Frames
+ case nil:
+ break Frames
default:
}
}
@@ -4606,7 +4334,7 @@ func TestCanonicalHeaderCacheGrowth(t *testing.T) {
// We should not access the slice after this point.
func TestServerWriteDoesNotRetainBufferAfterReturn(t *testing.T) {
donec := make(chan struct{})
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
defer close(donec)
buf := make([]byte, 1<<20)
var i byte
@@ -4620,13 +4348,12 @@ func TestServerWriteDoesNotRetainBufferAfterReturn(t *testing.T) {
return
}
}
- }, optOnlyServer)
- defer st.Close()
+ })
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
- req, _ := http.NewRequest("GET", st.ts.URL, nil)
+ req, _ := http.NewRequest("GET", ts.URL, nil)
res, err := tr.RoundTrip(req)
if err != nil {
t.Fatal(err)
@@ -4642,7 +4369,7 @@ func TestServerWriteDoesNotRetainBufferAfterReturn(t *testing.T) {
// We should not access the slice after this point.
func TestServerWriteDoesNotRetainBufferAfterServerClose(t *testing.T) {
donec := make(chan struct{}, 1)
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
donec <- struct{}{}
defer close(donec)
buf := make([]byte, 1<<20)
@@ -4657,20 +4384,19 @@ func TestServerWriteDoesNotRetainBufferAfterServerClose(t *testing.T) {
return
}
}
- }, optOnlyServer)
- defer st.Close()
+ })
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
- req, _ := http.NewRequest("GET", st.ts.URL, nil)
+ req, _ := http.NewRequest("GET", ts.URL, nil)
res, err := tr.RoundTrip(req)
if err != nil {
t.Fatal(err)
}
defer res.Body.Close()
<-donec
- st.ts.Config.Close()
+ ts.Config.Close()
<-donec
}
@@ -4697,9 +4423,7 @@ func TestServerMaxHandlerGoroutines(t *testing.T) {
})
defer st.Close()
- st.writePreface()
- st.writeInitialSettings()
- st.writeSettingsAck()
+ st.greet()
// Make maxHandlers concurrent requests.
// Reset them all, but only after the handler goroutines have started.
@@ -4766,20 +4490,9 @@ func TestServerMaxHandlerGoroutines(t *testing.T) {
st.fr.WriteRSTStream(streamID, ErrCodeCancel)
streamID += 2
}
-Frames:
- for {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatal(err)
- }
- switch f := f.(type) {
- case *GoAwayFrame:
- if f.ErrCode != ErrCodeEnhanceYourCalm {
- t.Errorf("err code = %v; want %v", f.ErrCode, ErrCodeEnhanceYourCalm)
- }
- break Frames
- default:
- }
+ fr := readFrame[*GoAwayFrame](t, st)
+ if fr.ErrCode != ErrCodeEnhanceYourCalm {
+ t.Errorf("err code = %v; want %v", fr.ErrCode, ErrCodeEnhanceYourCalm)
}
for _, s := range stops {
@@ -4790,14 +4503,12 @@ Frames:
func TestServerContinuationFlood(t *testing.T) {
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
fmt.Println(r.Header)
- }, func(ts *httptest.Server) {
- ts.Config.MaxHeaderBytes = 4096
+ }, func(s *http.Server) {
+ s.MaxHeaderBytes = 4096
})
defer st.Close()
- st.writePreface()
- st.writeInitialSettings()
- st.writeSettingsAck()
+ st.greet()
st.writeHeaders(HeadersFrameParam{
StreamID: 1,
@@ -4814,13 +4525,21 @@ func TestServerContinuationFlood(t *testing.T) {
))
for {
- f, err := st.readFrame()
- if err != nil {
+ f := st.readFrame()
+ if f == nil {
break
}
- switch f.(type) {
+ switch f := f.(type) {
case *HeadersFrame:
t.Fatalf("received HEADERS frame; want GOAWAY and a closed connection")
+ case *GoAwayFrame:
+ // We might not see the GOAWAY (see below), but if we do it should
+ // indicate that the server processed this request so the client doesn't
+ // attempt to retry it.
+ if got, want := f.LastStreamID, uint32(1); got != want {
+ t.Errorf("received GOAWAY with LastStreamId %v, want %v", got, want)
+ }
+
}
}
// We expect to have seen a GOAWAY before the connection closes,
@@ -4839,9 +4558,7 @@ func TestServerContinuationAfterInvalidHeader(t *testing.T) {
})
defer st.Close()
- st.writePreface()
- st.writeInitialSettings()
- st.writeSettingsAck()
+ st.greet()
st.writeHeaders(HeadersFrameParam{
StreamID: 1,
@@ -4857,8 +4574,8 @@ func TestServerContinuationAfterInvalidHeader(t *testing.T) {
var sawGoAway bool
for {
- f, err := st.readFrame()
- if err != nil {
+ f := st.readFrame()
+ if f == nil {
break
}
switch f.(type) {
@@ -4872,3 +4589,145 @@ func TestServerContinuationAfterInvalidHeader(t *testing.T) {
t.Errorf("connection closed with no GOAWAY frame; want one")
}
}
+
+func TestServerUpgradeRequestPrefaceFailure(t *testing.T) {
+ // An h2c upgrade request fails when the client preface is not as expected.
+ s2 := &Server{
+ // Setting IdleTimeout triggers #67168.
+ IdleTimeout: 60 * time.Minute,
+ }
+ c1, c2 := net.Pipe()
+ donec := make(chan struct{})
+ go func() {
+ defer close(donec)
+ s2.ServeConn(c1, &ServeConnOpts{
+ UpgradeRequest: httptest.NewRequest("GET", "/", nil),
+ })
+ }()
+ // The server expects to see the HTTP/2 preface,
+ // but we close the connection instead.
+ c2.Close()
+ <-donec
+}
+
+// Issue 67036: A stream error should result in the handler's request context being canceled.
+func TestServerRequestCancelOnError(t *testing.T) {
+ recvc := make(chan struct{}) // handler has started
+ donec := make(chan struct{}) // handler has finished
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ close(recvc)
+ <-r.Context().Done()
+ close(donec)
+ })
+ defer st.Close()
+
+ st.greet()
+
+ // Client sends request headers, handler starts.
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: st.encodeHeader(),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ <-recvc
+
+ // Client sends an invalid second set of request headers.
+ // The stream is reset.
+ // The handler's context is canceled, and the handler exits.
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: st.encodeHeader(),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ <-donec
+}
+
+func TestServerSetReadWriteDeadlineRace(t *testing.T) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
+ ctl := http.NewResponseController(w)
+ ctl.SetReadDeadline(time.Now().Add(3600 * time.Second))
+ ctl.SetWriteDeadline(time.Now().Add(3600 * time.Second))
+ })
+ resp, err := ts.Client().Get(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ resp.Body.Close()
+}
+
+func TestServerWriteByteTimeout(t *testing.T) {
+ const timeout = 1 * time.Second
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ w.Write(make([]byte, 100))
+ }, func(s *Server) {
+ s.WriteByteTimeout = timeout
+ })
+ st.greet()
+
+ st.cc.(*synctestNetConn).SetReadBufferSize(1) // write one byte at a time
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: st.encodeHeader(),
+ EndStream: true,
+ EndHeaders: true,
+ })
+
+ // Read a few bytes, staying just under WriteByteTimeout.
+ for i := 0; i < 10; i++ {
+ st.advance(timeout - 1)
+ if n, err := st.cc.Read(make([]byte, 1)); n != 1 || err != nil {
+ t.Fatalf("read %v: %v, %v; want 1, nil", i, n, err)
+ }
+ }
+
+ // Wait for WriteByteTimeout.
+ // The connection should close.
+ st.advance(1 * time.Second) // timeout after writing one byte
+ st.advance(1 * time.Second) // timeout after failing to write any more bytes
+ st.wantClosed()
+}
+
+func TestServerPingSent(t *testing.T) {
+ const readIdleTimeout = 15 * time.Second
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ }, func(s *Server) {
+ s.ReadIdleTimeout = readIdleTimeout
+ })
+ st.greet()
+
+ st.wantIdle()
+
+ st.advance(readIdleTimeout)
+ _ = readFrame[*PingFrame](t, st)
+ st.wantIdle()
+
+ st.advance(14 * time.Second)
+ st.wantIdle()
+ st.advance(1 * time.Second)
+ st.wantClosed()
+}
+
+func TestServerPingResponded(t *testing.T) {
+ const readIdleTimeout = 15 * time.Second
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ }, func(s *Server) {
+ s.ReadIdleTimeout = readIdleTimeout
+ })
+ st.greet()
+
+ st.wantIdle()
+
+ st.advance(readIdleTimeout)
+ pf := readFrame[*PingFrame](t, st)
+ st.wantIdle()
+
+ st.advance(14 * time.Second)
+ st.wantIdle()
+
+ st.writePing(true, pf.Data)
+
+ st.advance(2 * time.Second)
+ st.wantIdle()
+}
diff --git a/http2/sync_test.go b/http2/sync_test.go
new file mode 100644
index 0000000000..aeddbd6f3c
--- /dev/null
+++ b/http2/sync_test.go
@@ -0,0 +1,293 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "context"
+ "fmt"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+)
+
+// A synctestGroup synchronizes a set of cooperating goroutines.
+type synctestGroup struct {
+ mu sync.Mutex
+ gids map[int]bool
+ now time.Time
+ timers map[*fakeTimer]struct{}
+}
+
+type goroutine struct {
+ id int
+ parent int
+ state string
+}
+
+// newSynctest creates a new group with the synthetic clock set to the provided time.
+func newSynctest(now time.Time) *synctestGroup {
+ return &synctestGroup{
+ gids: map[int]bool{
+ currentGoroutine(): true,
+ },
+ now: now,
+ }
+}
+
+// Join adds the current goroutine to the group.
+func (g *synctestGroup) Join() {
+ g.mu.Lock()
+ defer g.mu.Unlock()
+ g.gids[currentGoroutine()] = true
+}
+
+// Count returns the number of goroutines in the group.
+func (g *synctestGroup) Count() int {
+ gs := stacks(true)
+ count := 0
+ for _, gr := range gs {
+ if !g.gids[gr.id] && !g.gids[gr.parent] {
+ continue
+ }
+ count++
+ }
+ return count
+}
+
+// Close calls t.Fatal if the group contains any running goroutines.
+func (g *synctestGroup) Close(t testing.TB) {
+ if count := g.Count(); count != 1 {
+ buf := make([]byte, 16*1024)
+ n := runtime.Stack(buf, true)
+ t.Logf("stacks:\n%s", buf[:n])
+ t.Fatalf("%v goroutines still running after test completed, expect 1", count)
+ }
+}
+
+// Wait blocks until all goroutines in the group, and their direct children, are idle.
+func (g *synctestGroup) Wait() {
+ for i := 0; ; i++ {
+ if g.idle() {
+ return
+ }
+ runtime.Gosched()
+ }
+}
+
+func (g *synctestGroup) idle() bool {
+ gs := stacks(true)
+ g.mu.Lock()
+ defer g.mu.Unlock()
+ for _, gr := range gs[1:] {
+ if !g.gids[gr.id] && !g.gids[gr.parent] {
+ continue
+ }
+ // From runtime/runtime2.go.
+ switch gr.state {
+ case "IO wait":
+ case "chan receive (nil chan)":
+ case "chan send (nil chan)":
+ case "select":
+ case "select (no cases)":
+ case "chan receive":
+ case "chan send":
+ case "sync.Cond.Wait":
+ case "sync.Mutex.Lock":
+ case "sync.RWMutex.RLock":
+ case "sync.RWMutex.Lock":
+ default:
+ return false
+ }
+ }
+ return true
+}
+
+func currentGoroutine() int {
+ s := stacks(false)
+ return s[0].id
+}
+
+func stacks(all bool) []goroutine {
+ buf := make([]byte, 16*1024)
+ for {
+ n := runtime.Stack(buf, all)
+ if n < len(buf) {
+ buf = buf[:n]
+ break
+ }
+ buf = make([]byte, len(buf)*2)
+ }
+
+ var goroutines []goroutine
+ for _, gs := range strings.Split(string(buf), "\n\n") {
+ skip, rest, ok := strings.Cut(gs, "goroutine ")
+ if skip != "" || !ok {
+ panic(fmt.Errorf("1 unparsable goroutine stack:\n%s", gs))
+ }
+ ids, rest, ok := strings.Cut(rest, " [")
+ if !ok {
+ panic(fmt.Errorf("2 unparsable goroutine stack:\n%s", gs))
+ }
+ id, err := strconv.Atoi(ids)
+ if err != nil {
+ panic(fmt.Errorf("3 unparsable goroutine stack:\n%s", gs))
+ }
+ state, rest, ok := strings.Cut(rest, "]")
+ var parent int
+ _, rest, ok = strings.Cut(rest, "\ncreated by ")
+ if ok && strings.Contains(rest, " in goroutine ") {
+ _, rest, ok := strings.Cut(rest, " in goroutine ")
+ if !ok {
+ panic(fmt.Errorf("4 unparsable goroutine stack:\n%s", gs))
+ }
+ parents, rest, ok := strings.Cut(rest, "\n")
+ if !ok {
+ panic(fmt.Errorf("5 unparsable goroutine stack:\n%s", gs))
+ }
+ parent, err = strconv.Atoi(parents)
+ if err != nil {
+ panic(fmt.Errorf("6 unparsable goroutine stack:\n%s", gs))
+ }
+ }
+ goroutines = append(goroutines, goroutine{
+ id: id,
+ parent: parent,
+ state: state,
+ })
+ }
+ return goroutines
+}
+
+// AdvanceTime advances the synthetic clock by d.
+func (g *synctestGroup) AdvanceTime(d time.Duration) {
+ defer g.Wait()
+ g.mu.Lock()
+ defer g.mu.Unlock()
+ g.now = g.now.Add(d)
+ for tm := range g.timers {
+ if tm.when.After(g.now) {
+ continue
+ }
+ tm.run()
+ delete(g.timers, tm)
+ }
+}
+
+// Now returns the current synthetic time.
+func (g *synctestGroup) Now() time.Time {
+ g.mu.Lock()
+ defer g.mu.Unlock()
+ return g.now
+}
+
+// TimeUntilEvent returns the amount of time until the next scheduled timer.
+func (g *synctestGroup) TimeUntilEvent() (d time.Duration, scheduled bool) {
+ g.mu.Lock()
+ defer g.mu.Unlock()
+ for tm := range g.timers {
+ if dd := tm.when.Sub(g.now); !scheduled || dd < d {
+ d = dd
+ scheduled = true
+ }
+ }
+ return d, scheduled
+}
+
+// Sleep is time.Sleep, but using synthetic time.
+func (g *synctestGroup) Sleep(d time.Duration) {
+ tm := g.NewTimer(d)
+ <-tm.C()
+}
+
+// NewTimer is time.NewTimer, but using synthetic time.
+func (g *synctestGroup) NewTimer(d time.Duration) Timer {
+ return g.addTimer(d, &fakeTimer{
+ ch: make(chan time.Time),
+ })
+}
+
+// AfterFunc is time.AfterFunc, but using synthetic time.
+func (g *synctestGroup) AfterFunc(d time.Duration, f func()) Timer {
+ return g.addTimer(d, &fakeTimer{
+ f: f,
+ })
+}
+
+// ContextWithTimeout is context.WithTimeout, but using synthetic time.
+func (g *synctestGroup) ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
+ ctx, cancel := context.WithCancel(ctx)
+ tm := g.AfterFunc(d, cancel)
+ return ctx, func() {
+ tm.Stop()
+ cancel()
+ }
+}
+
+func (g *synctestGroup) addTimer(d time.Duration, tm *fakeTimer) *fakeTimer {
+ g.mu.Lock()
+ defer g.mu.Unlock()
+ tm.g = g
+ tm.when = g.now.Add(d)
+ if g.timers == nil {
+ g.timers = make(map[*fakeTimer]struct{})
+ }
+ if tm.when.After(g.now) {
+ g.timers[tm] = struct{}{}
+ } else {
+ tm.run()
+ }
+ return tm
+}
+
+type Timer = interface {
+ C() <-chan time.Time
+ Reset(d time.Duration) bool
+ Stop() bool
+}
+
+type fakeTimer struct {
+ g *synctestGroup
+ when time.Time
+ ch chan time.Time
+ f func()
+}
+
+func (tm *fakeTimer) run() {
+ if tm.ch != nil {
+ tm.ch <- tm.g.now
+ } else {
+ go func() {
+ tm.g.Join()
+ tm.f()
+ }()
+ }
+}
+
+func (tm *fakeTimer) C() <-chan time.Time { return tm.ch }
+
+func (tm *fakeTimer) Reset(d time.Duration) bool {
+ tm.g.mu.Lock()
+ defer tm.g.mu.Unlock()
+ _, stopped := tm.g.timers[tm]
+ if d <= 0 {
+ delete(tm.g.timers, tm)
+ tm.run()
+ } else {
+ tm.when = tm.g.now.Add(d)
+ tm.g.timers[tm] = struct{}{}
+ }
+ return stopped
+}
+
+func (tm *fakeTimer) Stop() bool {
+ tm.g.mu.Lock()
+ defer tm.g.mu.Unlock()
+ _, stopped := tm.g.timers[tm]
+ delete(tm.g.timers, tm)
+ return stopped
+}
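
The harness above is what the rewritten server tests drive through st.group.Sleep(...) and st.advance(...): instead of sleeping in real time, a test moves a synthetic clock and the group fires any timers whose deadlines have passed. Here is a minimal, hypothetical usage sketch (the test name, channel, and durations are invented for illustration); it relies only on the newSynctest, AfterFunc, TimeUntilEvent, and AdvanceTime functions defined above.

// Hypothetical example, not part of this patch.
func TestSynctestGroupSketch(t *testing.T) {
	g := newSynctest(time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC))

	firedc := make(chan struct{}, 1)
	g.AfterFunc(5*time.Second, func() { firedc <- struct{}{} })

	if d, ok := g.TimeUntilEvent(); !ok || d != 5*time.Second {
		t.Fatalf("TimeUntilEvent = %v, %v; want 5s, true", d, ok)
	}

	// Advancing by less than the timer's duration must not fire it.
	g.AdvanceTime(3 * time.Second)
	select {
	case <-firedc:
		t.Fatal("timer fired after only 3s of synthetic time")
	default:
	}

	// Reaching the 5s mark runs the AfterFunc on a goroutine that joins the group.
	g.AdvanceTime(2 * time.Second)
	select {
	case <-firedc:
	case <-time.After(1 * time.Second): // real-time backstop only
		t.Fatal("timer did not fire after 5s of synthetic time")
	}
}

Because AdvanceTime defers Wait(), control returns to the test only after every tracked goroutine is blocked again, which is what makes assertions such as st.wantIdle() in the tests above deterministic rather than timing dependent.
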
diff --git a/http2/testdata/draft-ietf-httpbis-http2.xml b/http2/testdata/draft-ietf-httpbis-http2.xml
deleted file mode 100644
index 39d756de7a..0000000000
--- a/http2/testdata/draft-ietf-httpbis-http2.xml
+++ /dev/null
@@ -1,5021 +0,0 @@
-[deleted file contents: the xml2rfc source of draft-ietf-httpbis-http2, the HTTP/2 specification draft previously kept as test data]
- sending an END_STREAM flag causes the stream state to become "half closed (local)";
- an endpoint receiving an END_STREAM flag causes the stream state to become "half
- closed (remote)".
-
-
- Either endpoint can send a RST_STREAM frame from this state, causing
- it to transition immediately to "closed".
-
-
-
-
-
-
- A stream that is in the "half closed (local)" state cannot be used for sending
- frames. Only WINDOW_UPDATE , PRIORITY and
- RST_STREAM frames can be sent in this state.
-
-
- A stream transitions from this state to "closed" when a frame that contains an
- END_STREAM flag is received, or when either peer sends a RST_STREAM
- frame.
-
-
- A receiver can ignore WINDOW_UPDATE frames in this state, which might
- arrive for a short period after a frame bearing the END_STREAM flag is sent.
-
-
- PRIORITY frames received in this state are used to reprioritize
- streams that depend on the current stream.
-
-
-
-
-
-
- A stream that is "half closed (remote)" is no longer being used by the peer to send
- frames. In this state, an endpoint is no longer obligated to maintain a receiver
- flow control window if it performs flow control.
-
-
- If an endpoint receives additional frames for a stream that is in this state, other
- than WINDOW_UPDATE , PRIORITY or
- RST_STREAM , it MUST respond with a stream error of type
- STREAM_CLOSED .
-
-
- A stream that is "half closed (remote)" can be used by the endpoint to send frames
- of any type. In this state, the endpoint continues to observe advertised stream level flow control limits .
-
-
- A stream can transition from this state to "closed" by sending a frame that contains
- an END_STREAM flag, or when either peer sends a RST_STREAM frame.
-
-
-
-
-
-
- The "closed" state is the terminal state.
-
-
- An endpoint MUST NOT send frames other than PRIORITY on a closed
- stream. An endpoint that receives any frame other than PRIORITY
- after receiving a RST_STREAM MUST treat that as a stream error of type
- STREAM_CLOSED . Similarly, an endpoint that receives any frames after
- receiving a frame with the END_STREAM flag set MUST treat that as a connection error of type
- STREAM_CLOSED , unless the frame is permitted as described below.
-
-
- WINDOW_UPDATE or RST_STREAM frames can be received in
- this state for a short period after a DATA or HEADERS
- frame containing an END_STREAM flag is sent. Until the remote peer receives and
- processes RST_STREAM or the frame bearing the END_STREAM flag, it
- might send frames of these types. Endpoints MUST ignore
- WINDOW_UPDATE or RST_STREAM frames received in this
- state, though endpoints MAY choose to treat frames that arrive a significant time
- after sending END_STREAM as a connection
- error of type PROTOCOL_ERROR .
-
-
- PRIORITY frames can be sent on closed streams to prioritize streams
- that are dependent on the closed stream. Endpoints SHOULD process
- PRIORITY frame, though they can be ignored if the stream has been
- removed from the dependency tree (see ).
-
-
- If this state is reached as a result of sending a RST_STREAM frame,
- the peer that receives the RST_STREAM might have already sent - or
- enqueued for sending - frames on the stream that cannot be withdrawn. An endpoint
- MUST ignore frames that it receives on closed streams after it has sent a
- RST_STREAM frame. An endpoint MAY choose to limit the period over
- which it ignores frames and treat frames that arrive after this time as being in
- error.
-
-
- Flow controlled frames (i.e., DATA ) received after sending
- RST_STREAM are counted toward the connection flow control window.
- Even though these frames might be ignored, because they are sent before the sender
- receives the RST_STREAM , the sender will consider the frames to count
- against the flow control window.
-
-
- An endpoint might receive a PUSH_PROMISE frame after it sends
- RST_STREAM . PUSH_PROMISE causes a stream to become
- "reserved" even if the associated stream has been reset. Therefore, a
- RST_STREAM is needed to close an unwanted promised stream.
-
-
-
-
-
- In the absence of more specific guidance elsewhere in this document, implementations
- SHOULD treat the receipt of a frame that is not expressly permitted in the description of
- a state as a connection error of type
- PROTOCOL_ERROR . Frames of unknown types are ignored.
-
-
- An example of the state transitions for an HTTP request/response exchange can be found in
- . An example of the state transitions for server push can be
- found in and .
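The state machine above can be summarized directly in code. The sketch below is a hand-rolled illustration, not any library's API: it names the seven states and applies just one representative event (receiving a HEADERS frame), while the real set of events and error checks is much larger.

```go
package main

import "fmt"

// StreamState enumerates the HTTP/2 stream states described above.
type StreamState int

const (
	StateIdle StreamState = iota
	StateReservedLocal
	StateReservedRemote
	StateOpen
	StateHalfClosedLocal
	StateHalfClosedRemote
	StateClosed
)

// onRecvHeaders applies the transition for a received HEADERS frame: an idle
// stream becomes "open", or "half closed (remote)" if the frame also carried
// the END_STREAM flag.
func onRecvHeaders(s StreamState, endStream bool) (StreamState, error) {
	switch s {
	case StateIdle, StateOpen:
		if endStream {
			return StateHalfClosedRemote, nil
		}
		return StateOpen, nil
	default:
		// Frames not permitted in the current state are a connection error
		// of type PROTOCOL_ERROR.
		return s, fmt.Errorf("HEADERS not permitted in state %d", s)
	}
}

func main() {
	st, _ := onRecvHeaders(StateIdle, true)
	fmt.Println(st == StateHalfClosedRemote) // true
}
```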
-
-
-
-
- Streams are identified with an unsigned 31-bit integer. Streams initiated by a client
- MUST use odd-numbered stream identifiers; those initiated by the server MUST use
- even-numbered stream identifiers. A stream identifier of zero (0x0) is used for
- connection control messages; the stream identifier zero cannot be used to establish a
- new stream.
-
-
- HTTP/1.1 requests that are upgraded to HTTP/2 (see ) are
- responded to with a stream identifier of one (0x1). After the upgrade
- completes, stream 0x1 is "half closed (local)" to the client. Therefore, stream 0x1
- cannot be selected as a new stream identifier by a client that upgrades from HTTP/1.1.
-
-
- The identifier of a newly established stream MUST be numerically greater than all
- streams that the initiating endpoint has opened or reserved. This governs streams that
- are opened using a HEADERS frame and streams that are reserved using
- PUSH_PROMISE . An endpoint that receives an unexpected stream identifier
- MUST respond with a connection error of
- type PROTOCOL_ERROR .
-
-
- The first use of a new stream identifier implicitly closes all streams in the "idle"
- state that might have been initiated by that peer with a lower-valued stream identifier.
- For example, if a client sends a HEADERS frame on stream 7 without ever
- sending a frame on stream 5, then stream 5 transitions to the "closed" state when the
- first frame for stream 7 is sent or received.
-
-
- Stream identifiers cannot be reused. Long-lived connections can result in an endpoint
- exhausting the available range of stream identifiers. A client that is unable to
- establish a new stream identifier can establish a new connection for new streams. A
- server that is unable to establish a new stream identifier can send a
- GOAWAY frame so that the client is forced to open a new connection for
- new streams.
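A minimal check of the identifier rules above: stream 0 is reserved, client-initiated streams are odd, server-initiated streams are even, and a new identifier must be greater than any the peer has opened or reserved. The function name and error strings are illustrative only.

```go
package main

import (
	"errors"
	"fmt"
)

// validNewStreamID reports whether id is acceptable for a new stream opened
// by the peer, given the highest identifier that peer has already opened or
// reserved and whether the peer is the client.
func validNewStreamID(id, lastPeerID uint32, peerIsClient bool) error {
	if id == 0 {
		return errors.New("stream 0 is reserved for connection control")
	}
	if peerIsClient && id%2 == 0 {
		return errors.New("client-initiated streams must be odd-numbered")
	}
	if !peerIsClient && id%2 == 1 {
		return errors.New("server-initiated streams must be even-numbered")
	}
	if id <= lastPeerID {
		// Identifiers cannot be reused or decrease: connection error of
		// type PROTOCOL_ERROR.
		return errors.New("stream identifier not greater than previously used identifiers")
	}
	return nil
}

func main() {
	fmt.Println(validNewStreamID(7, 5, true)) // <nil>
	fmt.Println(validNewStreamID(4, 5, true)) // error: even and not greater
}
```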
-
-
-
-
-
- A peer can limit the number of concurrently active streams using the
- SETTINGS_MAX_CONCURRENT_STREAMS parameter (see ) within a SETTINGS frame. The maximum concurrent
- streams setting is specific to each endpoint and applies only to the peer that receives
- the setting. That is, clients specify the maximum number of concurrent streams the
- server can initiate, and servers specify the maximum number of concurrent streams the
- client can initiate.
-
-
- Streams that are in the "open" state, or in either of the "half closed" states, count toward
- the maximum number of streams that an endpoint is permitted to open. Streams in any of
- these three states count toward the limit advertised in the
- SETTINGS_MAX_CONCURRENT_STREAMS setting. Streams in either of the
- "reserved" states do not count toward the stream limit.
-
-
- Endpoints MUST NOT exceed the limit set by their peer. An endpoint that receives a
- HEADERS frame that causes their advertised concurrent stream limit to be
- exceeded MUST treat this as a stream error . An
- endpoint that wishes to reduce the value of
- SETTINGS_MAX_CONCURRENT_STREAMS to a value that is below the current
- number of open streams can either close streams that exceed the new value or allow
- streams to complete.
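A sketch of how an endpoint might enforce its advertised SETTINGS_MAX_CONCURRENT_STREAMS value before accepting another peer-initiated stream, counting only "open" and "half closed" streams as the text above requires. The state strings and function name are illustrative.

```go
package main

import "fmt"

// canOpenPeerStream reports whether accepting one more peer-initiated stream
// stays within the limit this endpoint advertised in
// SETTINGS_MAX_CONCURRENT_STREAMS. Only streams in the "open" state or either
// "half closed" state count toward the limit; "reserved" streams do not.
func canOpenPeerStream(streamStates map[uint32]string, maxConcurrent uint32) bool {
	var active uint32
	for _, s := range streamStates {
		switch s {
		case "open", "half closed (local)", "half closed (remote)":
			active++
		}
	}
	return active < maxConcurrent
}

func main() {
	states := map[uint32]string{
		1: "open",
		3: "half closed (remote)",
		5: "reserved (remote)", // does not count toward the limit
	}
	fmt.Println(canOpenPeerStream(states, 2)) // false: two streams already count
}
```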
-
-
-
-
-
-
- Using streams for multiplexing introduces contention over use of the TCP connection,
- resulting in blocked streams. A flow control scheme ensures that streams on the same
- connection do not destructively interfere with each other. Flow control is used for both
- individual streams and for the connection as a whole.
-
-
- HTTP/2 provides for flow control through use of the WINDOW_UPDATE frame .
-
-
-
-
- HTTP/2 stream flow control aims to allow a variety of flow control algorithms to be
- used without requiring protocol changes. Flow control in HTTP/2 has the following
- characteristics:
-
-
- Flow control is specific to a connection; i.e., it is "hop-by-hop", not
- "end-to-end".
-
-
- Flow control is based on window update frames. Receivers advertise how many octets
- they are prepared to receive on a stream and for the entire connection. This is a
- credit-based scheme.
-
-
- Flow control is directional with overall control provided by the receiver. A
- receiver MAY choose to set any window size that it desires for each stream and for
- the entire connection. A sender MUST respect flow control limits imposed by a
- receiver. Clients, servers and intermediaries all independently advertise their
- flow control window as a receiver and abide by the flow control limits set by
- their peer when sending.
-
-
- The initial value for the flow control window is 65,535 octets for both new streams
- and the overall connection.
-
-
- The frame type determines whether flow control applies to a frame. Of the frames
- specified in this document, only DATA frames are subject to flow
- control; all other frame types do not consume space in the advertised flow control
- window. This ensures that important control frames are not blocked by flow control.
-
-
- Flow control cannot be disabled.
-
-
- HTTP/2 defines only the format and semantics of the WINDOW_UPDATE
- frame ( ). This document does not stipulate how a
- receiver decides when to send this frame or the value that it sends, nor does it
- specify how a sender chooses to send packets. Implementations are able to select
- any algorithm that suits their needs.
-
-
-
-
- Implementations are also responsible for managing how requests and responses are sent
- based on priority; choosing how to avoid head of line blocking for requests; and
- managing the creation of new streams. Algorithm choices for these could interact with
- any flow control algorithm.
-
-
-
-
-
- Flow control is defined to protect endpoints that are operating under resource
- constraints. For example, a proxy needs to share memory between many connections, and
- also might have a slow upstream connection and a fast downstream one. Flow control
- addresses cases where the receiver is unable to process data on one stream, yet wants to
- continue to process other streams in the same connection.
-
-
- Deployments that do not require this capability can advertise a flow control window of
- the maximum size, incrementing the available space when new data is received. This
- effectively disables flow control for that receiver. Conversely, a sender is always
- subject to the flow control window advertised by the receiver.
-
-
- Deployments with constrained resources (for example, memory) can employ flow control to
- limit the amount of memory a peer can consume. Note, however, that this can lead to
- suboptimal use of available network resources if flow control is enabled without
- knowledge of the bandwidth-delay product (see ).
-
-
- Even with full awareness of the current bandwidth-delay product, implementation of flow
- control can be difficult. When using flow control, the receiver MUST read from the TCP
- receive buffer in a timely fashion. Failure to do so could lead to a deadlock when
- critical frames, such as WINDOW_UPDATE , are not read and acted upon.
-
-
-
-
-
-
- A client can assign a priority for a new stream by including prioritization information in
- the HEADERS frame that opens the stream. For an existing
- stream, the PRIORITY frame can be used to change the
- priority.
-
-
- The purpose of prioritization is to allow an endpoint to express how it would prefer its
- peer to allocate resources when managing concurrent streams. Most importantly, priority can
- be used to select streams for transmitting frames when there is limited capacity for
- sending.
-
-
- Streams can be prioritized by marking them as dependent on the completion of other streams
- ( ). Each dependency is assigned a relative weight, a number
- that is used to determine the relative proportion of available resources that are assigned
- to streams dependent on the same stream.
-
-
-
- Explicitly setting the priority for a stream is input to a prioritization process. It
- does not guarantee any particular processing or transmission order for the stream relative
- to any other stream. An endpoint cannot force a peer to process concurrent streams in a
- particular order using priority. Expressing priority is therefore only ever a suggestion.
-
-
- Providing prioritization information is optional, so default values are used if no
- explicit indicator is provided ( ).
-
-
-
-
- Each stream can be given an explicit dependency on another stream. Including a
- dependency expresses a preference to allocate resources to the identified stream rather
- than to the dependent stream.
-
-
- A stream that is not dependent on any other stream is given a stream dependency of 0x0.
- In other words, the non-existent stream 0 forms the root of the tree.
-
-
- A stream that depends on another stream is a dependent stream. The stream upon which a
- stream is dependent is a parent stream. A dependency on a stream that is not currently
- in the tree - such as a stream in the "idle" state - results in that stream being given
- a default priority .
-
-
- When assigning a dependency on another stream, the stream is added as a new dependency
- of the parent stream. Dependent streams that share the same parent are not ordered with
- respect to each other. For example, if streams B and C are dependent on stream A, and
- if stream D is created with a dependency on stream A, this results in a dependency order
- of A followed by B, C, and D in any order.
-
-
-     A                 A
-    / \      ==>      /|\
-   B   C              B D C
-
-
- An exclusive flag allows for the insertion of a new level of dependencies. The
- exclusive flag causes the stream to become the sole dependency of its parent stream,
- causing other dependencies to become dependent on the exclusive stream. In the
- previous example, if stream D is created with an exclusive dependency on stream A, this
- results in D becoming the dependency parent of B and C.
-
-
-                       A
-     A                 |
-    /|\      ==>       D
-   B D C              / \
-                      B   C
-
-
- Inside the dependency tree, a dependent stream SHOULD only be allocated resources if all
- of the streams that it depends on (the chain of parent streams up to 0x0) are either
- closed, or it is not possible to make progress on them.
-
-
- A stream cannot depend on itself. An endpoint MUST treat this as a stream error of type PROTOCOL_ERROR .
-
-
-
-
-
- All dependent streams are allocated an integer weight between 1 and 256 (inclusive).
-
-
- Streams with the same parent SHOULD be allocated resources proportionally based on their
- weight. Thus, if stream B depends on stream A with weight 4, and C depends on stream A
- with weight 12, and if no progress can be made on A, stream B ideally receives one third
- of the resources allocated to stream C.
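The proportional rule above can be made concrete: siblings that can make progress share capacity in proportion to their weights (1 to 256). A small illustrative sketch, with a hypothetical `shares` helper:

```go
package main

import "fmt"

// shares divides one unit of capacity among sibling streams in proportion to
// their weights. Streams that cannot make progress should be omitted from
// the map before calling.
func shares(weights map[uint32]int) map[uint32]float64 {
	total := 0
	for _, w := range weights {
		total += w
	}
	out := make(map[uint32]float64, len(weights))
	for id, w := range weights {
		out[id] = float64(w) / float64(total)
	}
	return out
}

func main() {
	// As in the example above: stream B (id 3) has weight 4 and stream C
	// (id 5) has weight 12, so B ideally receives one third of what C does.
	fmt.Println(shares(map[uint32]int{3: 4, 5: 12})) // map[3:0.25 5:0.75]
}
```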
-
-
-
-
-
- Stream priorities are changed using the PRIORITY frame. Setting a
- dependency causes a stream to become dependent on the identified parent stream.
-
-
- Dependent streams move with their parent stream if the parent is reprioritized. Setting
- a dependency with the exclusive flag for a reprioritized stream moves all the
- dependencies of the new parent stream to become dependent on the reprioritized stream.
-
-
- If a stream is made dependent on one of its own dependencies, the formerly dependent
- stream is first moved to be dependent on the reprioritized stream's previous parent.
- The moved dependency retains its weight.
-
-
-
- For example, consider an original dependency tree where B and C depend on A, D and E
- depend on C, and F depends on D. If A is made dependent on D, then D takes the place
- of A. All other dependency relationships stay the same, except for F, which becomes
- dependent on A if the reprioritization is exclusive.
-
- [figure: reprioritization example showing the intermediate tree after A is
- made dependent on D, followed by the resulting non-exclusive and exclusive
- dependency trees]
-
-
-
-
-
- When a stream is removed from the dependency tree, its dependencies can be moved to
- become dependent on the parent of the closed stream. The weights of new dependencies
- are recalculated by distributing the weight of the dependency of the closed stream
- proportionally based on the weights of its dependencies.
-
-
- Streams that are removed from the dependency tree cause some prioritization information
- to be lost. Resources are shared between streams with the same parent stream, which
- means that if a stream in that set closes or becomes blocked, any spare capacity
- allocated to a stream is distributed to the immediate neighbors of the stream. However,
- if the common dependency is removed from the tree, those streams share resources with
- streams at the next highest level.
-
-
- For example, assume streams A and B share a parent, and streams C and D both depend on
- stream A. Prior to the removal of stream A, if streams A and D are unable to proceed,
- then stream C receives all the resources dedicated to stream A. If stream A is removed
- from the tree, the weight of stream A is divided between streams C and D. If stream D
- is still unable to proceed, this results in stream C receiving a reduced proportion of
- resources. For equal starting weights, C receives one third, rather than one half, of
- available resources.
-
-
- It is possible for a stream to become closed while prioritization information that
- creates a dependency on that stream is in transit. If a stream identified in a
- dependency has no associated priority information, then the dependent stream is instead
- assigned a default priority . This potentially creates
- suboptimal prioritization, since the stream could be given a priority that is different
- to what is intended.
-
-
- To avoid these problems, an endpoint SHOULD retain stream prioritization state for a
- period after streams become closed. The longer state is retained, the lower the chance
- that streams are assigned incorrect or default priority values.
-
-
- This could create a large state burden for an endpoint, so this state MAY be limited.
- An endpoint MAY apply a fixed upper limit on the number of closed streams for which
- prioritization state is tracked to limit state exposure. The amount of additional state
- an endpoint maintains could be dependent on load; under high load, prioritization state
- can be discarded to limit resource commitments. In extreme cases, an endpoint could
- even discard prioritization state for active or reserved streams. If a fixed limit is
- applied, endpoints SHOULD maintain state for at least as many streams as allowed by
- their setting for SETTINGS_MAX_CONCURRENT_STREAMS .
-
-
- An endpoint receiving a PRIORITY frame that changes the priority of a
- closed stream SHOULD alter the dependencies of the streams that depend on it, if it has
- retained enough state to do so.
-
-
-
-
-
- Providing priority information is optional. Streams are assigned a non-exclusive
- dependency on stream 0x0 by default. Pushed streams
- initially depend on their associated stream. In both cases, streams are assigned a
- default weight of 16.
-
-
-
-
-
-
- HTTP/2 framing permits two classes of error:
-
-
- An error condition that renders the entire connection unusable is a connection error.
-
-
- An error in an individual stream is a stream error.
-
-
-
-
- A list of error codes is included in .
-
-
-
-
- A connection error is any error which prevents further processing of the framing layer,
- or which corrupts any connection state.
-
-
- An endpoint that encounters a connection error SHOULD first send a GOAWAY
- frame ( ) with the stream identifier of the last stream that it
- successfully received from its peer. The GOAWAY frame includes an error
- code that indicates why the connection is terminating. After sending the
- GOAWAY frame, the endpoint MUST close the TCP connection.
-
-
- It is possible that the GOAWAY will not be reliably received by the
- receiving endpoint (see ). In the event of a connection error,
- GOAWAY only provides a best effort attempt to communicate with the peer
- about why the connection is being terminated.
-
-
- An endpoint can end a connection at any time. In particular, an endpoint MAY choose to
- treat a stream error as a connection error. Endpoints SHOULD send a
- GOAWAY frame when ending a connection, providing that circumstances
- permit it.
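A minimal sketch of the connection-error procedure above: write a GOAWAY frame carrying the last successfully received stream identifier and an error code, then close the TCP connection. The frame bytes are assembled by hand from the frame layout defined later in this document; the function name and the pipe-based usage are illustrative only.

```go
package main

import (
	"encoding/binary"
	"fmt"
	"io"
	"net"
)

// sendGoAwayAndClose writes a GOAWAY frame (type 0x7, stream 0) containing
// the given last stream identifier and error code, then closes the connection.
func sendGoAwayAndClose(conn net.Conn, lastStreamID, errCode uint32, debug []byte) error {
	payload := make([]byte, 8+len(debug))
	binary.BigEndian.PutUint32(payload[0:4], lastStreamID&0x7fffffff) // reserved bit clear
	binary.BigEndian.PutUint32(payload[4:8], errCode)
	copy(payload[8:], debug)

	// 9-octet frame header: 24-bit length, 8-bit type, 8-bit flags,
	// reserved bit plus 31-bit stream identifier (0 for the connection).
	hdr := []byte{
		byte(len(payload) >> 16), byte(len(payload) >> 8), byte(len(payload)),
		0x7, // GOAWAY
		0x0, // no flags
		0, 0, 0, 0,
	}
	if _, err := conn.Write(append(hdr, payload...)); err != nil {
		return err
	}
	return conn.Close()
}

func main() {
	// Illustrative only: write the frame into an in-memory pipe.
	c1, c2 := net.Pipe()
	go io.Copy(io.Discard, c2)
	fmt.Println(sendGoAwayAndClose(c1, 5, 0x1 /* PROTOCOL_ERROR */, []byte("debug")))
}
```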
-
-
-
-
-
- A stream error is an error related to a specific stream that does not affect processing
- of other streams.
-
-
- An endpoint that detects a stream error sends a RST_STREAM frame ( ) that contains the stream identifier of the stream where the error
- occurred. The RST_STREAM frame includes an error code that indicates the
- type of error.
-
-
- A RST_STREAM is the last frame that an endpoint can send on a stream.
- The peer that sends the RST_STREAM frame MUST be prepared to receive any
- frames that were sent or enqueued for sending by the remote peer. These frames can be
- ignored, except where they modify connection state (such as the state maintained for
- header compression , or flow control).
-
-
- Normally, an endpoint SHOULD NOT send more than one RST_STREAM frame for
- any stream. However, an endpoint MAY send additional RST_STREAM frames if
- it receives frames on a closed stream after more than a round-trip time. This behavior
- is permitted to deal with misbehaving implementations.
-
-
- An endpoint MUST NOT send a RST_STREAM in response to an
- RST_STREAM frame, to avoid looping.
-
-
-
-
-
- If the TCP connection is closed or reset while streams remain in open or half closed
- states, then the endpoint MUST assume that those streams were abnormally interrupted and
- could be incomplete.
-
-
-
-
-
-
- HTTP/2 permits extension of the protocol. Protocol extensions can be used to provide
- additional services or alter any aspect of the protocol, within the limitations described
- in this section. Extensions are effective only within the scope of a single HTTP/2
- connection.
-
-
- Extensions are permitted to use new frame types , new
- settings , or new error
- codes . Registries are established for managing these extension points: frame types , settings and
- error codes .
-
-
- Implementations MUST ignore unknown or unsupported values in all extensible protocol
- elements. Implementations MUST discard frames that have unknown or unsupported types.
- This means that any of these extension points can be safely used by extensions without
- prior arrangement or negotiation. However, extension frames that appear in the middle of
- a header block are not permitted; these MUST be treated
- as a connection error of type
- PROTOCOL_ERROR .
-
-
- However, extensions that could change the semantics of existing protocol components MUST
- be negotiated before being used. For example, an extension that changes the layout of the
- HEADERS frame cannot be used until the peer has given a positive signal
- that this is acceptable. In this case, it could also be necessary to coordinate when the
- revised layout comes into effect. Note that treating any frame other than
- DATA frames as flow controlled is such a change in semantics, and can only
- be done through negotiation.
-
-
- This document doesn't mandate a specific method for negotiating the use of an extension,
- but notes that a setting could be used for that
- purpose. If both peers set a value that indicates willingness to use the extension, then
- the extension can be used. If a setting is used for extension negotiation, the initial
- value MUST be defined so that the extension is initially disabled.
-
-
-
-
-
-
- This specification defines a number of frame types, each identified by a unique 8-bit type
- code. Each frame type serves a distinct purpose either in the establishment and management
- of the connection as a whole, or of individual streams.
-
-
- The transmission of specific frame types can alter the state of a connection. If endpoints
- fail to maintain a synchronized view of the connection state, successful communication
- within the connection will no longer be possible. Therefore, it is important that endpoints
- have a shared comprehension of how the state is affected by the use of any given frame.
-
-
-
-
- DATA frames (type=0x0) convey arbitrary, variable-length sequences of octets associated
- with a stream. One or more DATA frames are used, for instance, to carry HTTP request or
- response payloads.
-
-
- DATA frames MAY also contain arbitrary padding. Padding can be added to DATA frames to
- obscure the size of messages.
-
-
-
-
-
- The DATA frame contains the following fields:
-
-
- An 8-bit field containing the length of the frame padding in units of octets. This
- field is optional and is only present if the PADDED flag is set.
-
-
- Application data. The amount of data is the remainder of the frame payload after
- subtracting the length of the other fields that are present.
-
-
- Padding octets that contain no application semantic value. Padding octets MUST be set
- to zero when sending and ignored when receiving.
-
-
-
-
-
- The DATA frame defines the following flags:
-
-
- Bit 1 being set indicates that this frame is the last that the endpoint will send for
- the identified stream. Setting this flag causes the stream to enter one of the "half closed" states or the "closed" state .
-
-
- Bit 4 being set indicates that the Pad Length field and any padding that it describes
- is present.
-
-
-
-
- DATA frames MUST be associated with a stream. If a DATA frame is received whose stream
- identifier field is 0x0, the recipient MUST respond with a connection error of type
- PROTOCOL_ERROR .
-
-
- DATA frames are subject to flow control and can only be sent when a stream is in the
- "open" or "half closed (remote)" states. The entire DATA frame payload is included in flow
- control, including Pad Length and Padding fields if present. If a DATA frame is received
- whose stream is not in "open" or "half closed (local)" state, the recipient MUST respond
- with a stream error of type
- STREAM_CLOSED .
-
-
- The total number of padding octets is determined by the value of the Pad Length field. If
- the length of the padding is greater than the length of the frame payload, the recipient
- MUST treat this as a connection error of
- type PROTOCOL_ERROR .
-
-
- A frame can be increased in size by one octet by including a Pad Length field with a
- value of zero.
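A sketch of extracting application data from a DATA frame payload under the rules above: if the PADDED flag (0x8) is set, the first octet is the Pad Length, and padding longer than the remaining payload is an error. The helper name is illustrative.

```go
package main

import (
	"errors"
	"fmt"
)

const flagDataPadded = 0x8 // PADDED flag on a DATA frame

// dataFramePayload returns the application data carried by a DATA frame,
// stripping the optional Pad Length field and trailing padding.
func dataFramePayload(flags byte, payload []byte) ([]byte, error) {
	if flags&flagDataPadded == 0 {
		return payload, nil
	}
	if len(payload) == 0 {
		return nil, errors.New("PADDED flag set but no Pad Length field")
	}
	padLen := int(payload[0])
	rest := payload[1:]
	if padLen > len(rest) {
		// Padding longer than the rest of the payload: connection error of
		// type PROTOCOL_ERROR.
		return nil, errors.New("padding exceeds frame payload")
	}
	return rest[:len(rest)-padLen], nil
}

func main() {
	// Pad Length of 2, five octets of data, two octets of zero padding.
	data, err := dataFramePayload(flagDataPadded, []byte{2, 'h', 'e', 'l', 'l', 'o', 0, 0})
	fmt.Printf("%q %v\n", data, err) // "hello" <nil>
}
```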
-
-
-
-
- Padding is a security feature; see .
-
-
-
-
-
- The HEADERS frame (type=0x1) is used to open a stream ,
- and additionally carries a header block fragment. HEADERS frames can be sent on a stream
- in the "open" or "half closed (remote)" states.
-
-
-
-
-
- The HEADERS frame payload has the following fields:
-
-
- An 8-bit field containing the length of the frame padding in units of octets. This
- field is only present if the PADDED flag is set.
-
-
- A single bit flag indicates that the stream dependency is exclusive, see . This field is only present if the PRIORITY flag is set.
-
-
- A 31-bit stream identifier for the stream that this stream depends on, see . This field is only present if the PRIORITY flag is set.
-
-
- An 8-bit weight for the stream, see . Add one to the
- value to obtain a weight between 1 and 256. This field is only present if the
- PRIORITY flag is set.
-
-
- A header block fragment .
-
-
- Padding octets that contain no application semantic value. Padding octets MUST be set
- to zero when sending and ignored when receiving.
-
-
-
-
-
- The HEADERS frame defines the following flags:
-
-
-
- Bit 1 being set indicates that the header block is
- the last that the endpoint will send for the identified stream. Setting this flag
- causes the stream to enter one of the "half closed"
- states .
-
-
- A HEADERS frame carries the END_STREAM flag that signals the end of a stream.
- However, a HEADERS frame with the END_STREAM flag set can be followed by
- CONTINUATION frames on the same stream. Logically, the
- CONTINUATION frames are part of the HEADERS frame.
-
-
-
-
- Bit 3 being set indicates that this frame contains an entire header block and is not followed by any
- CONTINUATION frames.
-
-
- A HEADERS frame without the END_HEADERS flag set MUST be followed by a
- CONTINUATION frame for the same stream. A receiver MUST treat the
- receipt of any other type of frame or a frame on a different stream as a connection error of type
- PROTOCOL_ERROR .
-
-
-
-
- Bit 4 being set indicates that the Pad Length field and any padding that it
- describes is present.
-
-
-
-
- Bit 6 being set indicates that the Exclusive Flag (E), Stream Dependency, and Weight
- fields are present; see .
-
-
-
-
-
-
- The payload of a HEADERS frame contains a header block
- fragment . A header block that does not fit within a HEADERS frame is continued in
- a CONTINUATION frame .
-
-
-
- HEADERS frames MUST be associated with a stream. If a HEADERS frame is received whose
- stream identifier field is 0x0, the recipient MUST respond with a connection error of type
- PROTOCOL_ERROR .
-
-
-
- The HEADERS frame changes the connection state as described in .
-
-
-
- The HEADERS frame includes optional padding. Padding fields and flags are identical to
- those defined for DATA frames .
-
-
- Prioritization information in a HEADERS frame is logically equivalent to a separate
- PRIORITY frame, but inclusion in HEADERS avoids the potential for churn in
- stream prioritization when new streams are created. Prioritization fields in HEADERS frames
- subsequent to the first on a stream reprioritize the
- stream .
-
-
-
-
-
- The PRIORITY frame (type=0x2) specifies the sender-advised
- priority of a stream . It can be sent at any time for an existing stream, including
- closed streams. This enables reprioritization of existing streams.
-
-
-
-
-
- The payload of a PRIORITY frame contains the following fields:
-
-
- A single bit flag indicates that the stream dependency is exclusive, see .
-
-
- A 31-bit stream identifier for the stream that this stream depends on, see .
-
-
- An 8-bit weight for the identified stream dependency, see . Add one to the value to obtain a weight between 1 and 256.
-
-
-
-
-
- The PRIORITY frame does not define any flags.
-
-
-
- The PRIORITY frame is associated with an existing stream. If a PRIORITY frame is received
- with a stream identifier of 0x0, the recipient MUST respond with a connection error of type
- PROTOCOL_ERROR .
-
-
- The PRIORITY frame can be sent on a stream in any of the "reserved (remote)", "open",
- "half closed (local)", "half closed (remote)", or "closed" states, though it cannot be
- sent between consecutive frames that comprise a single header
- block . Note that this frame could arrive after processing or frame sending has
- completed, which would cause it to have no effect on the current stream. For a stream
- that is in the "half closed (remote)" or "closed" - state, this frame can only affect
- processing of the current stream and not frame transmission.
-
-
- The PRIORITY frame is the only frame that can be sent for a stream in the "closed" state.
- This allows for the reprioritization of a group of dependent streams by altering the
- priority of a parent stream, which might be closed. However, a PRIORITY frame sent on a
- closed stream risks being ignored due to the peer having discarded priority state
- information for that stream.
-
-
-
-
-
- The RST_STREAM frame (type=0x3) allows for abnormal termination of a stream. When sent by
- the initiator of a stream, it indicates that they wish to cancel the stream or that an
- error condition has occurred. When sent by the receiver of a stream, it indicates that
- either the receiver is rejecting the stream, requesting that the stream be cancelled, or
- that an error condition has occurred.
-
-
-
-
-
-
- The RST_STREAM frame contains a single unsigned, 32-bit integer identifying the error code . The error code indicates why the stream is being
- terminated.
-
-
-
- The RST_STREAM frame does not define any flags.
-
-
-
- The RST_STREAM frame fully terminates the referenced stream and causes it to enter the
- closed state. After receiving a RST_STREAM on a stream, the receiver MUST NOT send
- additional frames for that stream, with the exception of PRIORITY . However,
- after sending the RST_STREAM, the sending endpoint MUST be prepared to receive and process
- additional frames sent on the stream that might have been sent by the peer prior to the
- arrival of the RST_STREAM.
-
-
-
- RST_STREAM frames MUST be associated with a stream. If a RST_STREAM frame is received
- with a stream identifier of 0x0, the recipient MUST treat this as a connection error of type
- PROTOCOL_ERROR .
-
-
-
- RST_STREAM frames MUST NOT be sent for a stream in the "idle" state. If a RST_STREAM
- frame identifying an idle stream is received, the recipient MUST treat this as a connection error of type
- PROTOCOL_ERROR .
-
-
-
-
-
-
- The SETTINGS frame (type=0x4) conveys configuration parameters that affect how endpoints
- communicate, such as preferences and constraints on peer behavior. The SETTINGS frame is
- also used to acknowledge the receipt of those parameters. Individually, a SETTINGS
- parameter can also be referred to as a "setting".
-
-
- SETTINGS parameters are not negotiated; they describe characteristics of the sending peer,
- which are used by the receiving peer. Different values for the same parameter can be
- advertised by each peer. For example, a client might set a high initial flow control
- window, whereas a server might set a lower value to conserve resources.
-
-
-
- A SETTINGS frame MUST be sent by both endpoints at the start of a connection, and MAY be
- sent at any other time by either endpoint over the lifetime of the connection.
- Implementations MUST support all of the parameters defined by this specification.
-
-
-
- Each parameter in a SETTINGS frame replaces any existing value for that parameter.
- Parameters are processed in the order in which they appear, and a receiver of a SETTINGS
- frame does not need to maintain any state other than the current value of its
- parameters. Therefore, the value of a SETTINGS parameter is the last value that is seen by
- a receiver.
-
-
- SETTINGS parameters are acknowledged by the receiving peer. To enable this, the SETTINGS
- frame defines the following flag:
-
-
- Bit 1 being set indicates that this frame acknowledges receipt and application of the
- peer's SETTINGS frame. When this bit is set, the payload of the SETTINGS frame MUST
- be empty. Receipt of a SETTINGS frame with the ACK flag set and a length field value
- other than 0 MUST be treated as a connection
- error of type FRAME_SIZE_ERROR . For more information, see Settings Synchronization .
-
-
-
-
- SETTINGS frames always apply to a connection, never a single stream. The stream
- identifier for a SETTINGS frame MUST be zero (0x0). If an endpoint receives a SETTINGS
- frame whose stream identifier field is anything other than 0x0, the endpoint MUST respond
- with a connection error of type
- PROTOCOL_ERROR .
-
-
- The SETTINGS frame affects connection state. A badly formed or incomplete SETTINGS frame
- MUST be treated as a connection error of type
- PROTOCOL_ERROR .
-
-
-
-
- The payload of a SETTINGS frame consists of zero or more parameters, each consisting of
- an unsigned 16-bit setting identifier and an unsigned 32-bit value.
-
-
-
-
-
-
-
-
-
- The following parameters are defined:
-
-
-
- Allows the sender to inform the remote endpoint of the maximum size of the header
- compression table used to decode header blocks, in octets. The encoder can select
- any size equal to or less than this value by using signaling specific to the
- header compression format inside a header block. The initial value is 4,096
- octets.
-
-
-
-
- This setting can be used to disable server
- push . An endpoint MUST NOT send a PUSH_PROMISE frame if it
- receives this parameter set to a value of 0. An endpoint that has both set this
- parameter to 0 and had it acknowledged MUST treat the receipt of a
- PUSH_PROMISE frame as a connection error of type
- PROTOCOL_ERROR .
-
-
- The initial value is 1, which indicates that server push is permitted. Any value
- other than 0 or 1 MUST be treated as a connection error of type
- PROTOCOL_ERROR .
-
-
-
-
- Indicates the maximum number of concurrent streams that the sender will allow.
- This limit is directional: it applies to the number of streams that the sender
- permits the receiver to create. Initially there is no limit to this value. It is
- recommended that this value be no smaller than 100, so as to not unnecessarily
- limit parallelism.
-
-
- A value of 0 for SETTINGS_MAX_CONCURRENT_STREAMS SHOULD NOT be treated as special
- by endpoints. A zero value does prevent the creation of new streams; however, this
- can also happen for any limit that is exhausted with active streams. Servers
- SHOULD only set a zero value for short durations; if a server does not wish to
- accept requests, closing the connection could be preferable.
-
-
-
-
- Indicates the sender's initial window size (in octets) for stream level flow
- control. The initial value is 216 -1 (65,535) octets.
-
-
- This setting affects the window size of all streams, including existing streams,
- see .
-
-
- Values above the maximum flow control window size of 231 -1 MUST
- be treated as a connection error of
- type FLOW_CONTROL_ERROR .
-
-
-
-
- Indicates the size of the largest frame payload that the sender is willing to
- receive, in octets.
-
-
- The initial value is 214 (16,384) octets. The value advertised by
- an endpoint MUST be between this initial value and the maximum allowed frame size
- (224 -1 or 16,777,215 octets), inclusive. Values outside this range
- MUST be treated as a connection error
- of type PROTOCOL_ERROR .
-
-
-
-
- This advisory setting informs a peer of the maximum size of header list that the
- sender is prepared to accept, in octets. The value is based on the uncompressed
- size of header fields, including the length of the name and value in octets plus
- an overhead of 32 octets for each header field.
-
-
- For any given request, a lower limit than what is advertised MAY be enforced. The
- initial value of this setting is unlimited.
-
-
-
-
-
- An endpoint that receives a SETTINGS frame with any unknown or unsupported identifier
- MUST ignore that setting.
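A sketch of reading a SETTINGS payload as described above: six-octet entries consisting of a 16-bit identifier and a 32-bit value, applied in order, with unknown identifiers ignored. The identifier constants mirror the parameters defined above; the map-based storage and names are illustrative. Sending the ACK SETTINGS frame afterwards is noted but not shown.

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

const (
	settingHeaderTableSize      = 0x1
	settingEnablePush           = 0x2
	settingMaxConcurrentStreams = 0x3
	settingInitialWindowSize    = 0x4
	settingMaxFrameSize         = 0x5
	settingMaxHeaderListSize    = 0x6
)

// applySettings parses a SETTINGS frame payload and applies each parameter in
// order; the last value seen for an identifier wins. After processing, the
// receiver must immediately send a SETTINGS frame with the ACK flag set
// (not shown here).
func applySettings(payload []byte, current map[uint16]uint32) error {
	if len(payload)%6 != 0 {
		// A badly formed SETTINGS frame is a connection error.
		return errors.New("SETTINGS payload is not a multiple of 6 octets")
	}
	for off := 0; off < len(payload); off += 6 {
		id := binary.BigEndian.Uint16(payload[off : off+2])
		val := binary.BigEndian.Uint32(payload[off+2 : off+6])
		switch id {
		case settingHeaderTableSize, settingEnablePush, settingMaxConcurrentStreams,
			settingInitialWindowSize, settingMaxFrameSize, settingMaxHeaderListSize:
			current[id] = val
		default:
			// Unknown or unsupported setting: ignore.
		}
	}
	return nil
}

func main() {
	s := map[uint16]uint32{}
	// SETTINGS_MAX_CONCURRENT_STREAMS (0x3) = 100
	payload := []byte{0x00, 0x03, 0x00, 0x00, 0x00, 0x64}
	fmt.Println(applySettings(payload, s), s[settingMaxConcurrentStreams]) // <nil> 100
}
```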
-
-
-
-
-
- Most values in SETTINGS benefit from or require an understanding of when the peer has
- received and applied the changed parameter values. In order to provide
- such synchronization timepoints, the recipient of a SETTINGS frame in which the ACK flag
- is not set MUST apply the updated parameters as soon as possible upon receipt.
-
-
- The values in the SETTINGS frame MUST be processed in the order they appear, with no
- other frame processing between values. Unsupported parameters MUST be ignored. Once
- all values have been processed, the recipient MUST immediately emit a SETTINGS frame
- with the ACK flag set. Upon receiving a SETTINGS frame with the ACK flag set, the sender
- of the altered parameters can rely on the setting having been applied.
-
-
- If the sender of a SETTINGS frame does not receive an acknowledgement within a
- reasonable amount of time, it MAY issue a connection error of type
- SETTINGS_TIMEOUT .
-
-
-
-
-
-
- The PUSH_PROMISE frame (type=0x5) is used to notify the peer endpoint in advance of
- streams the sender intends to initiate. The PUSH_PROMISE frame includes the unsigned
- 31-bit identifier of the stream the endpoint plans to create along with a set of headers
- that provide additional context for the stream. contains a
- thorough description of the use of PUSH_PROMISE frames.
-
-
-
-
-
-
- The PUSH_PROMISE frame payload has the following fields:
-
-
- An 8-bit field containing the length of the frame padding in units of octets. This
- field is only present if the PADDED flag is set.
-
-
- A single reserved bit.
-
-
- An unsigned 31-bit integer that identifies the stream that is reserved by the
- PUSH_PROMISE. The promised stream identifier MUST be a valid choice for the next
- stream sent by the sender (see new stream
- identifier ).
-
-
- A header block fragment containing request header
- fields.
-
-
- Padding octets.
-
-
-
-
-
- The PUSH_PROMISE frame defines the following flags:
-
-
-
- Bit 3 being set indicates that this frame contains an entire header block and is not followed by any
- CONTINUATION frames.
-
-
- A PUSH_PROMISE frame without the END_HEADERS flag set MUST be followed by a
- CONTINUATION frame for the same stream. A receiver MUST treat the receipt of any
- other type of frame or a frame on a different stream as a connection error of type
- PROTOCOL_ERROR .
-
-
-
-
- Bit 4 being set indicates that the Pad Length field and any padding that it
- describes is present.
-
-
-
-
-
-
- PUSH_PROMISE frames MUST be associated with an existing, peer-initiated stream. The stream
- identifier of a PUSH_PROMISE frame indicates the stream it is associated with. If the
- stream identifier field specifies the value 0x0, a recipient MUST respond with a connection error of type
- PROTOCOL_ERROR .
-
-
-
- Promised streams are not required to be used in the order they are promised. The
- PUSH_PROMISE only reserves stream identifiers for later use.
-
-
-
- PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH setting of the
- peer endpoint is set to 0. An endpoint that has set this setting and has received
- acknowledgement MUST treat the receipt of a PUSH_PROMISE frame as a connection error of type
- PROTOCOL_ERROR .
-
-
- Recipients of PUSH_PROMISE frames can choose to reject promised streams by returning a
- RST_STREAM referencing the promised stream identifier back to the sender of
- the PUSH_PROMISE.
-
-
-
- A PUSH_PROMISE frame modifies the connection state in two ways. The inclusion of a header block potentially modifies the state maintained for
- header compression. PUSH_PROMISE also reserves a stream for later use, causing the
- promised stream to enter the "reserved" state. A sender MUST NOT send a PUSH_PROMISE on a
- stream unless that stream is either "open" or "half closed (remote)"; the sender MUST
- ensure that the promised stream is a valid choice for a new stream identifier (that is, the promised stream MUST
- be in the "idle" state).
-
-
- Since PUSH_PROMISE reserves a stream, ignoring a PUSH_PROMISE frame causes the stream
- state to become indeterminate. A receiver MUST treat the receipt of a PUSH_PROMISE on a
- stream that is neither "open" nor "half closed (local)" as a connection error of type
- PROTOCOL_ERROR . However, an endpoint that has sent
- RST_STREAM on the associated stream MUST handle PUSH_PROMISE frames that
- might have been created before the RST_STREAM frame is received and
- processed.
-
-
- A receiver MUST treat the receipt of a PUSH_PROMISE that promises an illegal stream identifier (that is, an identifier for a
- stream that is not currently in the "idle" state) as a connection error of type
- PROTOCOL_ERROR .
-
-
-
- The PUSH_PROMISE frame includes optional padding. Padding fields and flags are identical
- to those defined for DATA frames .
-
-
-
-
-
- The PING frame (type=0x6) is a mechanism for measuring a minimal round trip time from the
- sender, as well as determining whether an idle connection is still functional. PING
- frames can be sent from any endpoint.
-
-
-
-
-
-
- In addition to the frame header, PING frames MUST contain 8 octets of data in the payload.
- A sender can include any value it chooses and use those bytes in any fashion.
-
-
- Receivers of a PING frame that does not include an ACK flag MUST send a PING frame with
- the ACK flag set in response, with an identical payload. PING responses SHOULD be given
- higher priority than any other frame.
-
-
-
- The PING frame defines the following flags:
-
-
- Bit 1 being set indicates that this PING frame is a PING response. An endpoint MUST
- set this flag in PING responses. An endpoint MUST NOT respond to PING frames
- containing this flag.
-
-
-
-
- PING frames are not associated with any individual stream. If a PING frame is received
- with a stream identifier field value other than 0x0, the recipient MUST respond with a
- connection error of type
- PROTOCOL_ERROR .
-
-
- Receipt of a PING frame with a length field value other than 8 MUST be treated as a connection error of type
- FRAME_SIZE_ERROR .
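The PING rules above are simple enough to show end to end: a PING whose payload is not exactly 8 octets is a FRAME_SIZE_ERROR, a PING carrying the ACK flag is not answered, and any other PING is answered with an ACK PING carrying an identical payload. The constant and helper name are illustrative.

```go
package main

import (
	"errors"
	"fmt"
)

const flagPingAck = 0x1 // ACK flag on a PING frame

// handlePing validates a received PING payload and, for a request, returns
// the flags and payload of the PING response that must be sent.
func handlePing(flags byte, payload []byte) (respFlags byte, respPayload []byte, err error) {
	if len(payload) != 8 {
		// Connection error of type FRAME_SIZE_ERROR.
		return 0, nil, errors.New("PING payload must be exactly 8 octets")
	}
	if flags&flagPingAck != 0 {
		// This is already a response; do not reply to it.
		return 0, nil, nil
	}
	resp := make([]byte, 8)
	copy(resp, payload) // the response carries an identical payload
	return flagPingAck, resp, nil
}

func main() {
	f, p, err := handlePing(0, []byte{1, 2, 3, 4, 5, 6, 7, 8})
	fmt.Println(f, p, err) // 1 [1 2 3 4 5 6 7 8] <nil>
}
```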
-
-
-
-
-
-
- The GOAWAY frame (type=0x7) informs the remote peer to stop creating streams on this
- connection. GOAWAY can be sent by either the client or the server. Once sent, the sender
- will ignore frames sent on any new streams with identifiers higher than the included last
- stream identifier. Receivers of a GOAWAY frame MUST NOT open additional streams on the
- connection, although a new connection can be established for new streams.
-
-
- The purpose of this frame is to allow an endpoint to gracefully stop accepting new
- streams, while still finishing processing of previously established streams. This enables
- administrative actions, like server maintenance.
-
-
- There is an inherent race condition between an endpoint starting new streams and the
- remote sending a GOAWAY frame. To deal with this case, the GOAWAY contains the stream
- identifier of the last peer-initiated stream which was or might be processed on the
- sending endpoint in this connection. For instance, if the server sends a GOAWAY frame,
- the identified stream is the highest numbered stream initiated by the client.
-
-
- If the receiver of the GOAWAY has sent data on streams with a higher stream identifier
- than what is indicated in the GOAWAY frame, those streams are not or will not be
- processed. The receiver of the GOAWAY frame can treat the streams as though they had
- never been created at all, thereby allowing those streams to be retried later on a new
- connection.
-
-
- Endpoints SHOULD always send a GOAWAY frame before closing a connection so that the remote
- can know whether a stream has been partially processed or not. For example, if an HTTP
- client sends a POST at the same time that a server closes a connection, the client cannot
- know if the server started to process that POST request if the server does not send a
- GOAWAY frame to indicate what streams it might have acted on.
-
-
- An endpoint might choose to close a connection without sending GOAWAY for misbehaving
- peers.
-
-
-
-
-
-
- The GOAWAY frame does not define any flags.
-
-
- The GOAWAY frame applies to the connection, not a specific stream. An endpoint MUST treat
- a GOAWAY frame with a stream identifier other than 0x0 as a connection error of type
- PROTOCOL_ERROR .
-
-
- The last stream identifier in the GOAWAY frame contains the highest numbered stream
- identifier on which the sender of the GOAWAY frame might have taken some action, or
- might yet take action. All streams up to and including the identified stream might
- have been processed in some way. The last stream identifier can be set to 0 if no streams
- were processed.
-
-
- In this context, "processed" means that some data from the stream was passed to some
- higher layer of software that might have taken some action as a result.
-
-
- If a connection terminates without a GOAWAY frame, the last stream identifier is
- effectively the highest possible stream identifier.
-
-
- On streams with lower or equal numbered identifiers that were not closed completely prior
- to the connection being closed, re-attempting requests, transactions, or any protocol
- activity is not possible, with the exception of idempotent actions like HTTP GET, PUT, or
- DELETE. Any protocol activity that uses higher numbered streams can be safely retried
- using a new connection.
-
-
- Activity on streams numbered lower or equal to the last stream identifier might still
- complete successfully. The sender of a GOAWAY frame might gracefully shut down a
- connection by sending a GOAWAY frame, maintaining the connection in an open state until
- all in-progress streams complete.
-
-
- An endpoint MAY send multiple GOAWAY frames if circumstances change. For instance, an
- endpoint that sends GOAWAY with NO_ERROR during graceful shutdown could
- subsequently encounter a condition that requires immediate termination of the connection.
- The last stream identifier from the last GOAWAY frame received indicates which streams
- could have been acted upon. Endpoints MUST NOT increase the value they send in the last
- stream identifier, since the peers might already have retried unprocessed requests on
- another connection.
-
-
- A client that is unable to retry requests loses all requests that are in flight when the
- server closes the connection. This is especially true for intermediaries that might
- not be serving clients using HTTP/2. A server that is attempting to gracefully shut down
- a connection SHOULD send an initial GOAWAY frame with the last stream identifier set to
- 231 -1 and a NO_ERROR code. This signals to the client that
- a shutdown is imminent and that no further requests can be initiated. After waiting at
- least one round trip time, the server can send another GOAWAY frame with an updated last
- stream identifier. This ensures that a connection can be cleanly shut down without losing
- requests.
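The graceful-shutdown advice above reduces to a two-step GOAWAY sequence: first a GOAWAY with the maximum stream identifier and NO_ERROR, then, after at least one round-trip time, a GOAWAY with the definitive last stream identifier. The sketch below builds the frame bytes by hand from the layout in this document; the fixed sleep stands in for a measured round-trip time, and all names are illustrative.

```go
package main

import (
	"encoding/binary"
	"fmt"
	"io"
	"net"
	"time"
)

// writeGoAway writes a single GOAWAY frame (type 0x7, stream 0) with no
// additional debug data.
func writeGoAway(conn net.Conn, lastStreamID, errCode uint32) error {
	frame := make([]byte, 9+8)
	frame[2] = 8   // 24-bit payload length = 8
	frame[3] = 0x7 // GOAWAY; flags and stream identifier remain zero
	binary.BigEndian.PutUint32(frame[9:13], lastStreamID&0x7fffffff)
	binary.BigEndian.PutUint32(frame[13:17], errCode)
	_, err := conn.Write(frame)
	return err
}

// gracefulShutdown signals that no further requests should be started, waits
// for in-flight requests to be identified, then sends the definitive GOAWAY.
func gracefulShutdown(conn net.Conn, lastProcessedStreamID uint32) error {
	const errNoError = 0x0
	// Step 1: a last stream identifier of 2^31-1 signals an imminent shutdown.
	if err := writeGoAway(conn, 1<<31-1, errNoError); err != nil {
		return err
	}
	// Step 2: wait at least one round-trip time (approximated here).
	time.Sleep(1 * time.Second)
	// Step 3: send the updated, definitive last stream identifier.
	return writeGoAway(conn, lastProcessedStreamID, errNoError)
}

func main() {
	c1, c2 := net.Pipe()
	go io.Copy(io.Discard, c2)
	fmt.Println(gracefulShutdown(c1, 5))
}
```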
-
-
-
- After sending a GOAWAY frame, the sender can discard frames for streams with identifiers
- higher than the identified last stream. However, any frames that alter connection state
- cannot be completely ignored. For instance, HEADERS ,
- PUSH_PROMISE and CONTINUATION frames MUST be minimally
- processed to ensure the state maintained for header compression is consistent (see ); similarly DATA frames MUST be counted toward the connection flow
- control window. Failure to process these frames can cause flow control or header
- compression state to become unsynchronized.
-
-
-
- The GOAWAY frame also contains a 32-bit error code that
- contains the reason for closing the connection.
-
-
- Endpoints MAY append opaque data to the payload of any GOAWAY frame. Additional debug
- data is intended for diagnostic purposes only and carries no semantic value. Debug
- information could contain security- or privacy-sensitive data. Logged or otherwise
- persistently stored debug data MUST have adequate safeguards to prevent unauthorized
- access.
-
-
-
-
-
- The WINDOW_UPDATE frame (type=0x8) is used to implement flow control; see for an overview.
-
-
- Flow control operates at two levels: on each individual stream and on the entire
- connection.
-
-
- Both types of flow control are hop-by-hop; that is, only between the two endpoints.
- Intermediaries do not forward WINDOW_UPDATE frames between dependent connections.
- However, throttling of data transfer by any receiver can indirectly cause the propagation
- of flow control information toward the original sender.
-
-
- Flow control only applies to frames that are identified as being subject to flow control.
- Of the frame types defined in this document, this includes only DATA frames.
- Frames that are exempt from flow control MUST be accepted and processed, unless the
- receiver is unable to assign resources to handling the frame. A receiver MAY respond with
- a stream error or connection error of type
- FLOW_CONTROL_ERROR if it is unable to accept a frame.
-
-
-
-
-
- The payload of a WINDOW_UPDATE frame is one reserved bit, plus an unsigned 31-bit integer
- indicating the number of octets that the sender can transmit in addition to the existing
- flow control window. The legal range for the increment to the flow control window is 1 to
- 231 -1 (0x7fffffff) octets.
-
-
- The WINDOW_UPDATE frame does not define any flags.
-
-
- The WINDOW_UPDATE frame can be specific to a stream or to the entire connection. In the
- former case, the frame's stream identifier indicates the affected stream; in the latter,
- the value "0" indicates that the entire connection is the subject of the frame.
-
-
- A receiver MUST treat the receipt of a WINDOW_UPDATE frame with a flow control window
- increment of 0 as a stream error of type
- PROTOCOL_ERROR ; errors on the connection flow control window MUST be
- treated as a connection error .
-
-
- WINDOW_UPDATE can be sent by a peer that has sent a frame bearing the END_STREAM flag.
- This means that a receiver could receive a WINDOW_UPDATE frame on a "half closed (remote)"
- or "closed" stream. A receiver MUST NOT treat this as an error, see .
-
-
- A receiver that receives a flow controlled frame MUST always account for its contribution
- against the connection flow control window, unless the receiver treats this as a connection error . This is necessary even if the
- frame is in error. Since the sender counts the frame toward the flow control window, if
- the receiver does not, the flow control window at sender and receiver can become
- different.
-
-
-
-
- Flow control in HTTP/2 is implemented using a window kept by each sender on every
- stream. The flow control window is a simple integer value that indicates how many octets
- of data the sender is permitted to transmit; as such, its size is a measure of the
- buffering capacity of the receiver.
-
-
- Two flow control windows are applicable: the stream flow control window and the
- connection flow control window. The sender MUST NOT send a flow controlled frame with a
- length that exceeds the space available in either of the flow control windows advertised
- by the receiver. Frames with zero length with the END_STREAM flag set (that is, an
- empty DATA frame) MAY be sent if there is no available space in either
- flow control window.
-
-
- For flow control calculations, the 9 octet frame header is not counted.
-
-
- After sending a flow controlled frame, the sender reduces the space available in both
- windows by the length of the transmitted frame.
-
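- A minimal sketch of this send-side bookkeeping (Go, hypothetical names, not a definitive
- implementation): the sender checks both windows before sending a flow-controlled DATA
- payload and charges the payload length to each; the 9-octet frame header is not counted.
-
-   // sendWindows tracks the space a sender believes is available.
-   type sendWindows struct {
-       conn   int64 // connection flow control window
-       stream int64 // flow control window for one stream
-   }
-
-   // trySend reports whether n octets of DATA payload may be sent now and,
-   // if so, charges them against both windows.
-   func (w *sendWindows) trySend(n int64) bool {
-       if n == 0 {
-           return true // an empty DATA frame (e.g. bearing END_STREAM) needs no window space
-       }
-       if n > w.conn || n > w.stream {
-           return false // must wait for WINDOW_UPDATE frames
-       }
-       w.conn -= n
-       w.stream -= n
-       return true
-   }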
-
- The receiver of a frame sends a WINDOW_UPDATE frame as it consumes data and frees up
- space in flow control windows. Separate WINDOW_UPDATE frames are sent for the stream
- and connection level flow control windows.
-
-
- A sender that receives a WINDOW_UPDATE frame updates the corresponding window by the
- amount specified in the frame.
-
-
- A sender MUST NOT allow a flow control window to exceed 2^31-1 octets.
- If a sender receives a WINDOW_UPDATE that causes a flow control window to exceed this
- maximum, it MUST terminate either the stream or the connection, as appropriate. For
- streams, the sender sends a RST_STREAM frame with an error code of
- FLOW_CONTROL_ERROR; for the connection, it sends a GOAWAY
- frame with an error code of FLOW_CONTROL_ERROR.
-
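- Continuing the sketch above (illustrative Go, hypothetical names), applying a
- WINDOW_UPDATE has to detect a window growing past 2^31-1:
-
-   const maxWindow = int64(1)<<31 - 1 // 2^31-1, the largest permitted window
-
-   // applyWindowUpdate adds a WINDOW_UPDATE increment to a window kept by the
-   // sender. It reports false when the result would exceed 2^31-1, in which
-   // case the stream or connection is terminated with FLOW_CONTROL_ERROR.
-   func applyWindowUpdate(window *int64, increment uint32) bool {
-       if *window+int64(increment) > maxWindow {
-           return false
-       }
-       *window += int64(increment)
-       return true
-   }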
-
- Flow controlled frames from the sender and WINDOW_UPDATE frames from the receiver are
- completely asynchronous with respect to each other. This property allows a receiver to
- aggressively update the window size kept by the sender to prevent streams from stalling.
-
-
-
-
-
- When an HTTP/2 connection is first established, new streams are created with an initial
- flow control window size of 65,535 octets. The connection flow control window is 65,535
- octets. Both endpoints can adjust the initial window size for new streams by including
- a value for SETTINGS_INITIAL_WINDOW_SIZE in the SETTINGS
- frame that forms part of the connection preface. The connection flow control window can
- only be changed using WINDOW_UPDATE frames.
-
-
- Prior to receiving a SETTINGS frame that sets a value for
- SETTINGS_INITIAL_WINDOW_SIZE , an endpoint can only use the default
- initial window size when sending flow controlled frames. Similarly, the connection flow
- control window is set to the default initial window size until a WINDOW_UPDATE frame is
- received.
-
-
- A SETTINGS frame can alter the initial flow control window size for all
- current streams. When the value of SETTINGS_INITIAL_WINDOW_SIZE changes,
- a receiver MUST adjust the size of all stream flow control windows that it maintains by
- the difference between the new value and the old value.
-
-
- A change to SETTINGS_INITIAL_WINDOW_SIZE can cause the available space in
- a flow control window to become negative. A sender MUST track the negative flow control
- window, and MUST NOT send new flow controlled frames until it receives WINDOW_UPDATE
- frames that cause the flow control window to become positive.
-
-
- For example, if the client sends 60KB immediately on connection establishment, and the
- server sets the initial window size to be 16KB, the client will recalculate the
- available flow control window to be -44KB on receipt of the SETTINGS
- frame. The client retains a negative flow control window until WINDOW_UPDATE frames
- restore the window to being positive, after which the client can resume sending.
-
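- The adjustment described above can be sketched as follows (illustrative Go, hypothetical
- names); windows are kept as signed integers precisely so that a reduction can leave them
- negative:
-
-   // adjustInitialWindow applies a change to SETTINGS_INITIAL_WINDOW_SIZE by
-   // shifting every stream flow control window by the difference between the
-   // new and old values. A reduction can leave windows negative, and the
-   // sender must then wait for WINDOW_UPDATE frames before sending again.
-   func adjustInitialWindow(streamWindows map[uint32]int64, oldSize, newSize uint32) {
-       delta := int64(newSize) - int64(oldSize)
-       for id := range streamWindows {
-           streamWindows[id] += delta // the connection window is deliberately untouched
-       }
-   }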
-
- A SETTINGS frame cannot alter the connection flow control window.
-
-
- An endpoint MUST treat a change to SETTINGS_INITIAL_WINDOW_SIZE that
- causes any flow control window to exceed the maximum size as a connection error of type
- FLOW_CONTROL_ERROR .
-
-
-
-
-
- A receiver that wishes to use a smaller flow control window than the current size can
- send a new SETTINGS frame. However, the receiver MUST be prepared to
- receive data that exceeds this window size, since the sender might send data that
- exceeds the lower limit prior to processing the SETTINGS frame.
-
-
- After sending a SETTINGS frame that reduces the initial flow control window size, a
- receiver has two options for handling streams that exceed flow control limits:
-
-
- The receiver can immediately send RST_STREAM with
- FLOW_CONTROL_ERROR error code for the affected streams.
-
-
- The receiver can accept the streams and tolerate the resulting head of line
- blocking, sending WINDOW_UPDATE frames as it consumes data.
-
-
-
-
-
-
-
-
- The CONTINUATION frame (type=0x9) is used to continue a sequence of header block fragments . Any number of CONTINUATION frames can
- be sent on an existing stream, as long as the preceding frame is on the same stream and is
- a HEADERS , PUSH_PROMISE or CONTINUATION frame without the
- END_HEADERS flag set.
-
-
-
-
-
-
- The CONTINUATION frame payload contains a header block
- fragment .
-
-
-
- The CONTINUATION frame defines the following flag:
-
-
-
- Bit 3 being set indicates that this frame ends a header
- block .
-
-
- If the END_HEADERS bit is not set, this frame MUST be followed by another
- CONTINUATION frame. A receiver MUST treat the receipt of any other type of frame or
- a frame on a different stream as a connection
- error of type PROTOCOL_ERROR .
-
-
-
-
-
-
- The CONTINUATION frame changes the connection state as defined in .
-
-
-
- CONTINUATION frames MUST be associated with a stream. If a CONTINUATION frame is received
- whose stream identifier field is 0x0, the recipient MUST respond with a connection error of type PROTOCOL_ERROR.
-
-
-
- A CONTINUATION frame MUST be preceded by a HEADERS ,
- PUSH_PROMISE or CONTINUATION frame without the END_HEADERS flag set. A
- recipient that observes violation of this rule MUST respond with a connection error of type
- PROTOCOL_ERROR .
-
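- This rule amounts to a small amount of per-connection state. The Go sketch below
- (hypothetical names, not a real API) shows one way a receiver might track whether a
- CONTINUATION frame is currently permitted; any other frame while a header block is open,
- or a CONTINUATION at any other time, is a connection error of type PROTOCOL_ERROR.
-
-   // headerBlockState remembers whether a header block is in progress.
-   type headerBlockState struct {
-       inProgress bool   // a HEADERS/PUSH_PROMISE/CONTINUATION arrived without END_HEADERS
-       streamID   uint32 // the stream that the open header block belongs to
-   }
-
-   // allowContinuation reports whether a CONTINUATION frame on the given
-   // stream is permitted at this point in the connection.
-   func (s *headerBlockState) allowContinuation(streamID uint32) bool {
-       return s.inProgress && s.streamID == streamID && streamID != 0
-   }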
-
-
-
-
-
- Error codes are 32-bit fields that are used in RST_STREAM and
- GOAWAY frames to convey the reasons for the stream or connection error.
-
-
-
- Error codes share a common code space. Some error codes apply only to either streams or the
- entire connection and have no defined semantics in the other context.
-
-
-
- The following error codes are defined:
-
-
- The associated condition is not a result of an error. For example, a
- GOAWAY might include this code to indicate graceful shutdown of a
- connection.
-
-
- The endpoint detected an unspecific protocol error. This error is for use when a more
- specific error code is not available.
-
-
- The endpoint encountered an unexpected internal error.
-
-
- The endpoint detected that its peer violated the flow control protocol.
-
-
- The endpoint sent a SETTINGS frame, but did not receive a response in a
- timely manner. See Settings Synchronization .
-
-
- The endpoint received a frame after a stream was half closed.
-
-
- The endpoint received a frame with an invalid size.
-
-
- The endpoint refuses the stream prior to performing any application processing, see
- for details.
-
-
- Used by the endpoint to indicate that the stream is no longer needed.
-
-
- The endpoint is unable to maintain the header compression context for the connection.
-
-
- The connection established in response to a CONNECT
- request was reset or abnormally closed.
-
-
- The endpoint detected that its peer is exhibiting a behavior that might be generating
- excessive load.
-
-
- The underlying transport has properties that do not meet minimum security
- requirements (see ).
-
-
-
-
- Unknown or unsupported error codes MUST NOT trigger any special behavior. These MAY be
- treated by an implementation as being equivalent to INTERNAL_ERROR .
-
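- For reference, the defined codes and their numeric values (repeated in the error code
- registry later in this document) could be expressed as Go constants along the lines of
- the following sketch; the names mirror the registry entries, and the type is hypothetical.
-
-   // ErrCode is a 32-bit HTTP/2 error code as carried in RST_STREAM and GOAWAY frames.
-   type ErrCode uint32
-
-   const (
-       NoError            ErrCode = 0x0 // graceful shutdown
-       ProtocolError      ErrCode = 0x1
-       InternalError      ErrCode = 0x2
-       FlowControlError   ErrCode = 0x3
-       SettingsTimeout    ErrCode = 0x4
-       StreamClosed       ErrCode = 0x5
-       FrameSizeError     ErrCode = 0x6
-       RefusedStream      ErrCode = 0x7
-       Cancel             ErrCode = 0x8
-       CompressionError   ErrCode = 0x9
-       ConnectError       ErrCode = 0xa
-       EnhanceYourCalm    ErrCode = 0xb
-       InadequateSecurity ErrCode = 0xc
-   )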
-
-
-
-
- HTTP/2 is intended to be as compatible as possible with current uses of HTTP. This means
- that, from the application perspective, the features of the protocol are largely
- unchanged. To achieve this, all request and response semantics are preserved, although the
- syntax of conveying those semantics has changed.
-
-
- Thus, the specification and requirements of HTTP/1.1 Semantics and Content ,
- Conditional Requests , Range Requests , Caching and Authentication are applicable to
- HTTP/2. Selected portions of HTTP/1.1 Message Syntax and Routing , such as the HTTP
- and HTTPS URI schemes, are also applicable in HTTP/2, but the expression of those
- semantics for this protocol is defined in the sections below.
-
-
-
-
- A client sends an HTTP request on a new stream, using a previously unused stream identifier . A server sends an HTTP response on
- the same stream as the request.
-
-
- An HTTP message (request or response) consists of:
-
-
- for a response only, zero or more HEADERS frames (each followed by zero
- or more CONTINUATION frames) containing the message headers of
- informational (1xx) HTTP responses (see and ),
- and
-
-
- one HEADERS frame (followed by zero or more CONTINUATION
- frames) containing the message headers (see ), and
-
-
- zero or more DATA frames containing the message payload (see ), and
-
-
- optionally, one HEADERS frame, followed by zero or more
- CONTINUATION frames containing the trailer-part, if present (see ).
-
-
- The last frame in the sequence bears an END_STREAM flag, noting that a
- HEADERS frame bearing the END_STREAM flag can be followed by
- CONTINUATION frames that carry any remaining portions of the header block.
-
-
- Other frames (from any stream) MUST NOT occur between either HEADERS frame
- and any CONTINUATION frames that might follow.
-
-
-
- Trailing header fields are carried in a header block that also terminates the stream.
- That is, a sequence starting with a HEADERS frame, followed by zero or more
- CONTINUATION frames, where the HEADERS frame bears an
- END_STREAM flag. Header blocks after the first that do not terminate the stream are not
- part of an HTTP request or response.
-
-
- A HEADERS frame (and associated CONTINUATION frames) can
- only appear at the start or end of a stream. An endpoint that receives a
- HEADERS frame without the END_STREAM flag set after receiving a final
- (non-informational) status code MUST treat the corresponding request or response as malformed .
-
-
-
- An HTTP request/response exchange fully consumes a single stream. A request starts with
- the HEADERS frame that puts the stream into an "open" state. The request
- ends with a frame bearing END_STREAM, which causes the stream to become "half closed
- (local)" for the client and "half closed (remote)" for the server. A response starts with
- a HEADERS frame and ends with a frame bearing END_STREAM, which places the
- stream in the "closed" state.
-
-
-
-
-
- HTTP/2 removes support for the 101 (Switching Protocols) informational status code
- ( ).
-
-
- The semantics of 101 (Switching Protocols) aren't applicable to a multiplexed protocol.
- Alternative protocols are able to use the same mechanisms that HTTP/2 uses to negotiate
- their use (see ).
-
-
-
-
-
- HTTP header fields carry information as a series of key-value pairs. For a listing of
- registered HTTP headers, see the Message Header Field Registry maintained at .
-
-
-
-
- While HTTP/1.x used the message start-line (see ) to convey the target URI and method of the request, and the
- status code for the response, HTTP/2 uses special pseudo-header fields beginning with
- the ':' character (ASCII 0x3a) for this purpose.
-
-
- Pseudo-header fields are not HTTP header fields. Endpoints MUST NOT generate
- pseudo-header fields other than those defined in this document.
-
-
- Pseudo-header fields are only valid in the context in which they are defined.
- Pseudo-header fields defined for requests MUST NOT appear in responses; pseudo-header
- fields defined for responses MUST NOT appear in requests. Pseudo-header fields MUST
- NOT appear in trailers. Endpoints MUST treat a request or response that contains
- undefined or invalid pseudo-header fields as malformed .
-
-
- Just as in HTTP/1.x, header field names are strings of ASCII characters that are
- compared in a case-insensitive fashion. However, header field names MUST be converted
- to lowercase prior to their encoding in HTTP/2. A request or response containing
- uppercase header field names MUST be treated as malformed .
-
-
- All pseudo-header fields MUST appear in the header block before regular header fields.
- Any request or response that contains a pseudo-header field that appears in a header
- block after a regular header field MUST be treated as malformed .
-
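- The ordering and case rules above require very little state to check. The following Go
- sketch (hypothetical names, not a real API) treats a violation as grounds for declaring
- the message malformed; checking for undefined pseudo-header fields is out of scope here.
-
-   // headerField is a decoded name/value pair; names are expected in wire (lowercase) form.
-   type headerField struct{ name, value string }
-
-   // checkHeaderList reports whether pseudo-header fields all precede regular
-   // fields and whether every field name is lowercase.
-   func checkHeaderList(fields []headerField) bool {
-       sawRegular := false
-       for _, f := range fields {
-           if len(f.name) > 0 && f.name[0] == ':' {
-               if sawRegular {
-                   return false // pseudo-header after a regular field: malformed
-               }
-               continue
-           }
-           sawRegular = true
-           for i := 0; i < len(f.name); i++ {
-               if f.name[i] >= 'A' && f.name[i] <= 'Z' {
-                   return false // uppercase field name: malformed
-               }
-           }
-       }
-       return true
-   }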
-
-
-
-
- HTTP/2 does not use the Connection header field to
- indicate connection-specific header fields; in this protocol, connection-specific
- metadata is conveyed by other means. An endpoint MUST NOT generate an HTTP/2 message
- containing connection-specific header fields; any message containing
- connection-specific header fields MUST be treated as malformed .
-
-
- This means that an intermediary transforming an HTTP/1.x message to HTTP/2 will need
- to remove any header fields nominated by the Connection header field, along with the
- Connection header field itself. Such intermediaries SHOULD also remove other
- connection-specific header fields, such as Keep-Alive, Proxy-Connection,
- Transfer-Encoding and Upgrade, even if they are not nominated by Connection.
-
-
- One exception to this is the TE header field, which MAY be present in an HTTP/2
- request; when it is, it MUST NOT contain any value other than "trailers".
-
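- An intermediary translating HTTP/1.x to HTTP/2 might strip connection-specific fields
- roughly as in this Go sketch (hypothetical names, reusing the headerField type from the
- earlier sketch); the TE exception is handled inline:
-
-   // stripConnectionHeaders removes connection-specific header fields before a
-   // message is sent over HTTP/2. nominated holds the lowercase field names
-   // listed in the Connection header field of the original message.
-   func stripConnectionHeaders(fields []headerField, nominated map[string]bool) []headerField {
-       drop := map[string]bool{
-           "connection": true, "keep-alive": true, "proxy-connection": true,
-           "transfer-encoding": true, "upgrade": true,
-       }
-       kept := fields[:0]
-       for _, f := range fields {
-           if drop[f.name] || nominated[f.name] {
-               continue
-           }
-           if f.name == "te" && f.value != "trailers" {
-               continue // TE is only allowed with the value "trailers"
-           }
-           kept = append(kept, f)
-       }
-       return kept
-   }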
-
-
-
- HTTP/2 purposefully does not support upgrade to another protocol. The handshake
- methods described in are believed sufficient to
- negotiate the use of alternative protocols.
-
-
-
-
-
-
-
- The following pseudo-header fields are defined for HTTP/2 requests:
-
-
-
- The :method pseudo-header field includes the HTTP
- method ( ).
-
-
-
-
- The :scheme pseudo-header field includes the scheme
- portion of the target URI ( ).
-
-
- :scheme is not restricted to http and https schemed URIs. A
- proxy or gateway can translate requests for non-HTTP schemes, enabling the use
- of HTTP to interact with non-HTTP services.
-
-
-
-
- The :authority pseudo-header field includes the
- authority portion of the target URI ( ). The authority MUST NOT include the deprecated userinfo subcomponent for http
- or https schemed URIs.
-
-
- To ensure that the HTTP/1.1 request line can be reproduced accurately, this
- pseudo-header field MUST be omitted when translating from an HTTP/1.1 request
- that has a request target in origin or asterisk form (see ). Clients that generate
- HTTP/2 requests directly SHOULD use the :authority pseudo-header
- field instead of the Host header field. An
- intermediary that converts an HTTP/2 request to HTTP/1.1 MUST create a Host header field if one is not present in a request by
- copying the value of the :authority pseudo-header
- field.
-
-
-
-
- The :path pseudo-header field includes the path and
- query parts of the target URI (the path-absolute
- production from and optionally a '?' character
- followed by the query production, see and ). A request in asterisk form includes the value '*' for the
- :path pseudo-header field.
-
-
- This pseudo-header field MUST NOT be empty for http
- or https URIs; http or
- https URIs that do not contain a path component
- MUST include a value of '/'. The exception to this rule is an OPTIONS request
- for an http or https
- URI that does not include a path component; these MUST include a :path pseudo-header field with a value of '*' (see ).
-
-
-
-
-
- All HTTP/2 requests MUST include exactly one valid value for the :method , :scheme , and :path pseudo-header fields, unless it is a CONNECT request . An HTTP request that omits mandatory
- pseudo-header fields is malformed .
-
-
- HTTP/2 does not define a way to carry the version identifier that is included in the
- HTTP/1.1 request line.
-
-
-
-
-
- For HTTP/2 responses, a single :status pseudo-header
- field is defined that carries the HTTP status code field (see ). This pseudo-header field MUST be included in all
- responses, otherwise the response is malformed .
-
-
- HTTP/2 does not define a way to carry the version or reason phrase that is included in
- an HTTP/1.1 status line.
-
-
-
-
-
- The Cookie header field can carry a significant amount of
- redundant data.
-
-
- The Cookie header field uses a semi-colon (";") to delimit cookie-pairs (or "crumbs").
- This header field doesn't follow the list construction rules in HTTP (see ), which prevents cookie-pairs from
- being separated into different name-value pairs. This can significantly reduce
- compression efficiency as individual cookie-pairs are updated.
-
-
- To allow for better compression efficiency, the Cookie header field MAY be split into
- separate header fields, each with one or more cookie-pairs. If there are multiple
- Cookie header fields after decompression, these MUST be concatenated into a single
- octet string using the two octet delimiter of 0x3B, 0x20 (the ASCII string "; ")
- before being passed into a non-HTTP/2 context, such as an HTTP/1.1 connection, or a
- generic HTTP server application.
-
-
-
- Therefore, the following two lists of Cookie header fields are semantically
- equivalent.
-
-   cookie: a=b; c=d; e=f
-
-   cookie: a=b
-   cookie: c=d
-   cookie: e=f
-
-
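- A minimal sketch of the required concatenation when handing split Cookie header fields to
- a non-HTTP/2 context (Go, hypothetical helper name):
-
-   // joinCookies reassembles split Cookie header fields into the single octet
-   // string required before crossing into an HTTP/1.1 or generic HTTP context.
-   // The delimiter is the two octets "; " (0x3B, 0x20).
-   func joinCookies(crumbs []string) string {
-       joined := ""
-       for i, c := range crumbs {
-           if i > 0 {
-               joined += "; "
-           }
-           joined += c
-       }
-       return joined
-   }
-
- For the lists above, joinCookies([]string{"a=b", "c=d", "e=f"}) yields "a=b; c=d; e=f".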
-
- A malformed request or response is one that is an otherwise valid sequence of HTTP/2
- frames but is invalid due to the presence of extraneous frames, prohibited
- header fields, the absence of mandatory header fields, or the inclusion of uppercase
- header field names.
-
-
- A request or response that includes an entity body can include a content-length header field. A request or response is also
- malformed if the value of a content-length header field
- does not equal the sum of the DATA frame payload lengths that form the
- body. A response that is defined to have no payload, as described in , can have a non-zero
- content-length header field, even though no content is
- included in DATA frames.
-
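- As a sketch of the check implied above (Go, hypothetical names), an endpoint comparing a
- declared content-length against the DATA payload it actually received might do:
-
-   // contentLengthMatches reports whether the declared content-length equals
-   // the sum of the DATA frame payload lengths that formed the body.
-   // Responses defined to have no payload are exempt from this check.
-   func contentLengthMatches(declared int64, dataPayloadLens []int) bool {
-       var sum int64
-       for _, n := range dataPayloadLens {
-           sum += int64(n)
-       }
-       return sum == declared
-   }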
-
- Intermediaries that process HTTP requests or responses (i.e., any intermediary not
- acting as a tunnel) MUST NOT forward a malformed request or response. Malformed
- requests or responses that are detected MUST be treated as a stream error of type PROTOCOL_ERROR .
-
-
- For malformed requests, a server MAY send an HTTP response prior to closing or
- resetting the stream. Clients MUST NOT accept a malformed response. Note that these
- requirements are intended to protect against several types of common attacks against
- HTTP; they are deliberately strict, because being permissive can expose
- implementations to these vulnerabilities.
-
-
-
-
-
-
- This section shows HTTP/1.1 requests and responses, with illustrations of equivalent
- HTTP/2 requests and responses.
-
-
- An HTTP GET request includes request header fields and no body and is therefore
- transmitted as a single HEADERS frame, followed by zero or more
- CONTINUATION frames containing the serialized block of request header
- fields. The HEADERS frame in the following has both the END_HEADERS and
- END_STREAM flags set; no CONTINUATION frames are sent:
-
-
-
-  GET /resource HTTP/1.1           HEADERS
-  Host: example.org          ==>     + END_STREAM
-  Accept: image/jpeg                 + END_HEADERS
-                                       :method = GET
-                                       :scheme = https
-                                       :path = /resource
-                                       host = example.org
-                                       accept = image/jpeg
-
-
-
- Similarly, a response that includes only response header fields is transmitted as a
- HEADERS frame (again, followed by zero or more
- CONTINUATION frames) containing the serialized block of response header
- fields.
-
-
-
-  HTTP/1.1 304 Not Modified        HEADERS
-  ETag: "xyzzy"              ==>     + END_STREAM
-  Expires: Thu, 23 Jan ...           + END_HEADERS
-                                       :status = 304
-                                       etag = "xyzzy"
-                                       expires = Thu, 23 Jan ...
-
-
-
- An HTTP POST request that includes request header fields and payload data is transmitted
- as one HEADERS frame, followed by zero or more
- CONTINUATION frames containing the request header fields, followed by one
- or more DATA frames, with the last CONTINUATION (or
- HEADERS ) frame having the END_HEADERS flag set and the final
- DATA frame having the END_STREAM flag set:
-
-
-
-  POST /resource HTTP/1.1          HEADERS
-  Host: example.org          ==>     - END_STREAM
-  Content-Type: image/jpeg           - END_HEADERS
-  Content-Length: 123                  :method = POST
-                                       :path = /resource
-  {binary data}                        :scheme = https
-
-                                     CONTINUATION
-                                       + END_HEADERS
-                                         content-type = image/jpeg
-                                         host = example.org
-                                         content-length = 123
-
-                                     DATA
-                                       + END_STREAM
-                                     {binary data}
-
- Note that data contributing to any given header field could be spread between header
- block fragments. The allocation of header fields to frames in this example is
- illustrative only.
-
-
-
-
- A response that includes header fields and payload data is transmitted as a
- HEADERS frame, followed by zero or more CONTINUATION
- frames, followed by one or more DATA frames, with the last
- DATA frame in the sequence having the END_STREAM flag set:
-
-
-
-  HTTP/1.1 200 OK                  HEADERS
-  Content-Type: image/jpeg   ==>     - END_STREAM
-  Content-Length: 123                + END_HEADERS
-                                       :status = 200
-  {binary data}                        content-type = image/jpeg
-                                       content-length = 123
-
-                                     DATA
-                                       + END_STREAM
-                                     {binary data}
-
-
-
- Trailing header fields are sent as a header block after both the request or response
- header block and all the DATA frames have been sent. The
- HEADERS frame starting the trailers header block has the END_STREAM flag
- set.
-
-
-
-  HTTP/1.1 200 OK                  HEADERS
-  Content-Type: image/jpeg   ==>     - END_STREAM
-  Transfer-Encoding: chunked         + END_HEADERS
-  Trailer: Foo                         :status = 200
-                                       content-length = 123
-  123                                  content-type = image/jpeg
-  {binary data}                        trailer = Foo
-  0
-  Foo: bar                          DATA
-                                      - END_STREAM
-                                     {binary data}
-
-                                     HEADERS
-                                       + END_STREAM
-                                       + END_HEADERS
-                                         foo = bar
-
-
-
-
-
- An informational response using a 1xx status code other than 101 is transmitted as a
- HEADERS frame, followed by zero or more CONTINUATION
- frames:
-
-  HTTP/1.1 103 BAR                 HEADERS
-  Extension-Field: bar       ==>     - END_STREAM
-                                     + END_HEADERS
-                                       :status = 103
-                                       extension-field = bar
-
-
-
-
-
- In HTTP/1.1, an HTTP client is unable to retry a non-idempotent request when an error
- occurs, because there is no means to determine the nature of the error. It is possible
- that some server processing occurred prior to the error, which could result in
- undesirable effects if the request were reattempted.
-
-
- HTTP/2 provides two mechanisms for providing a guarantee to a client that a request has
- not been processed:
-
-
- The GOAWAY frame indicates the highest stream number that might have
- been processed. Requests on streams with higher numbers are therefore guaranteed to
- be safe to retry.
-
-
- The REFUSED_STREAM error code can be included in a
- RST_STREAM frame to indicate that the stream is being closed prior to
- any processing having occurred. Any request that was sent on the reset stream can
- be safely retried.
-
-
-
-
- Requests that have not been processed have not failed; clients MAY automatically retry
- them, even those with non-idempotent methods.
-
-
- A server MUST NOT indicate that a stream has not been processed unless it can guarantee
- that fact. If frames that are on a stream are passed to the application layer for any
- stream, then REFUSED_STREAM MUST NOT be used for that stream, and a
- GOAWAY frame MUST include a stream identifier that is greater than or
- equal to the given stream identifier.
-
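- The retry guarantee reduces to a simple predicate, sketched here in Go with hypothetical
- names: a request is known to be unprocessed if its stream was reset with REFUSED_STREAM,
- or if its stream identifier is higher than the last stream identifier carried in GOAWAY.
-
-   // safeToRetry reports whether a request is guaranteed not to have been
-   // processed and can therefore be retried, even for non-idempotent methods.
-   func safeToRetry(streamID, goAwayLastStreamID uint32, resetWithRefusedStream bool) bool {
-       return resetWithRefusedStream || streamID > goAwayLastStreamID
-   }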
-
- In addition to these mechanisms, the PING frame provides a way for a
- client to easily test a connection. Connections that remain idle can become broken as
- some middleboxes (for instance, network address translators, or load balancers) silently
- discard connection bindings. The PING frame allows a client to safely
- test whether a connection is still active without sending a request.
-
-
-
-
-
-
- HTTP/2 allows a server to pre-emptively send (or "push") responses (along with
- corresponding "promised" requests) to a client in association with a previous
- client-initiated request. This can be useful when the server knows the client will need
- to have those responses available in order to fully process the response to the original
- request.
-
-
-
- Pushing additional message exchanges in this fashion is optional, and is negotiated
- between individual endpoints. The SETTINGS_ENABLE_PUSH setting can be set
- to 0 to indicate that server push is disabled.
-
-
- Promised requests MUST be cacheable (see ), MUST be safe (see ), and MUST NOT include a
- request body. Clients that receive a promised request that is not cacheable, that is
- not safe, or that includes a request body MUST reset the stream with a
- stream error of type PROTOCOL_ERROR .
-
-
- Pushed responses that are cacheable (see ) can be stored by the client, if it implements an HTTP
- cache. Pushed responses are considered successfully validated on the origin server (e.g.,
- if the "no-cache" cache response directive is present) while the stream identified by the
- promised stream ID is still open.
-
-
- Pushed responses that are not cacheable MUST NOT be stored by any HTTP cache. They MAY
- be made available to the application separately.
-
-
- An intermediary can receive pushes from the server and choose not to forward them on to
- the client. In other words, how to make use of the pushed information is up to that
- intermediary. Equally, the intermediary might choose to make additional pushes to the
- client, without any action taken by the server.
-
-
- A client cannot push. Thus, servers MUST treat the receipt of a
- PUSH_PROMISE frame as a connection
- error of type PROTOCOL_ERROR . Clients MUST reject any attempt to
- change the SETTINGS_ENABLE_PUSH setting to a value other than 0 by treating
- the message as a connection error of type
- PROTOCOL_ERROR .
-
-
-
-
- Server push is semantically equivalent to a server responding to a request; however, in
- this case that request is also sent by the server, as a PUSH_PROMISE
- frame.
-
-
- The PUSH_PROMISE frame includes a header block that contains a complete
- set of request header fields that the server attributes to the request. It is not
- possible to push a response to a request that includes a request body.
-
-
-
- Pushed responses are always associated with an explicit request from the client. The
- PUSH_PROMISE frames sent by the server are sent on that explicit
- request's stream. The PUSH_PROMISE frame also includes a promised stream
- identifier, chosen from the stream identifiers available to the server (see ).
-
-
-
- The header fields in PUSH_PROMISE and any subsequent
- CONTINUATION frames MUST be a valid and complete set of request header fields . The server MUST include a method in
- the :method header field that is safe and cacheable. If a
- client receives a PUSH_PROMISE that does not include a complete and valid
- set of header fields, or the :method header field identifies
- a method that is not safe, it MUST respond with a stream error of type PROTOCOL_ERROR .
-
-
-
- The server SHOULD send PUSH_PROMISE ( )
- frames prior to sending any frames that reference the promised responses. This avoids a
- race where clients issue requests prior to receiving any PUSH_PROMISE
- frames.
-
-
- For example, if the server receives a request for a document containing embedded links
- to multiple image files, and the server chooses to push those additional images to the
- client, sending push promises before the DATA frames that contain the
- image links ensures that the client is able to see the promises before discovering
- embedded links. Similarly, if the server pushes responses referenced by the header block
- (for instance, in Link header fields), sending the push promises before sending the
- header block ensures that clients do not request them.
-
-
-
- PUSH_PROMISE frames MUST NOT be sent by the client.
-
-
- PUSH_PROMISE frames can be sent by the server in response to any
- client-initiated stream, but the stream MUST be in either the "open" or "half closed
- (remote)" state with respect to the server. PUSH_PROMISE frames are
- interspersed with the frames that comprise a response, though they cannot be
- interspersed with HEADERS and CONTINUATION frames that
- comprise a single header block.
-
-
- Sending a PUSH_PROMISE frame creates a new stream and puts the stream
- into the "reserved (local)" state for the server and the "reserved (remote)" state for
- the client.
-
-
-
-
-
- After sending the PUSH_PROMISE frame, the server can begin delivering the
- pushed response as a response on a server-initiated
- stream that uses the promised stream identifier. The server uses this stream to
- transmit an HTTP response, using the same sequence of frames as defined in . This stream becomes "half closed"
- to the client after the initial HEADERS frame is sent.
-
-
-
- Once a client receives a PUSH_PROMISE frame and chooses to accept the
- pushed response, the client SHOULD NOT issue any requests for the promised response
- until after the promised stream has closed.
-
-
-
- If the client determines, for any reason, that it does not wish to receive the pushed
- response from the server, or if the server takes too long to begin sending the promised
- response, the client can send an RST_STREAM frame, using either the
- CANCEL or REFUSED_STREAM codes, and referencing the pushed
- stream's identifier.
-
-
- A client can use the SETTINGS_MAX_CONCURRENT_STREAMS setting to limit the
- number of responses that can be concurrently pushed by a server. Advertising a
- SETTINGS_MAX_CONCURRENT_STREAMS value of zero disables server push by
- preventing the server from creating the necessary streams. This does not prohibit a
- server from sending PUSH_PROMISE frames; clients need to reset any
- promised streams that are not wanted.
-
-
-
- Clients receiving a pushed response MUST validate that either the server is
- authoritative (see ), or the proxy that provided the pushed
- response is configured for the corresponding request. For example, a server that offers
- a certificate for only the example.com DNS-ID or Common Name
- is not permitted to push a response for https://www.example.org/doc .
-
-
- The response for a PUSH_PROMISE stream begins with a
- HEADERS frame, which immediately puts the stream into the "half closed
- (remote)" state for the server and the "half closed (local)" state for the client, and ends
- with a frame bearing END_STREAM, which places the stream in the "closed" state.
-
-
- The client never sends a frame with the END_STREAM flag for a server push.
-
-
-
-
-
-
-
-
-
- In HTTP/1.x, the pseudo-method CONNECT ( ) is used to convert an HTTP connection into a tunnel to a remote host.
- CONNECT is primarily used with HTTP proxies to establish a TLS session with an origin
- server for the purposes of interacting with https resources.
-
-
- In HTTP/2, the CONNECT method is used to establish a tunnel over a single HTTP/2 stream to
- a remote host, for similar purposes. The HTTP header field mapping works as defined in
- Request Header Fields , with a few
- differences. Specifically:
-
-
- The :method header field is set to CONNECT .
-
-
- The :scheme and :path header
- fields MUST be omitted.
-
-
- The :authority header field contains the host and port to
- connect to (equivalent to the authority-form of the request-target of CONNECT
- requests, see ).
-
-
-
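- A client forming a CONNECT request might build its header block as in this Go sketch
- (hypothetical names, reusing the headerField type from the earlier sketch); only :method
- and :authority are set, and :scheme and :path are deliberately absent.
-
-   // connectHeaders returns the pseudo-header fields for a CONNECT request to
-   // the given host and port, for example "example.com:443".
-   func connectHeaders(hostPort string) []headerField {
-       return []headerField{
-           {name: ":method", value: "CONNECT"},
-           {name: ":authority", value: hostPort}, // :scheme and :path MUST be omitted
-       }
-   }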
-
- A proxy that supports CONNECT establishes a TCP connection to
- the server identified in the :authority header field. Once
- this connection is successfully established, the proxy sends a HEADERS
- frame containing a 2xx series status code to the client, as defined in .
-
-
- After the initial HEADERS frame sent by each peer, all subsequent
- DATA frames correspond to data sent on the TCP connection. The payload of
- any DATA frames sent by the client is transmitted by the proxy to the TCP
- server; data received from the TCP server is assembled into DATA frames by
- the proxy. Frame types other than DATA or stream management frames
- (RST_STREAM , WINDOW_UPDATE , and PRIORITY )
- MUST NOT be sent on a connected stream, and MUST be treated as a stream error if received.
-
-
- The TCP connection can be closed by either peer. The END_STREAM flag on a
- DATA frame is treated as being equivalent to the TCP FIN bit. A client is
- expected to send a DATA frame with the END_STREAM flag set after receiving
- a frame bearing the END_STREAM flag. A proxy that receives a DATA frame
- with the END_STREAM flag set sends the attached data with the FIN bit set on the last TCP
- segment. A proxy that receives a TCP segment with the FIN bit set sends a
- DATA frame with the END_STREAM flag set. Note that the final TCP segment
- or DATA frame could be empty.
-
-
- A TCP connection error is signaled with RST_STREAM . A proxy treats any
- error in the TCP connection, which includes receiving a TCP segment with the RST bit set,
- as a stream error of type
- CONNECT_ERROR . Correspondingly, a proxy MUST send a TCP segment with the
- RST bit set if it detects an error with the stream or the HTTP/2 connection.
-
-
-
-
-
-
- This section outlines attributes of the HTTP protocol that improve interoperability, reduce
- exposure to known security vulnerabilities, or reduce the potential for implementation
- variation.
-
-
-
-
- HTTP/2 connections are persistent. For best performance, it is expected that clients will
- not close connections until it is determined that no further communication with a server is
- necessary (for example, when a user navigates away from a particular web page), or until
- the server closes the connection.
-
-
- Clients SHOULD NOT open more than one HTTP/2 connection to a given host and port pair,
- where host is derived from a URI, a selected alternative
- service , or a configured proxy.
-
-
- A client can create additional connections as replacements, either to replace connections
- that are near to exhausting the available stream
- identifier space , to refresh the keying material for a TLS connection, or to
- replace connections that have encountered errors .
-
-
- A client MAY open multiple connections to the same IP address and TCP port using different
- Server Name Indication values or to provide different TLS
- client certificates, but SHOULD avoid creating multiple connections with the same
- configuration.
-
-
- Servers are encouraged to maintain open connections for as long as possible, but are
- permitted to terminate idle connections if necessary. When either endpoint chooses to
- close the transport-layer TCP connection, the terminating endpoint SHOULD first send a
- GOAWAY ( ) frame so that both endpoints can reliably
- determine whether previously sent frames have been processed and gracefully complete or
- terminate any necessary remaining tasks.
-
-
-
-
- Connections that are made to an origin server, either directly or through a tunnel
- created using the CONNECT method, MAY be reused for
- requests with multiple different URI authority components. A connection can be reused
- as long as the origin server is authoritative . For
- http resources, this depends on the host having resolved to
- the same IP address.
-
-
- For https resources, connection reuse additionally depends
- on having a certificate that is valid for the host in the URI. An origin server might
- offer a certificate with multiple subjectAltName attributes,
- or names with wildcards, one of which is valid for the authority in the URI. For
- example, a certificate with a subjectAltName of *.example.com might permit the use of the same connection for
- requests to URIs starting with https://a.example.com/ and
- https://b.example.com/ .
-
-
- In some deployments, reusing a connection for multiple origins can result in requests
- being directed to the wrong origin server. For example, TLS termination might be
- performed by a middlebox that uses the TLS Server Name Indication
- (SNI) extension to select an origin server. This means that it is possible
- for clients to send confidential information to servers that might not be the intended
- target for the request, even though the server is otherwise authoritative.
-
-
- A server that does not wish clients to reuse connections can indicate that it is not
- authoritative for a request by sending a 421 (Misdirected Request) status code in response
- to the request (see ).
-
-
- A client that is configured to use a proxy over HTTP/2 directs requests to that proxy
- through a single connection. That is, all requests sent via a proxy reuse the
- connection to the proxy.
-
-
-
-
-
- The 421 (Misdirected Request) status code indicates that the request was directed at a
- server that is not able to produce a response. This can be sent by a server that is not
- configured to produce responses for the combination of scheme and authority that are
- included in the request URI.
-
-
- Clients receiving a 421 (Misdirected Request) response from a server MAY retry the
- request - whether the request method is idempotent or not - over a different connection.
- This is possible if a connection is reused ( ) or if an alternative
- service is selected ( ).
-
-
- This status code MUST NOT be generated by proxies.
-
-
- A 421 response is cacheable by default; i.e., unless otherwise indicated by the method
- definition or explicit cache controls (see ).
-
-
-
-
-
-
- Implementations of HTTP/2 MUST support TLS 1.2 for HTTP/2 over
- TLS. The general TLS usage guidance in SHOULD be followed, with
- some additional restrictions that are specific to HTTP/2.
-
-
-
- An implementation of HTTP/2 over TLS MUST use TLS 1.2 or higher with the restrictions on
- feature set and cipher suite described in this section. Due to implementation
- limitations, it might not be possible to fail TLS negotiation. An endpoint MUST
- immediately terminate an HTTP/2 connection that does not meet these minimum requirements
- with a connection error of type
- INADEQUATE_SECURITY .
-
-
-
-
- The TLS implementation MUST support the Server Name Indication
- (SNI) extension to TLS. HTTP/2 clients MUST indicate the target domain name when
- negotiating TLS.
-
-
- The TLS implementation MUST disable compression. TLS compression can lead to the
- exposure of information that would not otherwise be revealed .
- Generic compression is unnecessary since HTTP/2 provides compression features that are
- more aware of context and therefore likely to be more appropriate for use for
- performance, security or other reasons.
-
-
- The TLS implementation MUST disable renegotiation. An endpoint MUST treat a TLS
- renegotiation as a connection error of type
- PROTOCOL_ERROR . Note that disabling renegotiation can result in
- long-lived connections becoming unusable due to limits on the number of messages the
- underlying cipher suite can encipher.
-
-
- A client MAY use renegotiation to provide confidentiality protection for client
- credentials offered in the handshake, but any renegotiation MUST occur prior to sending
- the connection preface. A server SHOULD request a client certificate if it sees a
- renegotiation request immediately after establishing a connection.
-
-
- This effectively prevents the use of renegotiation in response to a request for a
- specific protected resource. A future specification might provide a way to support this
- use case.
-
-
-
-
-
- The set of TLS cipher suites that are permitted in HTTP/2 is restricted. HTTP/2 MUST
- only be used with cipher suites that have ephemeral key exchange, such as the ephemeral Diffie-Hellman (DHE) or the elliptic curve variant (ECDHE) . Ephemeral key exchange MUST
- have a minimum size of 2048 bits for DHE or security level of 128 bits for ECDHE.
- Clients MUST accept DHE sizes of up to 4096 bits. HTTP/2 MUST NOT be used with cipher
- suites that use stream or block ciphers. Authenticated Encryption with Additional Data
- (AEAD) modes, such as the Galois/Counter Mode (GCM) for
- AES, are acceptable.
-
-
- The effect of these restrictions is that TLS 1.2 implementations could have
- non-intersecting sets of available cipher suites, since these prevent the use of the
- cipher suite that TLS 1.2 makes mandatory. To avoid this problem, implementations of
- HTTP/2 that use TLS 1.2 MUST support TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 with P256 .
-
-
- Clients MAY advertise support of cipher suites that are prohibited by the above
- restrictions in order to allow for connection to servers that do not support HTTP/2.
- This enables a fallback to protocols without these constraints without the additional
- latency imposed by using a separate connection for fallback.
-
-
-
-
-
-
-
-
- HTTP/2 relies on the HTTP/1.1 definition of authority for determining whether a server is
- authoritative in providing a given response, see . This relies on local name resolution for the "http"
- URI scheme, and the authenticated server identity for the "https" scheme (see ).
-
-
-
-
-
- In a cross-protocol attack, an attacker causes a client to initiate a transaction in one
- protocol toward a server that understands a different protocol. An attacker might be able
- to cause the transaction to appear as a valid transaction in the second protocol. In
- combination with the capabilities of the web context, this can be used to interact with
- poorly protected servers in private networks.
-
-
- Completing a TLS handshake with an ALPN identifier for HTTP/2 can be considered sufficient
- protection against cross protocol attacks. ALPN provides a positive indication that a
- server is willing to proceed with HTTP/2, which prevents attacks on other TLS-based
- protocols.
-
-
- The encryption in TLS makes it difficult for attackers to control the data which could be
- used in a cross-protocol attack on a cleartext protocol.
-
-
- The cleartext version of HTTP/2 has minimal protection against cross-protocol attacks.
- The connection preface contains a string that is
- designed to confuse HTTP/1.1 servers, but no special protection is offered for other
- protocols. A server that is willing to ignore parts of an HTTP/1.1 request containing an
- Upgrade header field in addition to the client connection preface could be exposed to a
- cross-protocol attack.
-
-
-
-
-
- HTTP/2 header field names and values are encoded as sequences of octets with a length
- prefix. This enables HTTP/2 to carry any string of octets as the name or value of a
- header field. An intermediary that translates HTTP/2 requests or responses into HTTP/1.1
- directly could permit the creation of corrupted HTTP/1.1 messages. An attacker might
- exploit this behavior to cause the intermediary to create HTTP/1.1 messages with illegal
- header fields, extra header fields, or even new messages that are entirely falsified.
-
-
- Header field names or values that contain characters not permitted by HTTP/1.1, including
- carriage return (ASCII 0xd) or line feed (ASCII 0xa), MUST NOT be translated verbatim by an
- intermediary, as stipulated in .
-
-
- Translation from HTTP/1.x to HTTP/2 does not produce the same opportunity to an attacker.
- Intermediaries that perform translation to HTTP/2 MUST remove any instances of the obs-fold production from header field values.
-
-
-
-
-
- Pushed responses do not have an explicit request from the client; the request
- is provided by the server in the PUSH_PROMISE frame.
-
-
- Caching responses that are pushed is possible based on the guidance provided by the origin
- server in the Cache-Control header field. However, this can cause issues if a single
- server hosts more than one tenant. For example, a server might offer multiple users each
- a small portion of its URI space.
-
-
- Where multiple tenants share space on the same server, that server MUST ensure that
- tenants are not able to push representations of resources that they do not have authority
- over. Failure to enforce this would allow a tenant to provide a representation that would
- be served out of cache, overriding the actual representation that the authoritative tenant
- provides.
-
-
- Pushed responses for which an origin server is not authoritative (see
- ) are never cached or used.
-
-
-
-
-
- An HTTP/2 connection can demand a greater commitment of resources to operate than an
- HTTP/1.1 connection. The use of header compression and flow control depends on a
- commitment of resources for storing a greater amount of state. Settings for these
- features ensure that memory commitments for these features are strictly bounded.
-
-
- The number of PUSH_PROMISE frames is not constrained in the same fashion.
- A client that accepts server push SHOULD limit the number of streams it allows to be in
- the "reserved (remote)" state. An excessive number of server push streams can be treated as
- a stream error of type
- ENHANCE_YOUR_CALM .
-
-
- Processing capacity cannot be guarded as effectively as state capacity.
-
-
- The SETTINGS frame can be abused to cause a peer to expend additional
- processing time. This might be done by pointlessly changing SETTINGS parameters, setting
- multiple undefined parameters, or changing the same setting multiple times in the same
- frame. WINDOW_UPDATE or PRIORITY frames can be abused to
- cause an unnecessary waste of resources.
-
-
- Large numbers of small or empty frames can be abused to cause a peer to expend time
- processing frame headers. Note however that some uses are entirely legitimate, such as
- the sending of an empty DATA frame to end a stream.
-
-
- Header compression also offers some opportunities to waste processing resources; see for more details on potential abuses.
-
-
- Limits in SETTINGS parameters cannot be reduced instantaneously, which
- leaves an endpoint exposed to behavior from a peer that could exceed the new limits. In
- particular, immediately after establishing a connection, limits set by a server are not
- known to clients and could be exceeded without being an obvious protocol violation.
-
-
- All these features - i.e., SETTINGS changes, small frames, header
- compression - have legitimate uses. These features become a burden only when they are
- used unnecessarily or to excess.
-
-
- An endpoint that doesn't monitor this behavior exposes itself to a risk of denial of
- service attack. Implementations SHOULD track the use of these features and set limits on
- their use. An endpoint MAY treat activity that is suspicious as a connection error of type
- ENHANCE_YOUR_CALM .
-
-
-
-
- A large header block can cause an implementation to
- commit a large amount of state. Header fields that are critical for routing can appear
- toward the end of a header block, which prevents streaming of header fields to their
- ultimate destination. For this and other reasons, such as ensuring cache correctness,
- an endpoint might need to buffer the entire header block. Since there is no
- hard limit to the size of a header block, some endpoints could be forced to commit a large
- amount of available memory for header fields.
-
-
- An endpoint can use the SETTINGS_MAX_HEADER_LIST_SIZE setting to advise peers of
- limits that might apply on the size of header blocks. This setting is only advisory, so
- endpoints MAY choose to send header blocks that exceed this limit and risk having the
- request or response treated as malformed. This setting is specific to a connection,
- so any request or response could encounter a hop with a lower, unknown limit. An
- intermediary can attempt to avoid this problem by passing on values presented by
- different peers, but they are not obligated to do so.
-
-
- A server that receives a larger header block than it is willing to handle can send an
- HTTP 431 (Request Header Fields Too Large) status code . A
- client can discard responses that it cannot process. The header block MUST be processed
- to ensure a consistent connection state, unless the connection is closed.
-
-
-
-
-
-
- HTTP/2 enables greater use of compression for both header fields ( ) and entity bodies. Compression can allow an attacker to recover
- secret data when it is compressed in the same context as data under attacker control.
-
-
- There are demonstrable attacks on compression that exploit the characteristics of the web
- (e.g., ). The attacker induces multiple requests containing
- varying plaintext, observing the length of the resulting ciphertext in each, which
- reveals a shorter length when a guess about the secret is correct.
-
-
- Implementations communicating on a secure channel MUST NOT compress content that includes
- both confidential and attacker-controlled data unless separate compression dictionaries
- are used for each source of data. Compression MUST NOT be used if the source of data
- cannot be reliably determined. Generic stream compression, such as that provided by TLS,
- MUST NOT be used with HTTP/2 ( ).
-
-
- Further considerations regarding the compression of header fields are described in .
-
-
-
-
-
- Padding within HTTP/2 is not intended as a replacement for general purpose padding, such
- as might be provided by TLS . Redundant padding could even be
- counterproductive. Correct application can depend on having specific knowledge of the
- data that is being padded.
-
-
- To mitigate attacks that rely on compression, disabling or limiting compression might be
- preferable to padding as a countermeasure.
-
-
- Padding can be used to obscure the exact size of frame content, and is provided to
- mitigate specific attacks within HTTP. For example, attacks where compressed content
- includes both attacker-controlled plaintext and secret data (see for example, ).
-
-
- Use of padding can result in less protection than might seem immediately obvious. At
- best, padding only makes it more difficult for an attacker to infer length information by
- increasing the number of frames an attacker has to observe. Incorrectly implemented
- padding schemes can be easily defeated. In particular, randomized padding with a
- predictable distribution provides very little protection; similarly, padding payloads to a
- fixed size exposes information as payload sizes cross the fixed size boundary, which could
- be possible if an attacker can control plaintext.
-
-
- Intermediaries SHOULD retain padding for DATA frames, but MAY drop padding
- for HEADERS and PUSH_PROMISE frames. A valid reason for an
- intermediary to change the amount of padding of frames is to improve the protections that
- padding provides.
-
-
-
-
-
- Several characteristics of HTTP/2 provide an observer an opportunity to correlate actions
- of a single client or server over time. This includes the value of settings, the manner
- in which flow control windows are managed, the way priorities are allocated to streams,
- timing of reactions to stimulus, and handling of any optional features.
-
-
- To the extent that this creates observable differences in behavior, they could be used as a basis
- for fingerprinting a specific client, as defined in .
-
-
-
-
-
-
- A string for identifying HTTP/2 is entered into the "Application Layer Protocol Negotiation
- (ALPN) Protocol IDs" registry established in .
-
-
- This document establishes a registry for frame types, settings, and error codes. These new
- registries are entered into a new "Hypertext Transfer Protocol (HTTP) 2 Parameters" section.
-
-
- This document registers the HTTP2-Settings header field for
- use in HTTP; and the 421 (Misdirected Request) status code.
-
-
- This document registers the PRI method for use in HTTP, to avoid
- collisions with the connection preface .
-
-
-
-
- This document creates two registrations for the identification of HTTP/2 in the
- "Application Layer Protocol Negotiation (ALPN) Protocol IDs" registry established in .
-
-
- The "h2" string identifies HTTP/2 when used over TLS:
-
- HTTP/2 over TLS
- 0x68 0x32 ("h2")
- This document
-
-
-
- The "h2c" string identifies HTTP/2 when used over cleartext TCP:
-
- HTTP/2 over TCP
- 0x68 0x32 0x63 ("h2c")
- This document
-
-
-
-
-
-
- This document establishes a registry for HTTP/2 frame type codes. The "HTTP/2 Frame
- Type" registry manages an 8-bit space. The "HTTP/2 Frame Type" registry operates under
- either of the "IETF Review" or "IESG Approval" policies for
- values between 0x00 and 0xef, with values between 0xf0 and 0xff being reserved for
- experimental use.
-
-
- New entries in this registry require the following information:
-
-
- A name or label for the frame type.
-
-
- The 8-bit code assigned to the frame type.
-
-
- A reference to a specification that includes a description of the frame layout,
- its semantics, and the flags that the frame type uses, including any parts of the frame
- that are conditionally present based on the value of flags.
-
-
-
-
- The entries in the following table are registered by this document.
-
-
-   Frame Type      Code
-   --------------  ----
-   DATA            0x0
-   HEADERS         0x1
-   PRIORITY        0x2
-   RST_STREAM      0x3
-   SETTINGS        0x4
-   PUSH_PROMISE    0x5
-   PING            0x6
-   GOAWAY          0x7
-   WINDOW_UPDATE   0x8
-   CONTINUATION    0x9
-
-
-
-
-
- This document establishes a registry for HTTP/2 settings. The "HTTP/2 Settings" registry
- manages a 16-bit space. The "HTTP/2 Settings" registry operates under the "Expert Review" policy for values in the range from 0x0000 to
- 0xefff, with values between 0xf000 and 0xffff being reserved for experimental use.
-
-
- New registrations are advised to provide the following information:
-
-
- A symbolic name for the setting. Specifying a setting name is optional.
-
-
- The 16-bit code assigned to the setting.
-
-
- An initial value for the setting.
-
-
- An optional reference to a specification that describes the use of the setting.
-
-
-
-
- An initial set of setting registrations can be found in .
-
-
-   Name                     Code   Initial Value
-   -----------------------  -----  -------------
-   HEADER_TABLE_SIZE        0x1    4096
-   ENABLE_PUSH              0x2    1
-   MAX_CONCURRENT_STREAMS   0x3    (infinite)
-   INITIAL_WINDOW_SIZE      0x4    65535
-   MAX_FRAME_SIZE           0x5    16384
-   MAX_HEADER_LIST_SIZE     0x6    (infinite)
-
-
-
-
-
-
- This document establishes a registry for HTTP/2 error codes. The "HTTP/2 Error Code"
- registry manages a 32-bit space. The "HTTP/2 Error Code" registry operates under the
- "Expert Review" policy .
-
-
- Registrations for error codes are required to include a description of the error code. An
- expert reviewer is advised to examine new registrations for possible duplication with
- existing error codes. Use of existing registrations is to be encouraged, but not
- mandated.
-
-
- New registrations are advised to provide the following information:
-
-
- A name for the error code. Specifying an error code name is optional.
-
-
- The 32-bit error code value.
-
-
- A brief description of the error code semantics, longer if no detailed specification
- is provided.
-
-
- An optional reference for a specification that defines the error code.
-
-
-
-
- The entries in the following table are registered by this document.
-
-
-   Name                  Code   Description
-   --------------------  -----  ----------------------------------------
-   NO_ERROR              0x0    Graceful shutdown
-   PROTOCOL_ERROR        0x1    Protocol error detected
-   INTERNAL_ERROR        0x2    Implementation fault
-   FLOW_CONTROL_ERROR    0x3    Flow control limits exceeded
-   SETTINGS_TIMEOUT      0x4    Settings not acknowledged
-   STREAM_CLOSED         0x5    Frame received for closed stream
-   FRAME_SIZE_ERROR      0x6    Frame size incorrect
-   REFUSED_STREAM        0x7    Stream not processed
-   CANCEL                0x8    Stream cancelled
-   COMPRESSION_ERROR     0x9    Compression state not updated
-   CONNECT_ERROR         0xa    TCP connection error for CONNECT method
-   ENHANCE_YOUR_CALM     0xb    Processing capacity exceeded
-   INADEQUATE_SECURITY   0xc    Negotiated TLS parameters not acceptable
-
-
-
-
-
-
-
- This section registers the HTTP2-Settings header field in the
- Permanent Message Header Field Registry .
-
-
- HTTP2-Settings
-
-
- http
-
-
- standard
-
-
- IETF
-
-
- of this document
-
-
- This header field is only used by an HTTP/2 client for Upgrade-based negotiation.
-
-
-
-
-
-
-
- This section registers the PRI method in the HTTP Method
- Registry ( ).
-
-
- PRI
-
-
- No
-
-
- No
-
-
- of this document
-
-
- This method is never used by an actual client. This method will appear to be used
- when an HTTP/1.1 server or intermediary attempts to parse an HTTP/2 connection
- preface.
-
-
-
-
-
-
-
- This document registers the 421 (Misdirected Request) HTTP Status code in the Hypertext
- Transfer Protocol (HTTP) Status Code Registry ( ).
-
-
-
-
- 421
-
-
- Misdirected Request
-
-
- of this document
-
-
-
-
-
-
-
-
-
- This document includes substantial input from the following individuals:
-
-
- Adam Langley, Wan-Teh Chang, Jim Morrison, Mark Nottingham, Alyssa Wilk, Costin
- Manolache, William Chan, Vitaliy Lvin, Joe Chan, Adam Barth, Ryan Hamilton, Gavin
- Peters, Kent Alstad, Kevin Lindsay, Paul Amer, Fan Yang, Jonathan Leighton (SPDY
- contributors).
-
-
- Gabriel Montenegro and Willy Tarreau (Upgrade mechanism).
-
-
- William Chan, Salvatore Loreto, Osama Mazahir, Gabriel Montenegro, Jitu Padhye, Roberto
- Peon, Rob Trace (Flow control).
-
-
- Mike Bishop (Extensibility).
-
-
- Mark Nottingham, Julian Reschke, James Snell, Jeff Pinner, Mike Bishop, Herve Ruellan
- (Substantial editorial contributions).
-
-
- Kari Hurtta, Tatsuhiro Tsujikawa, Greg Wilkins, Poul-Henning Kamp.
-
-
- Alexey Melnikov was an editor of this document during 2013.
-
-
- A substantial proportion of Martin's contribution was supported by Microsoft during his
- employment there.
-
-
-
-
-
-
-
-
-
-
-   References
-
-   HPACK - Header Compression for HTTP/2
-   Transmission Control Protocol
-   Key words for use in RFCs to Indicate Requirement Levels
-   HTTP Over TLS
-   Uniform Resource Identifier (URI): Generic Syntax
-   The Base16, Base32, and Base64 Data Encodings
-   Guidelines for Writing an IANA Considerations Section in RFCs
-   Augmented BNF for Syntax Specifications: ABNF
-   The Transport Layer Security (TLS) Protocol Version 1.2
-   Transport Layer Security (TLS) Extensions: Extension Definitions
-   Transport Layer Security (TLS) Application-Layer Protocol Negotiation Extension
-   TLS Elliptic Curve Cipher Suites with SHA-256/384 and AES Galois Counter Mode (GCM)
-   Digital Signature Standard (DSS)
-   Hypertext Transfer Protocol (HTTP/1.1): Message Syntax and Routing
-   Hypertext Transfer Protocol (HTTP/1.1): Semantics and Content
-   Hypertext Transfer Protocol (HTTP/1.1): Conditional Requests
-   Hypertext Transfer Protocol (HTTP/1.1): Range Requests
-   Hypertext Transfer Protocol (HTTP/1.1): Caching
-   Hypertext Transfer Protocol (HTTP/1.1): Authentication
-   HTTP State Management Mechanism
-   TCP Extensions for High Performance
-   Transport Layer Security Protocol Compression Methods
-   Additional HTTP Status Codes
-   Elliptic Curve Cryptography (ECC) Cipher Suites for Transport Layer Security (TLS)
-   AES Galois Counter Mode (GCM) Cipher Suites for TLS
-   HTML5
-   Talking to Yourself for Fun and Profit
-   BREACH: Reviving the CRIME Attack
-   Registration Procedures for Message Header Fields
-   Recommendations for Secure Use of TLS and DTLS
-   HTTP Alternative Services
-
-
-
-
- This section is to be removed by RFC Editor before publication.
-
-
-
-
- Renamed Not Authoritative status code to Misdirected Request.
-
-
-
-
-
- Pseudo-header fields are now required to appear strictly before regular ones.
-
-
- Restored 1xx series status codes, except 101.
-
-
-   Changed frame length field to 24 bits. Expanded frame header to 9 octets. Added a setting
-   to limit the damage.
-
-
- Added a setting to advise peers of header set size limits.
-
-
- Removed segments.
-
-
- Made non-semantic-bearing HEADERS frames illegal in the HTTP mapping.
-
-
-
-
-
- Restored extensibility options.
-
-
- Restricting TLS cipher suites to AEAD only.
-
-
- Removing Content-Encoding requirements.
-
-
- Permitting the use of PRIORITY after stream close.
-
-
- Removed ALTSVC frame.
-
-
- Removed BLOCKED frame.
-
-
- Reducing the maximum padding size to 256 octets; removing padding from
- CONTINUATION frames.
-
-
- Removed per-frame GZIP compression.
-
-
-
-
-
- Added BLOCKED frame (at risk).
-
-
- Simplified priority scheme.
-
-
- Added DATA per-frame GZIP compression.
-
-
-
-
-
- Changed "connection header" to "connection preface" to avoid confusion.
-
-
- Added dependency-based stream prioritization.
-
-
- Added "h2c" identifier to distinguish between cleartext and secured HTTP/2.
-
-
-   Adding missing padding to PUSH_PROMISE.
-
-
- Integrate ALTSVC frame and supporting text.
-
-
- Dropping requirement on "deflate" Content-Encoding.
-
-
- Improving security considerations around use of compression.
-
-
-
-
-
- Adding padding for data frames.
-
-
- Renumbering frame types, error codes, and settings.
-
-
- Adding INADEQUATE_SECURITY error code.
-
-
- Updating TLS usage requirements to 1.2; forbidding TLS compression.
-
-
- Removing extensibility for frames and settings.
-
-
- Changing setting identifier size.
-
-
- Removing the ability to disable flow control.
-
-
- Changing the protocol identification token to "h2".
-
-
- Changing the use of :authority to make it optional and to allow userinfo in non-HTTP
- cases.
-
-
- Allowing split on 0x0 for Cookie.
-
-
- Reserved PRI method in HTTP/1.1 to avoid possible future collisions.
-
-
-
-
-
- Added cookie crumbling for more efficient header compression.
-
-
- Added header field ordering with the value-concatenation mechanism.
-
-
-
-
-
- Marked draft for implementation.
-
-
-
-
-
- Adding definition for CONNECT method.
-
-
- Constraining the use of push to safe, cacheable methods with no request body.
-
-
- Changing from :host to :authority to remove any potential confusion.
-
-
- Adding setting for header compression table size.
-
-
- Adding settings acknowledgement.
-
-
- Removing unnecessary and potentially problematic flags from CONTINUATION.
-
-
- Added denial of service considerations.
-
-
-
-
- Marking the draft ready for implementation.
-
-
- Renumbering END_PUSH_PROMISE flag.
-
-
- Editorial clarifications and changes.
-
-
-
-
-
- Added CONTINUATION frame for HEADERS and PUSH_PROMISE.
-
-
- PUSH_PROMISE is no longer implicitly prohibited if SETTINGS_MAX_CONCURRENT_STREAMS is
- zero.
-
-
- Push expanded to allow all safe methods without a request body.
-
-
- Clarified the use of HTTP header fields in requests and responses. Prohibited HTTP/1.1
- hop-by-hop header fields.
-
-
- Requiring that intermediaries not forward requests with missing or illegal routing
- :-headers.
-
-
-   Clarified requirements around handling different frames after stream close, stream reset,
-   and GOAWAY.
-
-
- Added more specific prohibitions for sending of different frame types in various stream
- states.
-
-
- Making the last received setting value the effective value.
-
-
- Clarified requirements on TLS version, extension and ciphers.
-
-
-
-
-
- Committed major restructuring atrocities.
-
-
- Added reference to first header compression draft.
-
-
- Added more formal description of frame lifecycle.
-
-
-   Moved END_STREAM (renamed from FINAL) back to HEADERS/DATA.
-
-
- Removed HEADERS+PRIORITY, added optional priority to HEADERS frame.
-
-
- Added PRIORITY frame.
-
-
-
-
-
- Added continuations to frames carrying header blocks.
-
-
- Replaced use of "session" with "connection" to avoid confusion with other HTTP stateful
- concepts, like cookies.
-
-
- Removed "message".
-
-
- Switched to TLS ALPN from NPN.
-
-
- Editorial changes.
-
-
-
-
-
- Added IANA considerations section for frame types, error codes and settings.
-
-
- Removed data frame compression.
-
-
-   Added PUSH_PROMISE.
-
-
- Added globally applicable flags to framing.
-
-
- Removed zlib-based header compression mechanism.
-
-
- Updated references.
-
-
- Clarified stream identifier reuse.
-
-
- Removed CREDENTIALS frame and associated mechanisms.
-
-
- Added advice against naive implementation of flow control.
-
-
- Added session header section.
-
-
- Restructured frame header. Removed distinction between data and control frames.
-
-
- Altered flow control properties to include session-level limits.
-
-
- Added note on cacheability of pushed resources and multiple tenant servers.
-
-
- Changed protocol label form based on discussions.
-
-
-
-
-
- Changed title throughout.
-
-
- Removed section on Incompatibilities with SPDY draft#2.
-
-
-   Changed INTERNAL_ERROR on GOAWAY to have a value of 2.
-
-
- Replaced abstract and introduction.
-
-
- Added section on starting HTTP/2.0, including upgrade mechanism.
-
-
- Removed unused references.
-
-
- Added flow control principles based on .
-
-
-
-
-
- Adopted as base for draft-ietf-httpbis-http2.
-
-
- Updated authors/editors list.
-
-
- Added status note.
-
-
-
-
-
-
-
diff --git a/http2/testsync.go b/http2/testsync.go
deleted file mode 100644
index 61075bd16d..0000000000
--- a/http2/testsync.go
+++ /dev/null
@@ -1,331 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-package http2
-
-import (
- "context"
- "sync"
- "time"
-)
-
-// testSyncHooks coordinates goroutines in tests.
-//
-// For example, a call to ClientConn.RoundTrip involves several goroutines, including:
-// - the goroutine running RoundTrip;
-// - the clientStream.doRequest goroutine, which writes the request; and
-// - the clientStream.readLoop goroutine, which reads the response.
-//
-// Using testSyncHooks, a test can start a RoundTrip and identify when all these goroutines
-// are blocked waiting for some condition such as reading the Request.Body or waiting for
-// flow control to become available.
-//
-// The testSyncHooks also manage timers and synthetic time in tests.
-// This permits us to, for example, start a request and cause it to time out waiting for
-// response headers without resorting to time.Sleep calls.
-type testSyncHooks struct {
- // active/inactive act as a mutex and condition variable.
- //
- // - neither chan contains a value: testSyncHooks is locked.
- // - active contains a value: unlocked, and at least one goroutine is not blocked
- // - inactive contains a value: unlocked, and all goroutines are blocked
- active chan struct{}
- inactive chan struct{}
-
- // goroutine counts
- total int // total goroutines
- condwait map[*sync.Cond]int // blocked in sync.Cond.Wait
- blocked []*testBlockedGoroutine // otherwise blocked
-
- // fake time
- now time.Time
- timers []*fakeTimer
-
- // Transport testing: Report various events.
- newclientconn func(*ClientConn)
- newstream func(*clientStream)
-}
-
-// testBlockedGoroutine is a blocked goroutine.
-type testBlockedGoroutine struct {
- f func() bool // blocked until f returns true
- ch chan struct{} // closed when unblocked
-}
-
-func newTestSyncHooks() *testSyncHooks {
- h := &testSyncHooks{
- active: make(chan struct{}, 1),
- inactive: make(chan struct{}, 1),
- condwait: map[*sync.Cond]int{},
- }
- h.inactive <- struct{}{}
- h.now = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)
- return h
-}
-
-// lock acquires the testSyncHooks mutex.
-func (h *testSyncHooks) lock() {
- select {
- case <-h.active:
- case <-h.inactive:
- }
-}
-
-// waitInactive waits for all goroutines to become inactive.
-func (h *testSyncHooks) waitInactive() {
- for {
- <-h.inactive
- if !h.unlock() {
- break
- }
- }
-}
-
-// unlock releases the testSyncHooks mutex.
-// It reports whether any goroutines are active.
-func (h *testSyncHooks) unlock() (active bool) {
- // Look for a blocked goroutine which can be unblocked.
- blocked := h.blocked[:0]
- unblocked := false
- for _, b := range h.blocked {
- if !unblocked && b.f() {
- unblocked = true
- close(b.ch)
- } else {
- blocked = append(blocked, b)
- }
- }
- h.blocked = blocked
-
- // Count goroutines blocked on condition variables.
- condwait := 0
- for _, count := range h.condwait {
- condwait += count
- }
-
- if h.total > condwait+len(blocked) {
- h.active <- struct{}{}
- return true
- } else {
- h.inactive <- struct{}{}
- return false
- }
-}
-
-// goRun starts a new goroutine.
-func (h *testSyncHooks) goRun(f func()) {
- h.lock()
- h.total++
- h.unlock()
- go func() {
- defer func() {
- h.lock()
- h.total--
- h.unlock()
- }()
- f()
- }()
-}
-
-// blockUntil indicates that a goroutine is blocked waiting for some condition to become true.
-// It waits until f returns true before proceeding.
-//
-// Example usage:
-//
-// h.blockUntil(func() bool {
-// // Is the context done yet?
-// select {
-// case <-ctx.Done():
-// default:
-// return false
-// }
-// return true
-// })
-// // Wait for the context to become done.
-// <-ctx.Done()
-//
-// The function f passed to blockUntil must be non-blocking and idempotent.
-func (h *testSyncHooks) blockUntil(f func() bool) {
- if f() {
- return
- }
- ch := make(chan struct{})
- h.lock()
- h.blocked = append(h.blocked, &testBlockedGoroutine{
- f: f,
- ch: ch,
- })
- h.unlock()
- <-ch
-}
-
-// broadcast is sync.Cond.Broadcast.
-func (h *testSyncHooks) condBroadcast(cond *sync.Cond) {
- h.lock()
- delete(h.condwait, cond)
- h.unlock()
- cond.Broadcast()
-}
-
-// condWait is sync.Cond.Wait.
-func (h *testSyncHooks) condWait(cond *sync.Cond) {
- h.lock()
- h.condwait[cond]++
- h.unlock()
-}
-
-// newTimer creates a new fake timer.
-func (h *testSyncHooks) newTimer(d time.Duration) timer {
- h.lock()
- defer h.unlock()
- t := &fakeTimer{
- hooks: h,
- when: h.now.Add(d),
- c: make(chan time.Time),
- }
- h.timers = append(h.timers, t)
- return t
-}
-
-// afterFunc creates a new fake AfterFunc timer.
-func (h *testSyncHooks) afterFunc(d time.Duration, f func()) timer {
- h.lock()
- defer h.unlock()
- t := &fakeTimer{
- hooks: h,
- when: h.now.Add(d),
- f: f,
- }
- h.timers = append(h.timers, t)
- return t
-}
-
-func (h *testSyncHooks) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
- ctx, cancel := context.WithCancel(ctx)
- t := h.afterFunc(d, cancel)
- return ctx, func() {
- t.Stop()
- cancel()
- }
-}
-
-func (h *testSyncHooks) timeUntilEvent() time.Duration {
- h.lock()
- defer h.unlock()
- var next time.Time
- for _, t := range h.timers {
- if next.IsZero() || t.when.Before(next) {
- next = t.when
- }
- }
- if d := next.Sub(h.now); d > 0 {
- return d
- }
- return 0
-}
-
-// advance advances time and causes synthetic timers to fire.
-func (h *testSyncHooks) advance(d time.Duration) {
- h.lock()
- defer h.unlock()
- h.now = h.now.Add(d)
- timers := h.timers[:0]
- for _, t := range h.timers {
- t := t // remove after go.mod depends on go1.22
- t.mu.Lock()
- switch {
- case t.when.After(h.now):
- timers = append(timers, t)
- case t.when.IsZero():
- // stopped timer
- default:
- t.when = time.Time{}
- if t.c != nil {
- close(t.c)
- }
- if t.f != nil {
- h.total++
- go func() {
- defer func() {
- h.lock()
- h.total--
- h.unlock()
- }()
- t.f()
- }()
- }
- }
- t.mu.Unlock()
- }
- h.timers = timers
-}
-
-// A timer wraps a time.Timer, or a synthetic equivalent in tests.
-// Unlike time.Timer, timer is single-use: The timer channel is closed when the timer expires.
-type timer interface {
- C() <-chan time.Time
- Stop() bool
- Reset(d time.Duration) bool
-}
-
-// timeTimer implements timer using real time.
-type timeTimer struct {
- t *time.Timer
- c chan time.Time
-}
-
-// newTimeTimer creates a new timer using real time.
-func newTimeTimer(d time.Duration) timer {
- ch := make(chan time.Time)
- t := time.AfterFunc(d, func() {
- close(ch)
- })
- return &timeTimer{t, ch}
-}
-
-// newTimeAfterFunc creates an AfterFunc timer using real time.
-func newTimeAfterFunc(d time.Duration, f func()) timer {
- return &timeTimer{
- t: time.AfterFunc(d, f),
- }
-}
-
-func (t timeTimer) C() <-chan time.Time { return t.c }
-func (t timeTimer) Stop() bool { return t.t.Stop() }
-func (t timeTimer) Reset(d time.Duration) bool { return t.t.Reset(d) }
-
-// fakeTimer implements timer using fake time.
-type fakeTimer struct {
- hooks *testSyncHooks
-
- mu sync.Mutex
- when time.Time // when the timer will fire
- c chan time.Time // closed when the timer fires; mutually exclusive with f
- f func() // called when the timer fires; mutually exclusive with c
-}
-
-func (t *fakeTimer) C() <-chan time.Time { return t.c }
-
-func (t *fakeTimer) Stop() bool {
- t.mu.Lock()
- defer t.mu.Unlock()
- stopped := t.when.IsZero()
- t.when = time.Time{}
- return stopped
-}
-
-func (t *fakeTimer) Reset(d time.Duration) bool {
- if t.c != nil || t.f == nil {
- panic("fakeTimer only supports Reset on AfterFunc timers")
- }
- t.mu.Lock()
- defer t.mu.Unlock()
- t.hooks.lock()
- defer t.hooks.unlock()
- active := !t.when.IsZero()
- t.when = t.hooks.now.Add(d)
- if !active {
- t.hooks.timers = append(t.hooks.timers, t)
- }
- return active
-}
diff --git a/http2/timer.go b/http2/timer.go
new file mode 100644
index 0000000000..0b1c17b812
--- /dev/null
+++ b/http2/timer.go
@@ -0,0 +1,20 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package http2
+
+import "time"
+
+// A timer is a time.Timer, as an interface which can be replaced in tests.
+type timer = interface {
+ C() <-chan time.Time
+ Reset(d time.Duration) bool
+ Stop() bool
+}
+
+// timeTimer adapts a time.Timer to the timer interface.
+type timeTimer struct {
+ *time.Timer
+}
+
+func (t timeTimer) C() <-chan time.Time { return t.Timer.C }
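For illustration, the adapter works because *time.Timer already provides Reset and Stop, so embedding it leaves only C() to implement; a standalone sketch of using the interface with a real timer (a test can substitute any type with the same three methods):

    package main

    import (
        "fmt"
        "time"
    )

    // timer matches the shape introduced in http2/timer.go.
    type timer interface {
        C() <-chan time.Time
        Reset(d time.Duration) bool
        Stop() bool
    }

    // timeTimer adapts *time.Timer; Reset and Stop are promoted from the embedded field.
    type timeTimer struct{ *time.Timer }

    func (t timeTimer) C() <-chan time.Time { return t.Timer.C }

    func main() {
        var tm timer = timeTimer{time.NewTimer(10 * time.Millisecond)}
        <-tm.C()
        fmt.Println("fired; Stop reports", tm.Stop()) // false: the timer already fired
    }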
diff --git a/http2/transport.go b/http2/transport.go
index ce375c8c75..090d0e1bdb 100644
--- a/http2/transport.go
+++ b/http2/transport.go
@@ -25,7 +25,6 @@ import (
"net/http"
"net/http/httptrace"
"net/textproto"
- "os"
"sort"
"strconv"
"strings"
@@ -185,42 +184,80 @@ type Transport struct {
connPoolOnce sync.Once
connPoolOrDef ClientConnPool // non-nil version of ConnPool
- syncHooks *testSyncHooks
+ *transportTestHooks
}
-func (t *Transport) maxHeaderListSize() uint32 {
- if t.MaxHeaderListSize == 0 {
- return 10 << 20
+// Hook points used for testing.
+// Outside of tests, t.transportTestHooks is nil and these all have minimal implementations.
+// Inside tests, see the testSyncHooks function docs.
+
+type transportTestHooks struct {
+ newclientconn func(*ClientConn)
+ group synctestGroupInterface
+}
+
+func (t *Transport) markNewGoroutine() {
+ if t != nil && t.transportTestHooks != nil {
+ t.transportTestHooks.group.Join()
}
- if t.MaxHeaderListSize == 0xffffffff {
- return 0
+}
+
+func (t *Transport) now() time.Time {
+ if t != nil && t.transportTestHooks != nil {
+ return t.transportTestHooks.group.Now()
}
- return t.MaxHeaderListSize
+ return time.Now()
}
-func (t *Transport) maxFrameReadSize() uint32 {
- if t.MaxReadFrameSize == 0 {
- return 0 // use the default provided by the peer
+func (t *Transport) timeSince(when time.Time) time.Duration {
+ if t != nil && t.transportTestHooks != nil {
+ return t.now().Sub(when)
}
- if t.MaxReadFrameSize < minMaxFrameSize {
- return minMaxFrameSize
+ return time.Since(when)
+}
+
+// newTimer creates a new time.Timer, or a synthetic timer in tests.
+func (t *Transport) newTimer(d time.Duration) timer {
+ if t.transportTestHooks != nil {
+ return t.transportTestHooks.group.NewTimer(d)
}
- if t.MaxReadFrameSize > maxFrameSize {
- return maxFrameSize
+ return timeTimer{time.NewTimer(d)}
+}
+
+// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
+func (t *Transport) afterFunc(d time.Duration, f func()) timer {
+ if t.transportTestHooks != nil {
+ return t.transportTestHooks.group.AfterFunc(d, f)
}
- return t.MaxReadFrameSize
+ return timeTimer{time.AfterFunc(d, f)}
}
-func (t *Transport) disableCompression() bool {
- return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
+func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
+ if t.transportTestHooks != nil {
+ return t.transportTestHooks.group.ContextWithTimeout(ctx, d)
+ }
+ return context.WithTimeout(ctx, d)
}
-func (t *Transport) pingTimeout() time.Duration {
- if t.PingTimeout == 0 {
- return 15 * time.Second
+func (t *Transport) maxHeaderListSize() uint32 {
+ n := int64(t.MaxHeaderListSize)
+ if t.t1 != nil && t.t1.MaxResponseHeaderBytes != 0 {
+ n = t.t1.MaxResponseHeaderBytes
+ if n > 0 {
+ n = adjustHTTP1MaxHeaderSize(n)
+ }
}
- return t.PingTimeout
+ if n <= 0 {
+ return 10 << 20
+ }
+ if n >= 0xffffffff {
+ return 0
+ }
+ return uint32(n)
+}
+func (t *Transport) disableCompression() bool {
+ return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
}
// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
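To make the new maxHeaderListSize defaulting concrete: a wrapped HTTP/1 transport's MaxResponseHeaderBytes takes precedence over Transport.MaxHeaderListSize, zero falls back to the 10 MiB default, and 2^32-1 or more advertises no limit. A hedged sketch of just those rules (it omits the adjustHTTP1MaxHeaderSize padding step, whose definition lives elsewhere in the package):

    package main

    import "fmt"

    // effectiveMaxHeaderListSize mirrors the defaulting rules in Transport.maxHeaderListSize,
    // minus the HTTP/1 size adjustment.
    func effectiveMaxHeaderListSize(maxHeaderListSize uint32, http1MaxResponseHeaderBytes int64) uint32 {
        n := int64(maxHeaderListSize)
        if http1MaxResponseHeaderBytes != 0 {
            n = http1MaxResponseHeaderBytes
        }
        if n <= 0 {
            return 10 << 20 // 10 MiB default
        }
        if n >= 0xffffffff {
            return 0 // advertise no limit
        }
        return uint32(n)
    }

    func main() {
        fmt.Println(effectiveMaxHeaderListSize(0, 0))          // 10485760
        fmt.Println(effectiveMaxHeaderListSize(1<<20, 0))      // 1048576
        fmt.Println(effectiveMaxHeaderListSize(0, 64<<10))     // 65536
        fmt.Println(effectiveMaxHeaderListSize(0xffffffff, 0)) // 0
    }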
@@ -258,8 +295,8 @@ func configureTransports(t1 *http.Transport) (*Transport, error) {
if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") {
t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1")
}
- upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper {
- addr := authorityAddr("https", authority)
+ upgradeFn := func(scheme, authority string, c net.Conn) http.RoundTripper {
+ addr := authorityAddr(scheme, authority)
if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil {
go c.Close()
return erringRoundTripper{err}
@@ -270,18 +307,37 @@ func configureTransports(t1 *http.Transport) (*Transport, error) {
// was unknown)
go c.Close()
}
+ if scheme == "http" {
+ return (*unencryptedTransport)(t2)
+ }
return t2
}
- if m := t1.TLSNextProto; len(m) == 0 {
- t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{
- "h2": upgradeFn,
+ if t1.TLSNextProto == nil {
+ t1.TLSNextProto = make(map[string]func(string, *tls.Conn) http.RoundTripper)
+ }
+ t1.TLSNextProto[NextProtoTLS] = func(authority string, c *tls.Conn) http.RoundTripper {
+ return upgradeFn("https", authority, c)
+ }
+ // The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns.
+ t1.TLSNextProto[nextProtoUnencryptedHTTP2] = func(authority string, c *tls.Conn) http.RoundTripper {
+ nc, err := unencryptedNetConnFromTLSConn(c)
+ if err != nil {
+ go c.Close()
+ return erringRoundTripper{err}
}
- } else {
- m["h2"] = upgradeFn
+ return upgradeFn("http", authority, nc)
}
return t2, nil
}
+// unencryptedTransport is a Transport with a RoundTrip method that
+// always permits http:// URLs.
+type unencryptedTransport Transport
+
+func (t *unencryptedTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ return (*Transport)(t).RoundTripOpt(req, RoundTripOpt{allowHTTP: true})
+}
+
func (t *Transport) connPool() ClientConnPool {
t.connPoolOnce.Do(t.initConnPool)
return t.connPoolOrDef
@@ -301,7 +357,7 @@ type ClientConn struct {
t *Transport
tconn net.Conn // usually *tls.Conn, except specialized impls
tlsState *tls.ConnectionState // nil only for specialized impls
- reused uint32 // whether conn is being reused; atomic
+ atomicReused uint32 // whether conn is being reused; atomic
singleUse bool // whether being used for a single http.Request
getConnCalled bool // used by clientConnPool
@@ -312,31 +368,54 @@ type ClientConn struct {
idleTimeout time.Duration // or 0 for never
idleTimer timer
- mu sync.Mutex // guards following
- cond *sync.Cond // hold mu; broadcast on flow/closed changes
- flow outflow // our conn-level flow control quota (cs.outflow is per stream)
- inflow inflow // peer's conn-level flow control
- doNotReuse bool // whether conn is marked to not be reused for any future requests
- closing bool
- closed bool
- seenSettings bool // true if we've seen a settings frame, false otherwise
- wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back
- goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received
- goAwayDebug string // goAway frame's debug data, retained as a string
- streams map[uint32]*clientStream // client-initiated
- streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip
- nextStreamID uint32
- pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams
- pings map[[8]byte]chan struct{} // in flight ping data to notification channel
- br *bufio.Reader
- lastActive time.Time
- lastIdle time.Time // time last idle
+ mu sync.Mutex // guards following
+ cond *sync.Cond // hold mu; broadcast on flow/closed changes
+ flow outflow // our conn-level flow control quota (cs.outflow is per stream)
+ inflow inflow // peer's conn-level flow control
+ doNotReuse bool // whether conn is marked to not be reused for any future requests
+ closing bool
+ closed bool
+ seenSettings bool // true if we've seen a settings frame, false otherwise
+ seenSettingsChan chan struct{} // closed when seenSettings is true or frame reading fails
+ wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back
+ goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received
+ goAwayDebug string // goAway frame's debug data, retained as a string
+ streams map[uint32]*clientStream // client-initiated
+ streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip
+ nextStreamID uint32
+ pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams
+ pings map[[8]byte]chan struct{} // in flight ping data to notification channel
+ br *bufio.Reader
+ lastActive time.Time
+ lastIdle time.Time // time last idle
// Settings from peer: (also guarded by wmu)
- maxFrameSize uint32
- maxConcurrentStreams uint32
- peerMaxHeaderListSize uint64
- peerMaxHeaderTableSize uint32
- initialWindowSize uint32
+ maxFrameSize uint32
+ maxConcurrentStreams uint32
+ peerMaxHeaderListSize uint64
+ peerMaxHeaderTableSize uint32
+ initialWindowSize uint32
+ initialStreamRecvWindowSize int32
+ readIdleTimeout time.Duration
+ pingTimeout time.Duration
+ extendedConnectAllowed bool
+
+ // rstStreamPingsBlocked works around an unfortunate gRPC behavior.
+ // gRPC strictly limits the number of PING frames that it will receive.
+ // The default is two pings per two hours, but the limit resets every time
+ // the gRPC endpoint sends a HEADERS or DATA frame. See golang/go#70575.
+ //
+ // rstStreamPingsBlocked is set after receiving a response to a PING frame
+ // bundled with an RST_STREAM (see pendingResets below), and cleared after
+ // receiving a HEADERS or DATA frame.
+ rstStreamPingsBlocked bool
+
+ // pendingResets is the number of RST_STREAM frames we have sent to the peer,
+ // without confirming that the peer has received them. When we send a RST_STREAM,
+ // we bundle it with a PING frame, unless a PING is already in flight. We count
+ // the reset stream against the connection's concurrency limit until we get
+ // a PING response. This limits the number of requests we'll try to send to a
+ // completely unresponsive connection.
+ pendingResets int
// reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests.
// Write to reqHeaderMu to lock it, read from it to unlock.
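To make the pendingResets accounting concrete: a stream we have reset but whose bundled PING has not been acknowledged still occupies a concurrency slot, so new requests are admitted only while active streams, reservations, and pending resets together stay under the peer's limit. An illustrative, self-contained sketch (the names are not the package's; the real check is currentRequestCountLocked plus idleStateLocked):

    package main

    import "fmt"

    // connLoad is an illustrative snapshot of the quantities counted against
    // SETTINGS_MAX_CONCURRENT_STREAMS.
    type connLoad struct {
        activeStreams   int // open client-initiated streams
        reservedStreams int // ReserveNewRequest slots not yet used
        pendingResets   int // RST_STREAM sent, bundled PING not yet acknowledged
    }

    func (l connLoad) canTakeNewRequest(maxConcurrentStreams int) bool {
        return l.activeStreams+l.reservedStreams+l.pendingResets < maxConcurrentStreams
    }

    func main() {
        l := connLoad{activeStreams: 8, reservedStreams: 1, pendingResets: 1}
        fmt.Println(l.canTakeNewRequest(10)) // false: 8+1+1 is not below 10
        fmt.Println(l.canTakeNewRequest(11)) // true
    }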
@@ -352,60 +431,6 @@ type ClientConn struct {
werr error // first write error that has occurred
hbuf bytes.Buffer // HPACK encoder writes into this
henc *hpack.Encoder
-
- syncHooks *testSyncHooks // can be nil
-}
-
-// Hook points used for testing.
-// Outside of tests, cc.syncHooks is nil and these all have minimal implementations.
-// Inside tests, see the testSyncHooks function docs.
-
-// goRun starts a new goroutine.
-func (cc *ClientConn) goRun(f func()) {
- if cc.syncHooks != nil {
- cc.syncHooks.goRun(f)
- return
- }
- go f()
-}
-
-// condBroadcast is cc.cond.Broadcast.
-func (cc *ClientConn) condBroadcast() {
- if cc.syncHooks != nil {
- cc.syncHooks.condBroadcast(cc.cond)
- }
- cc.cond.Broadcast()
-}
-
-// condWait is cc.cond.Wait.
-func (cc *ClientConn) condWait() {
- if cc.syncHooks != nil {
- cc.syncHooks.condWait(cc.cond)
- }
- cc.cond.Wait()
-}
-
-// newTimer creates a new time.Timer, or a synthetic timer in tests.
-func (cc *ClientConn) newTimer(d time.Duration) timer {
- if cc.syncHooks != nil {
- return cc.syncHooks.newTimer(d)
- }
- return newTimeTimer(d)
-}
-
-// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
-func (cc *ClientConn) afterFunc(d time.Duration, f func()) timer {
- if cc.syncHooks != nil {
- return cc.syncHooks.afterFunc(d, f)
- }
- return newTimeAfterFunc(d, f)
-}
-
-func (cc *ClientConn) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
- if cc.syncHooks != nil {
- return cc.syncHooks.contextWithTimeout(ctx, d)
- }
- return context.WithTimeout(ctx, d)
}
// clientStream is the state for a single HTTP/2 stream. One of these
@@ -448,12 +473,12 @@ type clientStream struct {
sentHeaders bool
// owned by clientConnReadLoop:
- firstByte bool // got the first response byte
- pastHeaders bool // got first MetaHeadersFrame (actual headers)
- pastTrailers bool // got optional second MetaHeadersFrame (trailers)
- num1xx uint8 // number of 1xx responses seen
- readClosed bool // peer sent an END_STREAM flag
- readAborted bool // read loop reset the stream
+ firstByte bool // got the first response byte
+ pastHeaders bool // got first MetaHeadersFrame (actual headers)
+ pastTrailers bool // got optional second MetaHeadersFrame (trailers)
+ readClosed bool // peer sent an END_STREAM flag
+ readAborted bool // read loop reset the stream
+ totalHeaderSize int64 // total size of 1xx headers seen
trailer http.Header // accumulated trailers
resTrailer *http.Header // client's Response.Trailer
@@ -487,7 +512,7 @@ func (cs *clientStream) abortStreamLocked(err error) {
// TODO(dneil): Clean up tests where cs.cc.cond is nil.
if cs.cc.cond != nil {
// Wake up writeRequestBody if it is waiting on flow control.
- cs.cc.condBroadcast()
+ cs.cc.cond.Broadcast()
}
}
@@ -497,7 +522,7 @@ func (cs *clientStream) abortRequestBodyWrite() {
defer cc.mu.Unlock()
if cs.reqBody != nil && cs.reqBodyClosed == nil {
cs.closeReqBodyLocked()
- cc.condBroadcast()
+ cc.cond.Broadcast()
}
}
@@ -507,13 +532,15 @@ func (cs *clientStream) closeReqBodyLocked() {
}
cs.reqBodyClosed = make(chan struct{})
reqBodyClosed := cs.reqBodyClosed
- cs.cc.goRun(func() {
+ go func() {
+ cs.cc.t.markNewGoroutine()
cs.reqBody.Close()
close(reqBodyClosed)
- })
+ }()
}
type stickyErrWriter struct {
+ group synctestGroupInterface
conn net.Conn
timeout time.Duration
err *error
@@ -523,22 +550,9 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
if *sew.err != nil {
return 0, *sew.err
}
- for {
- if sew.timeout != 0 {
- sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout))
- }
- nn, err := sew.conn.Write(p[n:])
- n += nn
- if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) {
- // Keep extending the deadline so long as we're making progress.
- continue
- }
- if sew.timeout != 0 {
- sew.conn.SetWriteDeadline(time.Time{})
- }
- *sew.err = err
- return n, err
- }
+ n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p)
+ *sew.err = err
+ return n, err
}
// noCachedConnError is the concrete type of ErrNoCachedConn, which
@@ -569,6 +583,8 @@ type RoundTripOpt struct {
// no cached connection is available, RoundTripOpt
// will return ErrNoCachedConn.
OnlyCachedConn bool
+
+ allowHTTP bool // allow http:// URLs
}
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
@@ -601,7 +617,14 @@ func authorityAddr(scheme string, authority string) (addr string) {
// RoundTripOpt is like RoundTrip, but takes options.
func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) {
- if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) {
+ switch req.URL.Scheme {
+ case "https":
+ // Always okay.
+ case "http":
+ if !t.AllowHTTP && !opt.allowHTTP {
+ return nil, errors.New("http2: unencrypted HTTP/2 not enabled")
+ }
+ default:
return nil, errors.New("http2: unsupported scheme")
}
@@ -612,7 +635,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err)
return nil, err
}
- reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1)
+ reused := !atomic.CompareAndSwapUint32(&cc.atomicReused, 0, 1)
traceGotConn(req, cc, reused)
res, err := cc.RoundTrip(req)
if err != nil && retry <= 6 {
@@ -626,21 +649,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
backoff := float64(uint(1) << (uint(retry) - 1))
backoff += backoff * (0.1 * mathrand.Float64())
d := time.Second * time.Duration(backoff)
- var tm timer
- if t.syncHooks != nil {
- tm = t.syncHooks.newTimer(d)
- t.syncHooks.blockUntil(func() bool {
- select {
- case <-tm.C():
- case <-req.Context().Done():
- default:
- return false
- }
- return true
- })
- } else {
- tm = newTimeTimer(d)
- }
+ tm := t.newTimer(d)
select {
case <-tm.C():
t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
@@ -651,6 +660,22 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
}
}
}
+ if err == errClientConnNotEstablished {
+ // This ClientConn was created recently,
+ // this is the first request to use it,
+ // and the connection is closed and not usable.
+ //
+ // In this state, cc.idleTimer will remove the conn from the pool
+ // when it fires. Stop the timer and remove it here so future requests
+ // won't try to use this connection.
+ //
+ // If the timer has already fired and we're racing it, the redundant
+ // call to MarkDead is harmless.
+ if cc.idleTimer != nil {
+ cc.idleTimer.Stop()
+ }
+ t.connPool().MarkDead(cc)
+ }
if err != nil {
t.vlogf("RoundTrip failure: %v", err)
return nil, err
@@ -669,9 +694,10 @@ func (t *Transport) CloseIdleConnections() {
}
var (
- errClientConnClosed = errors.New("http2: client conn is closed")
- errClientConnUnusable = errors.New("http2: client conn not usable")
- errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
+ errClientConnClosed = errors.New("http2: client conn is closed")
+ errClientConnUnusable = errors.New("http2: client conn not usable")
+ errClientConnNotEstablished = errors.New("http2: client conn could not be established")
+ errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
)
// shouldRetryRequest is called by RoundTrip when a request fails to get
@@ -725,8 +751,8 @@ func canRetryError(err error) bool {
}
func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) {
- if t.syncHooks != nil {
- return t.newClientConn(nil, singleUse, t.syncHooks)
+ if t.transportTestHooks != nil {
+ return t.newClientConn(nil, singleUse)
}
host, _, err := net.SplitHostPort(addr)
if err != nil {
@@ -736,7 +762,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b
if err != nil {
return nil, err
}
- return t.newClientConn(tconn, singleUse, nil)
+ return t.newClientConn(tconn, singleUse)
}
func (t *Transport) newTLSConfig(host string) *tls.Config {
@@ -787,48 +813,38 @@ func (t *Transport) expectContinueTimeout() time.Duration {
return t.t1.ExpectContinueTimeout
}
-func (t *Transport) maxDecoderHeaderTableSize() uint32 {
- if v := t.MaxDecoderHeaderTableSize; v > 0 {
- return v
- }
- return initialHeaderTableSize
-}
-
-func (t *Transport) maxEncoderHeaderTableSize() uint32 {
- if v := t.MaxEncoderHeaderTableSize; v > 0 {
- return v
- }
- return initialHeaderTableSize
-}
-
func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
- return t.newClientConn(c, t.disableKeepAlives(), nil)
+ return t.newClientConn(c, t.disableKeepAlives())
}
-func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHooks) (*ClientConn, error) {
+func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
+ conf := configFromTransport(t)
cc := &ClientConn{
- t: t,
- tconn: c,
- readerDone: make(chan struct{}),
- nextStreamID: 1,
- maxFrameSize: 16 << 10, // spec default
- initialWindowSize: 65535, // spec default
- maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings.
- peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
- streams: make(map[uint32]*clientStream),
- singleUse: singleUse,
- wantSettingsAck: true,
- pings: make(map[[8]byte]chan struct{}),
- reqHeaderMu: make(chan struct{}, 1),
- syncHooks: hooks,
- }
- if hooks != nil {
- hooks.newclientconn(cc)
+ t: t,
+ tconn: c,
+ readerDone: make(chan struct{}),
+ nextStreamID: 1,
+ maxFrameSize: 16 << 10, // spec default
+ initialWindowSize: 65535, // spec default
+ initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream,
+ maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings.
+ peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
+ streams: make(map[uint32]*clientStream),
+ singleUse: singleUse,
+ seenSettingsChan: make(chan struct{}),
+ wantSettingsAck: true,
+ readIdleTimeout: conf.SendPingTimeout,
+ pingTimeout: conf.PingTimeout,
+ pings: make(map[[8]byte]chan struct{}),
+ reqHeaderMu: make(chan struct{}, 1),
+ lastActive: t.now(),
+ }
+ var group synctestGroupInterface
+ if t.transportTestHooks != nil {
+ t.markNewGoroutine()
+ t.transportTestHooks.newclientconn(cc)
c = cc.tconn
- }
- if d := t.idleConnTimeout(); d != 0 {
- cc.idleTimeout = d
- cc.idleTimer = cc.afterFunc(d, cc.onIdleTimeout)
+ group = t.group
}
if VerboseLogs {
t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
@@ -840,30 +856,25 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHoo
// TODO: adjust this writer size to account for frame size +
// MTU + crypto/tls record padding.
cc.bw = bufio.NewWriter(stickyErrWriter{
+ group: group,
conn: c,
- timeout: t.WriteByteTimeout,
+ timeout: conf.WriteByteTimeout,
err: &cc.werr,
})
cc.br = bufio.NewReader(c)
cc.fr = NewFramer(cc.bw, cc.br)
- if t.maxFrameReadSize() != 0 {
- cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize())
- }
+ cc.fr.SetMaxReadFrameSize(conf.MaxReadFrameSize)
if t.CountError != nil {
cc.fr.countError = t.CountError
}
- maxHeaderTableSize := t.maxDecoderHeaderTableSize()
+ maxHeaderTableSize := conf.MaxDecoderHeaderTableSize
cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil)
cc.fr.MaxHeaderListSize = t.maxHeaderListSize()
cc.henc = hpack.NewEncoder(&cc.hbuf)
- cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize())
+ cc.henc.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize)
cc.peerMaxHeaderTableSize = initialHeaderTableSize
- if t.AllowHTTP {
- cc.nextStreamID = 3
- }
-
if cs, ok := c.(connectionStater); ok {
state := cs.ConnectionState()
cc.tlsState = &state
@@ -871,11 +882,9 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHoo
initialSettings := []Setting{
{ID: SettingEnablePush, Val: 0},
- {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow},
- }
- if max := t.maxFrameReadSize(); max != 0 {
- initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max})
+ {ID: SettingInitialWindowSize, Val: uint32(cc.initialStreamRecvWindowSize)},
}
+ initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: conf.MaxReadFrameSize})
if max := t.maxHeaderListSize(); max != 0 {
initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})
}
@@ -885,23 +894,29 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHoo
cc.bw.Write(clientPreface)
cc.fr.WriteSettings(initialSettings...)
- cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
- cc.inflow.init(transportDefaultConnFlow + initialWindowSize)
+ cc.fr.WriteWindowUpdate(0, uint32(conf.MaxUploadBufferPerConnection))
+ cc.inflow.init(conf.MaxUploadBufferPerConnection + initialWindowSize)
cc.bw.Flush()
if cc.werr != nil {
cc.Close()
return nil, cc.werr
}
- cc.goRun(cc.readLoop)
+ // Start the idle timer after the connection is fully initialized.
+ if d := t.idleConnTimeout(); d != 0 {
+ cc.idleTimeout = d
+ cc.idleTimer = t.afterFunc(d, cc.onIdleTimeout)
+ }
+
+ go cc.readLoop()
return cc, nil
}
func (cc *ClientConn) healthCheck() {
- pingTimeout := cc.t.pingTimeout()
+ pingTimeout := cc.pingTimeout
// We don't need to periodically ping in the health check, because the readLoop of ClientConn will
// trigger the healthCheck again if there is no frame received.
- ctx, cancel := cc.contextWithTimeout(context.Background(), pingTimeout)
+ ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout)
defer cancel()
cc.vlogf("http2: Transport sending health check")
err := cc.Ping(ctx)
@@ -936,7 +951,20 @@ func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
}
last := f.LastStreamID
for streamID, cs := range cc.streams {
- if streamID > last {
+ if streamID <= last {
+ // The server's GOAWAY indicates that it received this stream.
+ // It will either finish processing it, or close the connection
+ // without doing so. Either way, leave the stream alone for now.
+ continue
+ }
+ if streamID == 1 && cc.goAway.ErrCode != ErrCodeNo {
+ // Don't retry the first stream on a connection if we get a non-NO error.
+ // If the server is sending an error on a new connection,
+ // retrying the request on a new one probably isn't going to work.
+ cs.abortStreamLocked(fmt.Errorf("http2: Transport received GOAWAY from server ErrCode:%v", cc.goAway.ErrCode))
+ } else {
+ // Aborting the stream with errClientConnGotGoAway indicates that
+ // the request should be retried on a new connection.
cs.abortStreamLocked(errClientConnGotGoAway)
}
}
@@ -1013,7 +1041,7 @@ func (cc *ClientConn) State() ClientConnState {
return ClientConnState{
Closed: cc.closed,
Closing: cc.closing || cc.singleUse || cc.doNotReuse || cc.goAway != nil,
- StreamsActive: len(cc.streams),
+ StreamsActive: len(cc.streams) + cc.pendingResets,
StreamsReserved: cc.streamsReserved,
StreamsPending: cc.pendingRequests,
LastIdle: cc.lastIdle,
@@ -1045,16 +1073,38 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
// writing it.
maxConcurrentOkay = true
} else {
- maxConcurrentOkay = int64(len(cc.streams)+cc.streamsReserved+1) <= int64(cc.maxConcurrentStreams)
+ // We can take a new request if the total of
+ // - active streams;
+ // - reservation slots for new streams; and
+ // - streams for which we have sent a RST_STREAM and a PING,
+ // but received no subsequent frame
+ // is less than the concurrency limit.
+ maxConcurrentOkay = cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams)
}
st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay &&
!cc.doNotReuse &&
int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 &&
!cc.tooIdleLocked()
+
+ // If this connection has never been used for a request and is closed,
+ // then let it take a request (which will fail).
+ //
+ // This avoids a situation where an error early in a connection's lifetime
+ // goes unreported.
+ if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed {
+ st.canTakeNewRequest = true
+ }
+
return
}
+// currentRequestCountLocked reports the number of concurrency slots currently in use,
+// including active streams, reserved slots, and reset streams waiting for acknowledgement.
+func (cc *ClientConn) currentRequestCountLocked() int {
+ return len(cc.streams) + cc.streamsReserved + cc.pendingResets
+}
+
func (cc *ClientConn) canTakeNewRequestLocked() bool {
st := cc.idleStateLocked()
return st.canTakeNewRequest
@@ -1067,7 +1117,7 @@ func (cc *ClientConn) tooIdleLocked() bool {
// times are compared based on their wall time. We don't want
// to reuse a connection that's been sitting idle during
// VM/laptop suspend if monotonic time was also frozen.
- return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout
+ return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout
}
// onIdleTimeout is called from a time.AfterFunc goroutine. It will
@@ -1131,7 +1181,8 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
// Wait for all in-flight streams to complete or connection to close
done := make(chan struct{})
cancelled := false // guarded by cc.mu
- cc.goRun(func() {
+ go func() {
+ cc.t.markNewGoroutine()
cc.mu.Lock()
defer cc.mu.Unlock()
for {
@@ -1143,9 +1194,9 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
if cancelled {
break
}
- cc.condWait()
+ cc.cond.Wait()
}
- })
+ }()
shutdownEnterWaitStateHook()
select {
case <-done:
@@ -1155,7 +1206,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
cc.mu.Lock()
// Free the goroutine above
cancelled = true
- cc.condBroadcast()
+ cc.cond.Broadcast()
cc.mu.Unlock()
return ctx.Err()
}
@@ -1193,7 +1244,7 @@ func (cc *ClientConn) closeForError(err error) {
for _, cs := range cc.streams {
cs.abortStreamLocked(err)
}
- cc.condBroadcast()
+ cc.cond.Broadcast()
cc.mu.Unlock()
cc.closeConn()
}
@@ -1308,23 +1359,30 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
respHeaderRecv: make(chan struct{}),
donec: make(chan struct{}),
}
- cc.goRun(func() {
- cs.doRequest(req)
- })
+
+ // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
+ if !cc.t.disableCompression() &&
+ req.Header.Get("Accept-Encoding") == "" &&
+ req.Header.Get("Range") == "" &&
+ !cs.isHead {
+ // Request gzip only, not deflate. Deflate is ambiguous and
+ // not as universally supported anyway.
+ // See: https://zlib.net/zlib_faq.html#faq39
+ //
+ // Note that we don't request this for HEAD requests,
+ // due to a bug in nginx:
+ // http://trac.nginx.org/nginx/ticket/358
+ // https://golang.org/issue/5522
+ //
+ // We don't request gzip if the request is for a range, since
+ // auto-decoding a portion of a gzipped document will just fail
+ // anyway. See https://golang.org/issue/8923
+ cs.requestedGzip = true
+ }
+
+ go cs.doRequest(req, streamf)
waitDone := func() error {
- if cc.syncHooks != nil {
- cc.syncHooks.blockUntil(func() bool {
- select {
- case <-cs.donec:
- case <-ctx.Done():
- case <-cs.reqCancel:
- default:
- return false
- }
- return true
- })
- }
select {
case <-cs.donec:
return nil
@@ -1385,24 +1443,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
return err
}
- if streamf != nil {
- streamf(cs)
- }
-
for {
- if cc.syncHooks != nil {
- cc.syncHooks.blockUntil(func() bool {
- select {
- case <-cs.respHeaderRecv:
- case <-cs.abort:
- case <-ctx.Done():
- case <-cs.reqCancel:
- default:
- return false
- }
- return true
- })
- }
select {
case <-cs.respHeaderRecv:
return handleResponseHeaders()
@@ -1432,11 +1473,14 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
// doRequest runs for the duration of the request lifetime.
//
// It sends the request and performs post-request cleanup (closing Request.Body, etc.).
-func (cs *clientStream) doRequest(req *http.Request) {
- err := cs.writeRequest(req)
+func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) {
+ cs.cc.t.markNewGoroutine()
+ err := cs.writeRequest(req, streamf)
cs.cleanupWriteRequest(err)
}
+var errExtendedConnectNotSupported = errors.New("net/http: extended connect not supported by peer")
+
// writeRequest sends a request.
//
// It returns nil after the request is written, the response read,
@@ -1444,7 +1488,7 @@ func (cs *clientStream) doRequest(req *http.Request) {
//
// It returns non-nil if the request ends otherwise.
// If the returned error is StreamError, the error Code may be used in resetting the stream.
-func (cs *clientStream) writeRequest(req *http.Request) (err error) {
+func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStream)) (err error) {
cc := cs.cc
ctx := cs.ctx
@@ -1452,26 +1496,30 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) {
return err
}
+ // Wait for the peer's SETTINGS frame before an extended CONNECT: the server can change
+ // its advertised support later, but we only wait for the first SETTINGS frame.
+ var isExtendedConnect bool
+ if req.Method == "CONNECT" && req.Header.Get(":protocol") != "" {
+ isExtendedConnect = true
+ }
+
// Acquire the new-request lock by writing to reqHeaderMu.
// This lock guards the critical section covering allocating a new stream ID
// (requires mu) and creating the stream (requires wmu).
if cc.reqHeaderMu == nil {
panic("RoundTrip on uninitialized ClientConn") // for tests
}
- var newStreamHook func(*clientStream)
- if cc.syncHooks != nil {
- newStreamHook = cc.syncHooks.newstream
- cc.syncHooks.blockUntil(func() bool {
- select {
- case cc.reqHeaderMu <- struct{}{}:
- <-cc.reqHeaderMu
- case <-cs.reqCancel:
- case <-ctx.Done():
- default:
- return false
+ if isExtendedConnect {
+ select {
+ case <-cs.reqCancel:
+ return errRequestCanceled
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-cc.seenSettingsChan:
+ if !cc.extendedConnectAllowed {
+ return errExtendedConnectNotSupported
}
- return true
- })
+ }
}
select {
case cc.reqHeaderMu <- struct{}{}:
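The gate above applies to extended CONNECT: a CONNECT request that carries a :protocol pseudo-header (RFC 8441 style, e.g. WebSockets) must wait for the server's first SETTINGS frame to learn whether the extension is allowed. A hedged sketch of how such a request is shaped and detected; the host and protocol values are illustrative, and whether the request ultimately succeeds still depends on the settings the server advertises:

    package main

    import (
        "fmt"
        "net/http"
        "net/url"
    )

    // isExtendedConnect mirrors the check made before waiting for the peer's SETTINGS:
    // a CONNECT request with a :protocol pseudo-header is "extended CONNECT".
    func isExtendedConnect(req *http.Request) bool {
        return req.Method == "CONNECT" && req.Header.Get(":protocol") != ""
    }

    func main() {
        req := &http.Request{
            Method: "CONNECT",
            URL:    &url.URL{Scheme: "https", Host: "example.com", Path: "/chat"},
            Header: http.Header{":protocol": {"websocket"}},
            Host:   "example.com",
        }
        fmt.Println(isExtendedConnect(req)) // true
    }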
@@ -1497,28 +1545,8 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) {
}
cc.mu.Unlock()
- if newStreamHook != nil {
- newStreamHook(cs)
- }
-
- // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
- if !cc.t.disableCompression() &&
- req.Header.Get("Accept-Encoding") == "" &&
- req.Header.Get("Range") == "" &&
- !cs.isHead {
- // Request gzip only, not deflate. Deflate is ambiguous and
- // not as universally supported anyway.
- // See: https://zlib.net/zlib_faq.html#faq39
- //
- // Note that we don't request this for HEAD requests,
- // due to a bug in nginx:
- // http://trac.nginx.org/nginx/ticket/358
- // https://golang.org/issue/5522
- //
- // We don't request gzip if the request is for a range, since
- // auto-decoding a portion of a gzipped document will just fail
- // anyway. See https://golang.org/issue/8923
- cs.requestedGzip = true
+ if streamf != nil {
+ streamf(cs)
}
continueTimeout := cc.t.expectContinueTimeout()
@@ -1581,7 +1609,7 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) {
var respHeaderTimer <-chan time.Time
var respHeaderRecv chan struct{}
if d := cc.responseHeaderTimeout(); d != 0 {
- timer := cc.newTimer(d)
+ timer := cc.t.newTimer(d)
defer timer.Stop()
respHeaderTimer = timer.C()
respHeaderRecv = cs.respHeaderRecv
@@ -1590,21 +1618,6 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) {
// or until the request is aborted (via context, error, or otherwise),
// whichever comes first.
for {
- if cc.syncHooks != nil {
- cc.syncHooks.blockUntil(func() bool {
- select {
- case <-cs.peerClosed:
- case <-respHeaderTimer:
- case <-respHeaderRecv:
- case <-cs.abort:
- case <-ctx.Done():
- case <-cs.reqCancel:
- default:
- return false
- }
- return true
- })
- }
select {
case <-cs.peerClosed:
return nil
@@ -1689,6 +1702,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) {
cs.reqBodyClosed = make(chan struct{})
}
bodyClosed := cs.reqBodyClosed
+ closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil
cc.mu.Unlock()
if mustCloseBody {
cs.reqBody.Close()
@@ -1713,16 +1727,44 @@ func (cs *clientStream) cleanupWriteRequest(err error) {
if cs.sentHeaders {
if se, ok := err.(StreamError); ok {
if se.Cause != errFromPeer {
- cc.writeStreamReset(cs.ID, se.Code, err)
+ cc.writeStreamReset(cs.ID, se.Code, false, err)
}
} else {
- cc.writeStreamReset(cs.ID, ErrCodeCancel, err)
+ // We're cancelling an in-flight request.
+ //
+ // This could be due to the server becoming unresponsive.
+ // To avoid sending too many requests on a dead connection,
+ // we let the request continue to consume a concurrency slot
+ // until we can confirm the server is still responding.
+ // We do this by sending a PING frame along with the RST_STREAM
+ // (unless a ping is already in flight).
+ //
+ // For simplicity, we don't bother tracking the PING payload:
+ // We reset cc.pendingResets any time we receive a PING ACK.
+ //
+ // We skip this if the conn is going to be closed on idle,
+ // because it's short lived and will probably be closed before
+ // we get the ping response.
+ ping := false
+ if !closeOnIdle {
+ cc.mu.Lock()
+ // rstStreamPingsBlocked works around a gRPC behavior:
+ // see comment on the field for details.
+ if !cc.rstStreamPingsBlocked {
+ if cc.pendingResets == 0 {
+ ping = true
+ }
+ cc.pendingResets++
+ }
+ cc.mu.Unlock()
+ }
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, ping, err)
}
}
cs.bufPipe.CloseWithError(err) // no-op if already closed
} else {
if cs.sentHeaders && !cs.sentEndStream {
- cc.writeStreamReset(cs.ID, ErrCodeNo, nil)
+ cc.writeStreamReset(cs.ID, ErrCodeNo, false, nil)
}
cs.bufPipe.CloseWithError(errRequestCanceled)
}
@@ -1744,16 +1786,21 @@ func (cs *clientStream) cleanupWriteRequest(err error) {
// Must hold cc.mu.
func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error {
for {
- cc.lastActive = time.Now()
+ if cc.closed && cc.nextStreamID == 1 && cc.streamsReserved == 0 {
+ // This is the very first request sent to this connection.
+ // Return a fatal error which aborts the retry loop.
+ return errClientConnNotEstablished
+ }
+ cc.lastActive = cc.t.now()
if cc.closed || !cc.canTakeNewRequestLocked() {
return errClientConnUnusable
}
cc.lastIdle = time.Time{}
- if int64(len(cc.streams)) < int64(cc.maxConcurrentStreams) {
+ if cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) {
return nil
}
cc.pendingRequests++
- cc.condWait()
+ cc.cond.Wait()
cc.pendingRequests--
select {
case <-cs.abort:
@@ -2015,13 +2062,13 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error)
cs.flow.take(take)
return take, nil
}
- cc.condWait()
+ cc.cond.Wait()
}
}
func validateHeaders(hdrs http.Header) string {
for k, vv := range hdrs {
- if !httpguts.ValidHeaderFieldName(k) {
+ if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" {
return fmt.Sprintf("name %q", k)
}
for _, v := range vv {
@@ -2037,6 +2084,10 @@ func validateHeaders(hdrs http.Header) string {
var errNilRequestURL = errors.New("http2: Request.URI is nil")
+func isNormalConnect(req *http.Request) bool {
+ return req.Method == "CONNECT" && req.Header.Get(":protocol") == ""
+}
+
// requires cc.wmu be held.
func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
cc.hbuf.Reset()
@@ -2057,7 +2108,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
}
var path string
- if req.Method != "CONNECT" {
+ if !isNormalConnect(req) {
path = req.URL.RequestURI()
if !validPseudoPath(path) {
orig := path
@@ -2094,7 +2145,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
m = http.MethodGet
}
f(":method", m)
- if req.Method != "CONNECT" {
+ if !isNormalConnect(req) {
f(":path", path)
f(":scheme", req.URL.Scheme)
}
@@ -2275,7 +2326,7 @@ type resAndError struct {
func (cc *ClientConn) addStreamLocked(cs *clientStream) {
cs.flow.add(int32(cc.initialWindowSize))
cs.flow.setConnFlow(&cc.flow)
- cs.inflow.init(transportDefaultStreamFlow)
+ cs.inflow.init(cc.initialStreamRecvWindowSize)
cs.ID = cc.nextStreamID
cc.nextStreamID += 2
cc.streams[cs.ID] = cs
@@ -2291,14 +2342,14 @@ func (cc *ClientConn) forgetStreamID(id uint32) {
if len(cc.streams) != slen-1 {
panic("forgetting unknown stream id")
}
- cc.lastActive = time.Now()
+ cc.lastActive = cc.t.now()
if len(cc.streams) == 0 && cc.idleTimer != nil {
cc.idleTimer.Reset(cc.idleTimeout)
- cc.lastIdle = time.Now()
+ cc.lastIdle = cc.t.now()
}
// Wake up writeRequestBody via clientStream.awaitFlowControl and
// wake up RoundTrip if there is a pending request.
- cc.condBroadcast()
+ cc.cond.Broadcast()
closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil
if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 {
@@ -2320,6 +2371,7 @@ type clientConnReadLoop struct {
// readLoop runs in its own goroutine and reads and dispatches frames.
func (cc *ClientConn) readLoop() {
+ cc.t.markNewGoroutine()
rl := &clientConnReadLoop{cc: cc}
defer rl.cleanup()
cc.readerErr = rl.run()
@@ -2353,7 +2405,6 @@ func isEOFOrNetReadError(err error) bool {
func (rl *clientConnReadLoop) cleanup() {
cc := rl.cc
- cc.t.connPool().MarkDead(cc)
defer cc.closeConn()
defer close(cc.readerDone)
@@ -2377,6 +2428,24 @@ func (rl *clientConnReadLoop) cleanup() {
}
cc.closed = true
+ // If the connection has never been used, and has been open for only a short time,
+ // leave it in the connection pool for a little while.
+ //
+ // This avoids a situation where new connections are constantly created,
+ // added to the pool, fail, and are removed from the pool, without any error
+ // being surfaced to the user.
+ const unusedWaitTime = 5 * time.Second
+ idleTime := cc.t.now().Sub(cc.lastActive)
+ if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime {
+ cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() {
+ cc.t.connPool().MarkDead(cc)
+ })
+ } else {
+ cc.mu.Unlock() // avoid any deadlocks in MarkDead
+ cc.t.connPool().MarkDead(cc)
+ cc.mu.Lock()
+ }
+
for _, cs := range cc.streams {
select {
case <-cs.peerClosed:
@@ -2386,7 +2455,7 @@ func (rl *clientConnReadLoop) cleanup() {
cs.abortStreamLocked(err)
}
}
- cc.condBroadcast()
+ cc.cond.Broadcast()
cc.mu.Unlock()
}
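The hunks in this file route timestamps and timers through cc.t.now and cc.t.afterFunc so tests can drive synthetic time, but on the public API the health check they arm is still configured through the Transport. A minimal configuration sketch; the timeout values are illustrative choices, not library defaults:

    package main

    import (
        "crypto/tls"
        "net/http"
        "time"

        "golang.org/x/net/http2"
    )

    func newClient() *http.Client {
        tr := &http2.Transport{
            TLSClientConfig: &tls.Config{},
            // ReadIdleTimeout arms the health-check PING in readLoop below when no
            // frames have been read for this long; PingTimeout bounds the wait for
            // the PING ack before the connection is closed.
            ReadIdleTimeout: 30 * time.Second,
            PingTimeout:     15 * time.Second,
        }
        return &http.Client{Transport: tr}
    }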
@@ -2420,10 +2489,10 @@ func (cc *ClientConn) countReadFrameError(err error) {
func (rl *clientConnReadLoop) run() error {
cc := rl.cc
gotSettings := false
- readIdleTimeout := cc.t.ReadIdleTimeout
+ readIdleTimeout := cc.readIdleTimeout
var t timer
if readIdleTimeout != 0 {
- t = cc.afterFunc(readIdleTimeout, cc.healthCheck)
+ t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck)
}
for {
f, err := cc.fr.ReadFrame()
@@ -2434,7 +2503,7 @@ func (rl *clientConnReadLoop) run() error {
cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
}
if se, ok := err.(StreamError); ok {
- if cs := rl.streamByID(se.StreamID); cs != nil {
+ if cs := rl.streamByID(se.StreamID, notHeaderOrDataFrame); cs != nil {
if se.Cause == nil {
se.Cause = cc.fr.errDetail
}
@@ -2480,13 +2549,16 @@ func (rl *clientConnReadLoop) run() error {
if VerboseLogs {
cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err)
}
+ if !cc.seenSettings {
+ close(cc.seenSettingsChan)
+ }
return err
}
}
}
func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
- cs := rl.streamByID(f.StreamID)
+ cs := rl.streamByID(f.StreamID, headerOrDataFrame)
if cs == nil {
// We'd get here if we canceled a request while the
// server had its response still in flight. So if this
@@ -2604,15 +2676,34 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
if f.StreamEnded() {
return nil, errors.New("1xx informational response with END_STREAM flag")
}
- cs.num1xx++
- const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http
- if cs.num1xx > max1xxResponses {
- return nil, errors.New("http2: too many 1xx informational responses")
- }
if fn := cs.get1xxTraceFunc(); fn != nil {
+ // If the 1xx response is being delivered to the user,
+ // then they're responsible for limiting the number
+ // of responses.
if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil {
return nil, err
}
+ } else {
+ // If the user didn't examine the 1xx response, then we
+ // limit the size of all 1xx headers.
+ //
+ // This differs a bit from the HTTP/1 implementation, which
+ // limits the size of all 1xx headers plus the final response.
+ // Use the larger limit of MaxHeaderListSize and
+ // net/http.Transport.MaxResponseHeaderBytes.
+ limit := int64(cs.cc.t.maxHeaderListSize())
+ if t1 := cs.cc.t.t1; t1 != nil && t1.MaxResponseHeaderBytes > limit {
+ limit = t1.MaxResponseHeaderBytes
+ }
+ for _, h := range f.Fields {
+ cs.totalHeaderSize += int64(h.Size())
+ }
+ if cs.totalHeaderSize > limit {
+ if VerboseLogs {
+ log.Printf("http2: 1xx informational responses too large")
+ }
+ return nil, errors.New("header list too large")
+ }
}
if statusCode == 100 {
traceGot100Continue(cs.trace)
@@ -2796,7 +2887,7 @@ func (b transportResponseBody) Close() error {
func (rl *clientConnReadLoop) processData(f *DataFrame) error {
cc := rl.cc
- cs := rl.streamByID(f.StreamID)
+ cs := rl.streamByID(f.StreamID, headerOrDataFrame)
data := f.Data()
if cs == nil {
cc.mu.Lock()
@@ -2931,9 +3022,22 @@ func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) {
cs.abortStream(err)
}
-func (rl *clientConnReadLoop) streamByID(id uint32) *clientStream {
+// Constants passed to streamByID for documentation purposes.
+const (
+ headerOrDataFrame = true
+ notHeaderOrDataFrame = false
+)
+
+// streamByID returns the stream with the given id, or nil if no stream has that id.
+// If headerOrData is true, it clears cc.rstStreamPingsBlocked.
+func (rl *clientConnReadLoop) streamByID(id uint32, headerOrData bool) *clientStream {
rl.cc.mu.Lock()
defer rl.cc.mu.Unlock()
+ if headerOrData {
+ // Work around an unfortunate gRPC behavior.
+ // See comment on ClientConn.rstStreamPingsBlocked for details.
+ rl.cc.rstStreamPingsBlocked = false
+ }
cs := rl.cc.streams[id]
if cs != nil && !cs.readAborted {
return cs
@@ -3021,12 +3125,27 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error {
for _, cs := range cc.streams {
cs.flow.add(delta)
}
- cc.condBroadcast()
+ cc.cond.Broadcast()
cc.initialWindowSize = s.Val
case SettingHeaderTableSize:
cc.henc.SetMaxDynamicTableSize(s.Val)
cc.peerMaxHeaderTableSize = s.Val
+ case SettingEnableConnectProtocol:
+ if err := s.Valid(); err != nil {
+ return err
+ }
+ // If the peer wants to send us SETTINGS_ENABLE_CONNECT_PROTOCOL,
+ // we require that it do so in the first SETTINGS frame.
+ //
+ // When we attempt to use extended CONNECT, we wait for the first
+ // SETTINGS frame to see if the server supports it. If we let the
+ // server enable the feature with a later SETTINGS frame, then
+ // users will see inconsistent results depending on whether we've
+ // seen that frame or not.
+ if !cc.seenSettings {
+ cc.extendedConnectAllowed = s.Val == 1
+ }
default:
cc.vlogf("Unhandled Setting: %v", s)
}
@@ -3044,6 +3163,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error {
// connection can establish to our default.
cc.maxConcurrentStreams = defaultMaxConcurrentStreams
}
+ close(cc.seenSettingsChan)
cc.seenSettings = true
}
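Taken together, the ":protocol" allowance in validateHeaders, the isNormalConnect helper, and the SETTINGS_ENABLE_CONNECT_PROTOCOL handling above add client-side support for extended CONNECT (RFC 8441). A minimal sketch of the request shape the new code expects; the URL, protocol value, and TLS config are placeholders, and RoundTrip fails if the server never advertises the setting:

    package main

    import (
        "crypto/tls"
        "log"
        "net/http"

        "golang.org/x/net/http2"
    )

    func main() {
        tr := &http2.Transport{TLSClientConfig: &tls.Config{}}
        req, err := http.NewRequest("CONNECT", "https://example.com/chat", nil)
        if err != nil {
            log.Fatal(err)
        }
        // A non-empty ":protocol" header turns this into extended CONNECT;
        // isNormalConnect then treats it like a regular request when building
        // the :path and :scheme pseudo-headers.
        req.Header.Set(":protocol", "websocket")
        res, err := tr.RoundTrip(req)
        if err != nil {
            log.Fatal(err)
        }
        defer res.Body.Close()
    }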
@@ -3052,7 +3172,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error {
func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
cc := rl.cc
- cs := rl.streamByID(f.StreamID)
+ cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame)
if f.StreamID != 0 && cs == nil {
return nil
}
@@ -3076,12 +3196,12 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
return ConnectionError(ErrCodeFlowControl)
}
- cc.condBroadcast()
+ cc.cond.Broadcast()
return nil
}
func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
- cs := rl.streamByID(f.StreamID)
+ cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame)
if cs == nil {
// TODO: return error if server tries to RST_STREAM an idle stream
return nil
@@ -3120,7 +3240,8 @@ func (cc *ClientConn) Ping(ctx context.Context) error {
}
var pingError error
errc := make(chan struct{})
- cc.goRun(func() {
+ go func() {
+ cc.t.markNewGoroutine()
cc.wmu.Lock()
defer cc.wmu.Unlock()
if pingError = cc.fr.WritePing(false, p); pingError != nil {
@@ -3131,20 +3252,7 @@ func (cc *ClientConn) Ping(ctx context.Context) error {
close(errc)
return
}
- })
- if cc.syncHooks != nil {
- cc.syncHooks.blockUntil(func() bool {
- select {
- case <-c:
- case <-errc:
- case <-ctx.Done():
- case <-cc.readerDone:
- default:
- return false
- }
- return true
- })
- }
+ }()
select {
case <-c:
return nil
@@ -3168,6 +3276,12 @@ func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
close(c)
delete(cc.pings, f.Data)
}
+ if cc.pendingResets > 0 {
+ // See clientStream.cleanupWriteRequest.
+ cc.pendingResets = 0
+ cc.rstStreamPingsBlocked = true
+ cc.cond.Broadcast()
+ }
return nil
}
cc := rl.cc
@@ -3190,13 +3304,20 @@ func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error {
return ConnectionError(ErrCodeProtocol)
}
-func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) {
+// writeStreamReset sends a RST_STREAM frame.
+// When ping is true, it also sends a PING frame with a random payload.
+func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, ping bool, err error) {
// TODO: map err to more interesting error codes, once the
// HTTP community comes up with some. But currently for
// RST_STREAM there's no equivalent to GOAWAY frame's debug
// data, and the error codes are all pretty vague ("cancel").
cc.wmu.Lock()
cc.fr.WriteRSTStream(streamID, code)
+ if ping {
+ var payload [8]byte
+ rand.Read(payload[:])
+ cc.fr.WritePing(false, payload)
+ }
cc.bw.Flush()
cc.wmu.Unlock()
}
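The Ping path above now writes the PING frame from its own goroutine (tagged via markNewGoroutine) rather than through the removed syncHooks, and a PING ack also clears pendingResets and wakes any RoundTrip calls waiting for a concurrency slot. A small usage sketch of the exported ClientConn.Ping, assuming a *ClientConn is already in hand (for example from a custom ClientConnPool); the five-second bound is arbitrary:

    package main

    import (
        "context"
        "log"
        "time"

        "golang.org/x/net/http2"
    )

    // pingConn reports whether cc acknowledges a PING within timeout.
    func pingConn(cc *http2.ClientConn, timeout time.Duration) error {
        ctx, cancel := context.WithTimeout(context.Background(), timeout)
        defer cancel()
        return cc.Ping(ctx)
    }

    func main() {
        var cc *http2.ClientConn // assumed: obtained elsewhere, e.g. a custom ClientConnPool
        if cc == nil {
            return
        }
        if err := pingConn(cc, 5*time.Second); err != nil {
            log.Printf("connection unhealthy: %v", err)
        }
    }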
@@ -3350,7 +3471,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) {
cc.mu.Lock()
ci.WasIdle = len(cc.streams) == 0 && reused
if ci.WasIdle && !cc.lastActive.IsZero() {
- ci.IdleTime = time.Since(cc.lastActive)
+ ci.IdleTime = cc.t.timeSince(cc.lastActive)
}
cc.mu.Unlock()
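Returning briefly to the 1xx handling earlier in this file: the new size cap applies only when the caller has not installed a Got1xxResponse hook; with a hook installed, the caller takes over 1xx accounting. A sketch of installing such a hook through net/http/httptrace (the logging is illustrative):

    package main

    import (
        "log"
        "net/http"
        "net/http/httptrace"
        "net/textproto"
    )

    func withTrace(req *http.Request) *http.Request {
        trace := &httptrace.ClientTrace{
            // With this hook installed, the transport delivers each 1xx response
            // to the caller instead of enforcing the combined header-size limit.
            Got1xxResponse: func(code int, header textproto.MIMEHeader) error {
                log.Printf("informational response: %d", code)
                return nil
            },
        }
        return req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
    }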
diff --git a/http2/transport_test.go b/http2/transport_test.go
index 11ff67b4c8..0e12e0f1c7 100644
--- a/http2/transport_test.go
+++ b/http2/transport_test.go
@@ -16,7 +16,6 @@ import (
"fmt"
"io"
"io/fs"
- "io/ioutil"
"log"
"math/rand"
"net"
@@ -152,7 +151,7 @@ func TestIdleConnTimeout(t *testing.T) {
}
// Respond to the client's request.
- hf := testClientConnReadFrame[*MetaHeadersFrame](tc)
+ hf := readFrame[*HeadersFrame](t, tc)
tc.writeHeaders(HeadersFrameParam{
StreamID: hf.StreamID,
EndHeaders: true,
@@ -169,7 +168,7 @@ func TestIdleConnTimeout(t *testing.T) {
}
tt.advance(test.wait)
- if got, want := tc.netConnClosed, test.wantNewConn; got != want {
+ if got, want := tc.isClosed(), test.wantNewConn; got != want {
t.Fatalf("after waiting %v, conn closed=%v; want %v", test.wait, got, want)
}
}
@@ -206,7 +205,7 @@ func TestTransportH2c(t *testing.T) {
if res.ProtoMajor != 2 {
t.Fatal("proto not h2c")
}
- body, err := ioutil.ReadAll(res.Body)
+ body, err := io.ReadAll(res.Body)
if err != nil {
t.Fatal(err)
}
@@ -220,15 +219,14 @@ func TestTransportH2c(t *testing.T) {
func TestTransport(t *testing.T) {
const body = "sup"
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
io.WriteString(w, body)
- }, optOnlyServer)
- defer st.Close()
+ })
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
- u, err := url.Parse(st.ts.URL)
+ u, err := url.Parse(ts.URL)
if err != nil {
t.Fatal(err)
}
@@ -264,7 +262,7 @@ func TestTransport(t *testing.T) {
if res.TLS == nil {
t.Errorf("%d: Response.TLS = nil; want non-nil", i)
}
- slurp, err := ioutil.ReadAll(res.Body)
+ slurp, err := io.ReadAll(res.Body)
if err != nil {
t.Errorf("%d: Body read: %v", i, err)
} else if string(slurp) != body {
@@ -275,26 +273,27 @@ func TestTransport(t *testing.T) {
}
func testTransportReusesConns(t *testing.T, useClient, wantSame bool, modReq func(*http.Request)) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
io.WriteString(w, r.RemoteAddr)
- }, optOnlyServer, func(c net.Conn, st http.ConnState) {
- t.Logf("conn %v is now state %v", c.RemoteAddr(), st)
+ }, func(ts *httptest.Server) {
+ ts.Config.ConnState = func(c net.Conn, st http.ConnState) {
+ t.Logf("conn %v is now state %v", c.RemoteAddr(), st)
+ }
})
- defer st.Close()
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
if useClient {
tr.ConnPool = noDialClientConnPool{new(clientConnPool)}
}
defer tr.CloseIdleConnections()
get := func() string {
- req, err := http.NewRequest("GET", st.ts.URL, nil)
+ req, err := http.NewRequest("GET", ts.URL, nil)
if err != nil {
t.Fatal(err)
}
modReq(req)
var res *http.Response
if useClient {
- c := st.ts.Client()
+ c := ts.Client()
ConfigureTransports(c.Transport.(*http.Transport))
res, err = c.Do(req)
} else {
@@ -304,7 +303,7 @@ func testTransportReusesConns(t *testing.T, useClient, wantSame bool, modReq fun
t.Fatal(err)
}
defer res.Body.Close()
- slurp, err := ioutil.ReadAll(res.Body)
+ slurp, err := io.ReadAll(res.Body)
if err != nil {
t.Fatalf("Body read: %v", err)
}
@@ -358,15 +357,12 @@ func TestTransportGetGotConnHooks_HTTP2Transport(t *testing.T) {
func TestTransportGetGotConnHooks_Client(t *testing.T) { testTransportGetGotConnHooks(t, true) }
func testTransportGetGotConnHooks(t *testing.T, useClient bool) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
io.WriteString(w, r.RemoteAddr)
- }, func(s *httptest.Server) {
- s.EnableHTTP2 = true
- }, optOnlyServer)
- defer st.Close()
+ })
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
- client := st.ts.Client()
+ client := ts.Client()
ConfigureTransports(client.Transport.(*http.Transport))
var (
@@ -389,7 +385,7 @@ func testTransportGetGotConnHooks(t *testing.T, useClient bool) {
}
},
}
- req, err := http.NewRequest("GET", st.ts.URL, nil)
+ req, err := http.NewRequest("GET", ts.URL, nil)
if err != nil {
t.Fatal(err)
}
@@ -432,9 +428,8 @@ func (c *testNetConn) Close() error {
// Tests that the Transport only keeps one pending dial open per destination address.
// https://golang.org/issue/13397
func TestTransportGroupsPendingDials(t *testing.T) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- }, optOnlyServer)
- defer st.Close()
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
+ })
var (
mu sync.Mutex
dialCount int
@@ -463,7 +458,7 @@ func TestTransportGroupsPendingDials(t *testing.T) {
wg.Add(1)
go func() {
defer wg.Done()
- req, err := http.NewRequest("GET", st.ts.URL, nil)
+ req, err := http.NewRequest("GET", ts.URL, nil)
if err != nil {
t.Error(err)
return
@@ -486,35 +481,21 @@ func TestTransportGroupsPendingDials(t *testing.T) {
}
}
-func retry(tries int, delay time.Duration, fn func() error) error {
- var err error
- for i := 0; i < tries; i++ {
- err = fn()
- if err == nil {
- return nil
- }
- time.Sleep(delay)
- }
- return err
-}
-
func TestTransportAbortClosesPipes(t *testing.T) {
shutdown := make(chan struct{})
- st := newServerTester(t,
+ ts := newTestServer(t,
func(w http.ResponseWriter, r *http.Request) {
w.(http.Flusher).Flush()
<-shutdown
},
- optOnlyServer,
)
- defer st.Close()
defer close(shutdown) // we must close shutdown before the server shuts down to avoid hanging
errCh := make(chan error)
go func() {
defer close(errCh)
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
- req, err := http.NewRequest("GET", st.ts.URL, nil)
+ req, err := http.NewRequest("GET", ts.URL, nil)
if err != nil {
errCh <- err
return
@@ -525,8 +506,8 @@ func TestTransportAbortClosesPipes(t *testing.T) {
return
}
defer res.Body.Close()
- st.closeConn()
- _, err = ioutil.ReadAll(res.Body)
+ ts.CloseClientConnections()
+ _, err = io.ReadAll(res.Body)
if err == nil {
errCh <- errors.New("expected error from res.Body.Read")
return
@@ -548,13 +529,11 @@ func TestTransportAbortClosesPipes(t *testing.T) {
// could be a table-driven test with extra goodies.
func TestTransportPath(t *testing.T) {
gotc := make(chan *url.URL, 1)
- st := newServerTester(t,
+ ts := newTestServer(t,
func(w http.ResponseWriter, r *http.Request) {
gotc <- r.URL
},
- optOnlyServer,
)
- defer st.Close()
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
@@ -562,7 +541,7 @@ func TestTransportPath(t *testing.T) {
path = "/testpath"
query = "q=1"
)
- surl := st.ts.URL + path + "?" + query
+ surl := ts.URL + path + "?" + query
req, err := http.NewRequest("POST", surl, nil)
if err != nil {
t.Fatal(err)
@@ -656,18 +635,16 @@ func TestTransportBody(t *testing.T) {
err error
}
gotc := make(chan reqInfo, 1)
- st := newServerTester(t,
+ ts := newTestServer(t,
func(w http.ResponseWriter, r *http.Request) {
- slurp, err := ioutil.ReadAll(r.Body)
+ slurp, err := io.ReadAll(r.Body)
if err != nil {
gotc <- reqInfo{err: err}
} else {
gotc <- reqInfo{req: r, slurp: slurp}
}
},
- optOnlyServer,
)
- defer st.Close()
for i, tt := range bodyTests {
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
@@ -677,7 +654,7 @@ func TestTransportBody(t *testing.T) {
if tt.noContentLen {
body = struct{ io.Reader }{body} // just a Reader, hiding concrete type and other methods
}
- req, err := http.NewRequest("POST", st.ts.URL, body)
+ req, err := http.NewRequest("POST", ts.URL, body)
if err != nil {
t.Fatalf("#%d: %v", i, err)
}
@@ -717,15 +694,13 @@ func TestTransportDialTLS(t *testing.T) {
var mu sync.Mutex // guards following
var gotReq, didDial bool
- ts := newServerTester(t,
+ ts := newTestServer(t,
func(w http.ResponseWriter, r *http.Request) {
mu.Lock()
gotReq = true
mu.Unlock()
},
- optOnlyServer,
)
- defer ts.Close()
tr := &Transport{
DialTLS: func(netw, addr string, cfg *tls.Config) (net.Conn, error) {
mu.Lock()
@@ -741,7 +716,7 @@ func TestTransportDialTLS(t *testing.T) {
}
defer tr.CloseIdleConnections()
client := &http.Client{Transport: tr}
- res, err := client.Get(ts.ts.URL)
+ res, err := client.Get(ts.URL)
if err != nil {
t.Fatal(err)
}
@@ -776,18 +751,17 @@ func TestConfigureTransport(t *testing.T) {
}
// And does it work?
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
io.WriteString(w, r.Proto)
- }, optOnlyServer)
- defer st.Close()
+ })
t1.TLSClientConfig.InsecureSkipVerify = true
c := &http.Client{Transport: t1}
- res, err := c.Get(st.ts.URL)
+ res, err := c.Get(ts.URL)
if err != nil {
t.Fatal(err)
}
- slurp, err := ioutil.ReadAll(res.Body)
+ slurp, err := io.ReadAll(res.Body)
if err != nil {
t.Fatal(err)
}
@@ -838,7 +812,7 @@ func TestTransportReqBodyAfterResponse_200(t *testing.T) { testTransportReqBodyA
func TestTransportReqBodyAfterResponse_403(t *testing.T) { testTransportReqBodyAfterResponse(t, 403) }
func testTransportReqBodyAfterResponse(t *testing.T, status int) {
- const bodySize = 10 << 20
+ const bodySize = 1 << 10
tc := newTestClientConn(t)
tc.greet()
@@ -891,6 +865,7 @@ func testTransportReqBodyAfterResponse(t *testing.T, status int) {
streamID: rt.streamID(),
endStream: true,
size: bodySize / 2,
+ multiple: true,
})
} else {
// After a 403 response, client gives up and resets the stream.
@@ -902,20 +877,19 @@ func testTransportReqBodyAfterResponse(t *testing.T, status int) {
// See golang.org/issue/13444
func TestTransportFullDuplex(t *testing.T) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(200) // redundant but for clarity
w.(http.Flusher).Flush()
io.Copy(flushWriter{w}, capitalizeReader{r.Body})
fmt.Fprintf(w, "bye.\n")
- }, optOnlyServer)
- defer st.Close()
+ })
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
c := &http.Client{Transport: tr}
pr, pw := io.Pipe()
- req, err := http.NewRequest("PUT", st.ts.URL, ioutil.NopCloser(pr))
+ req, err := http.NewRequest("PUT", ts.URL, io.NopCloser(pr))
if err != nil {
t.Fatal(err)
}
@@ -953,12 +927,11 @@ func TestTransportFullDuplex(t *testing.T) {
func TestTransportConnectRequest(t *testing.T) {
gotc := make(chan *http.Request, 1)
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
gotc <- r
- }, optOnlyServer)
- defer st.Close()
+ })
- u, err := url.Parse(st.ts.URL)
+ u, err := url.Parse(ts.URL)
if err != nil {
t.Fatal(err)
}
@@ -1413,24 +1386,22 @@ func TestPadHeaders(t *testing.T) {
}
func TestTransportChecksRequestHeaderListSize(t *testing.T) {
- st := newServerTester(t,
+ ts := newTestServer(t,
func(w http.ResponseWriter, r *http.Request) {
// Consume body & force client to send
// trailers before writing response.
- // ioutil.ReadAll returns non-nil err for
+ // io.ReadAll returns non-nil err for
// requests that attempt to send greater than
// maxHeaderListSize bytes of trailers, since
// those requests generate a stream reset.
- ioutil.ReadAll(r.Body)
+ io.ReadAll(r.Body)
r.Body.Close()
},
func(ts *httptest.Server) {
ts.Config.MaxHeaderBytes = 16 << 10
},
- optOnlyServer,
optQuiet,
)
- defer st.Close()
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
@@ -1438,7 +1409,7 @@ func TestTransportChecksRequestHeaderListSize(t *testing.T) {
checkRoundTrip := func(req *http.Request, wantErr error, desc string) {
// Make an arbitrary request to ensure we get the server's
// settings frame and initialize peerMaxHeaderListSize.
- req0, err := http.NewRequest("GET", st.ts.URL, nil)
+ req0, err := http.NewRequest("GET", ts.URL, nil)
if err != nil {
t.Fatalf("newRequest: NewRequest: %v", err)
}
@@ -1501,13 +1472,29 @@ func TestTransportChecksRequestHeaderListSize(t *testing.T) {
newRequest := func() *http.Request {
// Body must be non-nil to enable writing trailers.
body := strings.NewReader("hello")
- req, err := http.NewRequest("POST", st.ts.URL, body)
+ req, err := http.NewRequest("POST", ts.URL, body)
if err != nil {
t.Fatalf("newRequest: NewRequest: %v", err)
}
return req
}
+ var (
+ scMu sync.Mutex
+ sc *serverConn
+ )
+ testHookGetServerConn = func(v *serverConn) {
+ scMu.Lock()
+ defer scMu.Unlock()
+ if sc != nil {
+ panic("testHookGetServerConn called multiple times")
+ }
+ sc = v
+ }
+ defer func() {
+ testHookGetServerConn = nil
+ }()
+
// Validate peerMaxHeaderListSize.
req := newRequest()
checkRoundTrip(req, nil, "Initial request")
@@ -1519,16 +1506,16 @@ func TestTransportChecksRequestHeaderListSize(t *testing.T) {
cc.mu.Lock()
peerSize := cc.peerMaxHeaderListSize
cc.mu.Unlock()
- st.scMu.Lock()
- wantSize := uint64(st.sc.maxHeaderListSize())
- st.scMu.Unlock()
+ scMu.Lock()
+ wantSize := uint64(sc.maxHeaderListSize())
+ scMu.Unlock()
if peerSize != wantSize {
t.Errorf("peerMaxHeaderListSize = %v; want %v", peerSize, wantSize)
}
// Sanity check peerSize. (*serverConn) maxHeaderListSize adds
// 320 bytes of padding.
- wantHeaderBytes := uint64(st.ts.Config.MaxHeaderBytes) + 320
+ wantHeaderBytes := uint64(ts.Config.MaxHeaderBytes) + 320
if peerSize != wantHeaderBytes {
t.Errorf("peerMaxHeaderListSize = %v; want %v.", peerSize, wantHeaderBytes)
}
@@ -1653,22 +1640,20 @@ func TestTransportCookieHeaderSplit(t *testing.T) {
// a stream error, but others like cancel should be similar)
func TestTransportBodyReadErrorType(t *testing.T) {
doPanic := make(chan bool, 1)
- st := newServerTester(t,
+ ts := newTestServer(t,
func(w http.ResponseWriter, r *http.Request) {
w.(http.Flusher).Flush() // force headers out
<-doPanic
panic("boom")
},
- optOnlyServer,
optQuiet,
)
- defer st.Close()
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
c := &http.Client{Transport: tr}
- res, err := c.Get(st.ts.URL)
+ res, err := c.Get(ts.URL)
if err != nil {
t.Fatal(err)
}
@@ -1692,7 +1677,7 @@ func TestTransportDoubleCloseOnWriteError(t *testing.T) {
conn net.Conn // to close if set
)
- st := newServerTester(t,
+ ts := newTestServer(t,
func(w http.ResponseWriter, r *http.Request) {
mu.Lock()
defer mu.Unlock()
@@ -1700,9 +1685,7 @@ func TestTransportDoubleCloseOnWriteError(t *testing.T) {
conn.Close()
}
},
- optOnlyServer,
)
- defer st.Close()
tr := &Transport{
TLSClientConfig: tlsConfigInsecure,
@@ -1719,20 +1702,18 @@ func TestTransportDoubleCloseOnWriteError(t *testing.T) {
}
defer tr.CloseIdleConnections()
c := &http.Client{Transport: tr}
- c.Get(st.ts.URL)
+ c.Get(ts.URL)
}
// Test that the http1 Transport.DisableKeepAlives option is respected
// and connections are closed as soon as idle.
// See golang.org/issue/14008
func TestTransportDisableKeepAlives(t *testing.T) {
- st := newServerTester(t,
+ ts := newTestServer(t,
func(w http.ResponseWriter, r *http.Request) {
io.WriteString(w, "hi")
},
- optOnlyServer,
)
- defer st.Close()
connClosed := make(chan struct{}) // closed on tls.Conn.Close
tr := &Transport{
@@ -1749,11 +1730,11 @@ func TestTransportDisableKeepAlives(t *testing.T) {
},
}
c := &http.Client{Transport: tr}
- res, err := c.Get(st.ts.URL)
+ res, err := c.Get(ts.URL)
if err != nil {
t.Fatal(err)
}
- if _, err := ioutil.ReadAll(res.Body); err != nil {
+ if _, err := io.ReadAll(res.Body); err != nil {
t.Fatal(err)
}
defer res.Body.Close()
@@ -1770,14 +1751,12 @@ func TestTransportDisableKeepAlives(t *testing.T) {
// but when things are totally idle, it still needs to close.
func TestTransportDisableKeepAlives_Concurrency(t *testing.T) {
const D = 25 * time.Millisecond
- st := newServerTester(t,
+ ts := newTestServer(t,
func(w http.ResponseWriter, r *http.Request) {
time.Sleep(D)
io.WriteString(w, "hi")
},
- optOnlyServer,
)
- defer st.Close()
var dials int32
var conns sync.WaitGroup
@@ -1812,12 +1791,12 @@ func TestTransportDisableKeepAlives_Concurrency(t *testing.T) {
}
go func() {
defer reqs.Done()
- res, err := c.Get(st.ts.URL)
+ res, err := c.Get(ts.URL)
if err != nil {
t.Error(err)
return
}
- if _, err := ioutil.ReadAll(res.Body); err != nil {
+ if _, err := io.ReadAll(res.Body); err != nil {
t.Error(err)
return
}
@@ -1892,6 +1871,7 @@ func testTransportResponseHeaderTimeout(t *testing.T, body bool) {
tc.wantData(wantData{
endStream: true,
size: bodySize,
+ multiple: true,
})
}
@@ -1908,15 +1888,14 @@ func testTransportResponseHeaderTimeout(t *testing.T, body bool) {
func TestTransportDisableCompression(t *testing.T) {
const body = "sup"
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
want := http.Header{
"User-Agent": []string{"Go-http-client/2.0"},
}
if !reflect.DeepEqual(r.Header, want) {
t.Errorf("request headers = %v; want %v", r.Header, want)
}
- }, optOnlyServer)
- defer st.Close()
+ })
tr := &Transport{
TLSClientConfig: tlsConfigInsecure,
@@ -1926,7 +1905,7 @@ func TestTransportDisableCompression(t *testing.T) {
}
defer tr.CloseIdleConnections()
- req, err := http.NewRequest("GET", st.ts.URL, nil)
+ req, err := http.NewRequest("GET", ts.URL, nil)
if err != nil {
t.Fatal(err)
}
@@ -1939,15 +1918,14 @@ func TestTransportDisableCompression(t *testing.T) {
// RFC 7540 section 8.1.2.2
func TestTransportRejectsConnHeaders(t *testing.T) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
var got []string
for k := range r.Header {
got = append(got, k)
}
sort.Strings(got)
w.Header().Set("Got-Header", strings.Join(got, ","))
- }, optOnlyServer)
- defer st.Close()
+ })
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
@@ -2035,7 +2013,7 @@ func TestTransportRejectsConnHeaders(t *testing.T) {
}
for _, tt := range tests {
- req, _ := http.NewRequest("GET", st.ts.URL, nil)
+ req, _ := http.NewRequest("GET", ts.URL, nil)
req.Header[tt.key] = tt.value
res, err := tr.RoundTrip(req)
var got string
@@ -2089,14 +2067,13 @@ func TestTransportRejectsContentLengthWithSign(t *testing.T) {
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Length", tt.cl[0])
- }, optOnlyServer)
- defer st.Close()
+ })
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
- req, _ := http.NewRequest("HEAD", st.ts.URL, nil)
+ req, _ := http.NewRequest("HEAD", ts.URL, nil)
res, err := tr.RoundTrip(req)
var got string
@@ -2117,15 +2094,14 @@ func TestTransportRejectsContentLengthWithSign(t *testing.T) {
// golang.org/issue/14048
// golang.org/issue/64766
func TestTransportFailsOnInvalidHeadersAndTrailers(t *testing.T) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
var got []string
for k := range r.Header {
got = append(got, k)
}
sort.Strings(got)
w.Header().Set("Got-Header", strings.Join(got, ","))
- }, optOnlyServer)
- defer st.Close()
+ })
tests := [...]struct {
h http.Header
@@ -2162,7 +2138,7 @@ func TestTransportFailsOnInvalidHeadersAndTrailers(t *testing.T) {
defer tr.CloseIdleConnections()
for i, tt := range tests {
- req, _ := http.NewRequest("GET", st.ts.URL, nil)
+ req, _ := http.NewRequest("GET", ts.URL, nil)
req.Header = tt.h
req.Trailer = tt.t
res, err := tr.RoundTrip(req)
@@ -2191,7 +2167,7 @@ func TestTransportFailsOnInvalidHeadersAndTrailers(t *testing.T) {
// the first Read call's gzip.NewReader returning an error.
func TestGzipReader_DoubleReadCrash(t *testing.T) {
gz := &gzipReader{
- body: ioutil.NopCloser(strings.NewReader("0123456789")),
+ body: io.NopCloser(strings.NewReader("0123456789")),
}
var buf [1]byte
n, err1 := gz.Read(buf[:])
@@ -2210,7 +2186,7 @@ func TestGzipReader_ReadAfterClose(t *testing.T) {
w.Write([]byte("012345679"))
w.Close()
gz := &gzipReader{
- body: ioutil.NopCloser(&body),
+ body: io.NopCloser(&body),
}
var buf [1]byte
n, err := gz.Read(buf[:])
@@ -2372,11 +2348,10 @@ func (b neverEnding) Read(p []byte) (int, error) {
// runs out of flow control tokens)
func TestTransportHandlerBodyClose(t *testing.T) {
const bodySize = 10 << 20
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
r.Body.Close()
io.Copy(w, io.LimitReader(neverEnding('A'), bodySize))
- }, optOnlyServer)
- defer st.Close()
+ })
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
@@ -2385,7 +2360,7 @@ func TestTransportHandlerBodyClose(t *testing.T) {
const numReq = 10
for i := 0; i < numReq; i++ {
- req, err := http.NewRequest("POST", st.ts.URL, struct{ io.Reader }{io.LimitReader(neverEnding('A'), bodySize)})
+ req, err := http.NewRequest("POST", ts.URL, struct{ io.Reader }{io.LimitReader(neverEnding('A'), bodySize)})
if err != nil {
t.Fatal(err)
}
@@ -2393,7 +2368,7 @@ func TestTransportHandlerBodyClose(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- n, err := io.Copy(ioutil.Discard, res.Body)
+ n, err := io.Copy(io.Discard, res.Body)
res.Body.Close()
if n != bodySize || err != nil {
t.Fatalf("req#%d: Copy = %d, %v; want %d, nil", i, n, err, bodySize)
@@ -2418,7 +2393,7 @@ func TestTransportFlowControl(t *testing.T) {
}
var wrote int64 // updated atomically
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
b := make([]byte, bufLen)
for wrote < total {
n, err := w.Write(b)
@@ -2429,11 +2404,11 @@ func TestTransportFlowControl(t *testing.T) {
}
w.(http.Flusher).Flush()
}
- }, optOnlyServer)
+ })
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
- req, err := http.NewRequest("GET", st.ts.URL, nil)
+ req, err := http.NewRequest("GET", ts.URL, nil)
if err != nil {
t.Fatal("NewRequest error:", err)
}
@@ -2506,7 +2481,7 @@ func testTransportUsesGoAwayDebugError(t *testing.T, failMidBody bool) {
// the interesting parts of both.
tc.writeGoAway(5, ErrCodeNo, []byte(goAwayDebugData))
tc.writeGoAway(5, goAwayErrCode, nil)
- tc.closeWrite(io.EOF)
+ tc.closeWrite()
res, err := rt.result()
whence := "RoundTrip"
@@ -2584,6 +2559,9 @@ func testTransportReturnsUnusedFlowControl(t *testing.T, oneDataFrame bool) {
}
return true
},
+ func(f *PingFrame) bool {
+ return true
+ },
func(f *WindowUpdateFrame) bool {
if !oneDataFrame && !sentAdditionalData {
t.Fatalf("Got WindowUpdateFrame, don't expect one yet")
@@ -2631,7 +2609,7 @@ func TestTransportAdjustsFlowControl(t *testing.T) {
gotBytes := int64(0)
for {
- f := testClientConnReadFrame[*DataFrame](tc)
+ f := readFrame[*DataFrame](t, tc)
gotBytes += int64(len(f.Data()))
// After we've got half the client's initial flow control window's worth
// of request body data, give it just enough flow control to finish.
@@ -2727,7 +2705,7 @@ func TestTransportReturnsErrorOnBadResponseHeaders(t *testing.T) {
t.Fatalf("RoundTrip error = %#v; want %#v", err, want)
}
- fr := testClientConnReadFrame[*RSTStreamFrame](tc)
+ fr := readFrame[*RSTStreamFrame](t, tc)
if fr.StreamID != 1 || fr.ErrCode != ErrCodeProtocol {
t.Errorf("Frame = %v; want RST_STREAM for stream 1 with ErrCodeProtocol", summarizeFrame(fr))
}
@@ -2755,16 +2733,15 @@ func (b byteAndEOFReader) Read(p []byte) (n int, err error) {
// which returns (non-0, io.EOF) and also needs to set the ContentLength
// explicitly.
func TestTransportBodyDoubleEndStream(t *testing.T) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
// Nothing.
- }, optOnlyServer)
- defer st.Close()
+ })
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
for i := 0; i < 2; i++ {
- req, _ := http.NewRequest("POST", st.ts.URL, byteAndEOFReader('a'))
+ req, _ := http.NewRequest("POST", ts.URL, byteAndEOFReader('a'))
req.ContentLength = 1
res, err := tr.RoundTrip(req)
if err != nil {
@@ -2907,16 +2884,17 @@ func TestTransportRequestPathPseudo(t *testing.T) {
// before we've determined that the ClientConn is usable.
func TestRoundTripDoesntConsumeRequestBodyEarly(t *testing.T) {
const body = "foo"
- req, _ := http.NewRequest("POST", "http://foo.com/", ioutil.NopCloser(strings.NewReader(body)))
+ req, _ := http.NewRequest("POST", "http://foo.com/", io.NopCloser(strings.NewReader(body)))
cc := &ClientConn{
closed: true,
reqHeaderMu: make(chan struct{}, 1),
+ t: &Transport{},
}
_, err := cc.RoundTrip(req)
if err != errClientConnUnusable {
t.Fatalf("RoundTrip = %v; want errClientConnUnusable", err)
}
- slurp, err := ioutil.ReadAll(req.Body)
+ slurp, err := io.ReadAll(req.Body)
if err != nil {
t.Errorf("ReadAll = %v", err)
}
@@ -2926,12 +2904,11 @@ func TestRoundTripDoesntConsumeRequestBodyEarly(t *testing.T) {
}
func TestClientConnPing(t *testing.T) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}, optOnlyServer)
- defer st.Close()
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {})
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
ctx := context.Background()
- cc, err := tr.dialClientConn(ctx, st.ts.Listener.Addr().String(), false)
+ cc, err := tr.dialClientConn(ctx, ts.Listener.Addr().String(), false)
if err != nil {
t.Fatal(err)
}
@@ -2949,7 +2926,7 @@ func TestTransportCancelDataResponseRace(t *testing.T) {
clientGotResponse := make(chan bool, 1)
const msg = "Hello."
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
if strings.Contains(r.URL.Path, "/hello") {
time.Sleep(50 * time.Millisecond)
io.WriteString(w, msg)
@@ -2964,29 +2941,28 @@ func TestTransportCancelDataResponseRace(t *testing.T) {
}
time.Sleep(10 * time.Millisecond)
}
- }, optOnlyServer)
- defer st.Close()
+ })
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
c := &http.Client{Transport: tr}
- req, _ := http.NewRequest("GET", st.ts.URL, nil)
+ req, _ := http.NewRequest("GET", ts.URL, nil)
req.Cancel = cancel
res, err := c.Do(req)
clientGotResponse <- true
if err != nil {
t.Fatal(err)
}
- if _, err = io.Copy(ioutil.Discard, res.Body); err == nil {
+ if _, err = io.Copy(io.Discard, res.Body); err == nil {
t.Fatal("unexpected success")
}
- res, err = c.Get(st.ts.URL + "/hello")
+ res, err = c.Get(ts.URL + "/hello")
if err != nil {
t.Fatal(err)
}
- slurp, err := ioutil.ReadAll(res.Body)
+ slurp, err := io.ReadAll(res.Body)
if err != nil {
t.Fatal(err)
}
@@ -2998,21 +2974,20 @@ func TestTransportCancelDataResponseRace(t *testing.T) {
// Issue 21316: It should be safe to reuse an http.Request after the
// request has completed.
func TestTransportNoRaceOnRequestObjectAfterRequestComplete(t *testing.T) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(200)
io.WriteString(w, "body")
- }, optOnlyServer)
- defer st.Close()
+ })
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
- req, _ := http.NewRequest("GET", st.ts.URL, nil)
+ req, _ := http.NewRequest("GET", ts.URL, nil)
resp, err := tr.RoundTrip(req)
if err != nil {
t.Fatal(err)
}
- if _, err = io.Copy(ioutil.Discard, resp.Body); err != nil {
+ if _, err = io.Copy(io.Discard, resp.Body); err != nil {
t.Fatalf("error reading response body: %v", err)
}
if err := resp.Body.Close(); err != nil {
@@ -3045,11 +3020,9 @@ func TestTransportCloseAfterLostPing(t *testing.T) {
}
func TestTransportPingWriteBlocks(t *testing.T) {
- st := newServerTester(t,
+ ts := newTestServer(t,
func(w http.ResponseWriter, r *http.Request) {},
- optOnlyServer,
)
- defer st.Close()
tr := &Transport{
TLSClientConfig: tlsConfigInsecure,
DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
@@ -3068,7 +3041,7 @@ func TestTransportPingWriteBlocks(t *testing.T) {
}
defer tr.CloseIdleConnections()
c := &http.Client{Transport: tr}
- _, err := c.Get(st.ts.URL)
+ _, err := c.Get(ts.URL)
if err == nil {
t.Fatalf("Get = nil, want error")
}
@@ -3103,7 +3076,7 @@ func TestTransportPingWhenReadingMultiplePings(t *testing.T) {
// ...ping now.
tc.advance(1 * time.Millisecond)
- f := testClientConnReadFrame[*PingFrame](tc)
+ f := readFrame[*PingFrame](t, tc)
tc.writePing(true, f.Data)
}
@@ -3144,13 +3117,40 @@ func TestTransportPingWhenReadingPingDisabled(t *testing.T) {
}
}
-func TestTransportRetryAfterGOAWAY(t *testing.T) {
+func TestTransportRetryAfterGOAWAYNoRetry(t *testing.T) {
tt := newTestTransport(t)
req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
rt := tt.roundTrip(req)
- // First attempt: Server sends a GOAWAY.
+ // First attempt: Server sends a GOAWAY with an error and
+ // a MaxStreamID less than the request ID.
+ // This probably indicates that there was something wrong with our request,
+ // so we don't retry it.
+ tc := tt.getConn()
+ tc.wantFrameType(FrameSettings)
+ tc.wantFrameType(FrameWindowUpdate)
+ tc.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: true,
+ })
+ tc.writeSettings()
+ tc.writeGoAway(0 /*max id*/, ErrCodeInternal, nil)
+ if rt.err() == nil {
+ t.Fatalf("after GOAWAY, RoundTrip is not done, want error")
+ }
+}
+
+func TestTransportRetryAfterGOAWAYRetry(t *testing.T) {
+ tt := newTestTransport(t)
+
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ rt := tt.roundTrip(req)
+
+ // First attempt: Server sends a GOAWAY with ErrCodeNo and
+ // a MaxStreamID less than the request ID.
+ // We take the server at its word that nothing has really gone wrong,
+ // and retry the request.
tc := tt.getConn()
tc.wantFrameType(FrameSettings)
tc.wantFrameType(FrameWindowUpdate)
@@ -3185,6 +3185,69 @@ func TestTransportRetryAfterGOAWAY(t *testing.T) {
rt.wantStatus(200)
}
+func TestTransportRetryAfterGOAWAYSecondRequest(t *testing.T) {
+ tt := newTestTransport(t)
+
+ // First request succeeds.
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ rt1 := tt.roundTrip(req)
+ tc := tt.getConn()
+ tc.wantFrameType(FrameSettings)
+ tc.wantFrameType(FrameWindowUpdate)
+ tc.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: true,
+ })
+ tc.writeSettings()
+ tc.wantFrameType(FrameSettings) // Settings ACK
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", "200",
+ ),
+ })
+ rt1.wantStatus(200)
+
+ // Second request: Server sends a GOAWAY with
+ // a MaxStreamID less than the request ID.
+ // The server says it didn't see this request,
+ // so we retry it on a new connection.
+ req, _ = http.NewRequest("GET", "https://dummy.tld/", nil)
+ rt2 := tt.roundTrip(req)
+
+ // Second request, first attempt.
+ tc.wantHeaders(wantHeader{
+ streamID: 3,
+ endStream: true,
+ })
+ tc.writeSettings()
+ tc.writeGoAway(1 /*max id*/, ErrCodeProtocol, nil)
+ if rt2.done() {
+ t.Fatalf("after GOAWAY, RoundTrip is done; want it to be retrying")
+ }
+
+ // Second request, second attempt.
+ tc = tt.getConn()
+ tc.wantFrameType(FrameSettings)
+ tc.wantFrameType(FrameWindowUpdate)
+ tc.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: true,
+ })
+ tc.writeSettings()
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", "200",
+ ),
+ })
+ rt2.wantStatus(200)
+}
+
func TestTransportRetryAfterRefusedStream(t *testing.T) {
tt := newTestTransport(t)
@@ -3249,8 +3312,8 @@ func TestTransportRetryHasLimit(t *testing.T) {
}
tc.writeRSTStream(streamID, ErrCodeRefusedStream)
- d := tt.tr.syncHooks.timeUntilEvent()
- if d == 0 {
+ d, scheduled := tt.group.TimeUntilEvent()
+ if !scheduled {
if streamID == 1 {
continue
}
@@ -3312,26 +3375,27 @@ func TestTransportMaxFrameReadSize(t *testing.T) {
maxReadFrameSize: 1024,
want: minMaxFrameSize,
}} {
- tc := newTestClientConn(t, func(tr *Transport) {
- tr.MaxReadFrameSize = test.maxReadFrameSize
- })
+ t.Run(fmt.Sprint(test.maxReadFrameSize), func(t *testing.T) {
+ tc := newTestClientConn(t, func(tr *Transport) {
+ tr.MaxReadFrameSize = test.maxReadFrameSize
+ })
- fr := testClientConnReadFrame[*SettingsFrame](tc)
- got, ok := fr.Value(SettingMaxFrameSize)
- if !ok {
- t.Errorf("Transport.MaxReadFrameSize = %v; server got no setting, want %v", test.maxReadFrameSize, test.want)
- } else if got != test.want {
- t.Errorf("Transport.MaxReadFrameSize = %v; server got %v, want %v", test.maxReadFrameSize, got, test.want)
- }
+ fr := readFrame[*SettingsFrame](t, tc)
+ got, ok := fr.Value(SettingMaxFrameSize)
+ if !ok {
+ t.Errorf("Transport.MaxReadFrameSize = %v; server got no setting, want %v", test.maxReadFrameSize, test.want)
+ } else if got != test.want {
+ t.Errorf("Transport.MaxReadFrameSize = %v; server got %v, want %v", test.maxReadFrameSize, got, test.want)
+ }
+ })
}
}
func TestTransportRequestsLowServerLimit(t *testing.T) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- }, optOnlyServer, func(s *Server) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
+ }, func(s *Server) {
s.MaxConcurrentStreams = 1
})
- defer st.Close()
var (
connCountMu sync.Mutex
@@ -3350,7 +3414,7 @@ func TestTransportRequestsLowServerLimit(t *testing.T) {
const reqCount = 3
for i := 0; i < reqCount; i++ {
- req, err := http.NewRequest("GET", st.ts.URL, nil)
+ req, err := http.NewRequest("GET", ts.URL, nil)
if err != nil {
t.Fatal(err)
}
@@ -3459,7 +3523,7 @@ func TestTransportMaxDecoderHeaderTableSize(t *testing.T) {
tr.MaxDecoderHeaderTableSize = reqSize
})
- fr := testClientConnReadFrame[*SettingsFrame](tc)
+ fr := readFrame[*SettingsFrame](t, tc)
if v, ok := fr.Value(SettingHeaderTableSize); !ok {
t.Fatalf("missing SETTINGS_HEADER_TABLE_SIZE setting")
} else if v != reqSize {
@@ -3467,6 +3531,8 @@ func TestTransportMaxDecoderHeaderTableSize(t *testing.T) {
}
tc.writeSettings(Setting{SettingHeaderTableSize, resSize})
+ tc.cc.mu.Lock()
+ defer tc.cc.mu.Unlock()
if got, want := tc.cc.peerMaxHeaderTableSize, resSize; got != want {
t.Fatalf("peerHeaderTableSize = %d, want %d", got, want)
}
@@ -3515,7 +3581,7 @@ func TestTransportAllocationsAfterResponseBodyClose(t *testing.T) {
writeErr := make(chan error, 1)
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
w.(http.Flusher).Flush()
var sum int64
for i := 0; i < 100; i++ {
@@ -3528,13 +3594,12 @@ func TestTransportAllocationsAfterResponseBodyClose(t *testing.T) {
}
t.Logf("wrote all %d bytes", sum)
writeErr <- nil
- }, optOnlyServer)
- defer st.Close()
+ })
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
c := &http.Client{Transport: tr}
- res, err := c.Get(st.ts.URL)
+ res, err := c.Get(ts.URL)
if err != nil {
t.Fatal(err)
}
@@ -3586,24 +3651,22 @@ func TestTransportNoBodyMeansNoDATA(t *testing.T) {
}
func benchSimpleRoundTrip(b *testing.B, nReqHeaders, nResHeader int) {
- defer disableGoroutineTracking()()
+ disableGoroutineTracking(b)
b.ReportAllocs()
- st := newServerTester(b,
+ ts := newTestServer(b,
func(w http.ResponseWriter, r *http.Request) {
for i := 0; i < nResHeader; i++ {
name := fmt.Sprint("A-", i)
w.Header().Set(name, "*")
}
},
- optOnlyServer,
optQuiet,
)
- defer st.Close()
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
- req, err := http.NewRequest("GET", st.ts.URL, nil)
+ req, err := http.NewRequest("GET", ts.URL, nil)
if err != nil {
b.Fatal(err)
}
@@ -3639,16 +3702,15 @@ func (r infiniteReader) Read(b []byte) (int, error) {
// Issue 20521: it is not an error to receive a response and end stream
// from the server without the body being consumed.
func TestTransportResponseAndResetWithoutConsumingBodyRace(t *testing.T) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
- }, optOnlyServer)
- defer st.Close()
+ })
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
// The request body needs to be big enough to trigger flow control.
- req, _ := http.NewRequest("PUT", st.ts.URL, infiniteReader{})
+ req, _ := http.NewRequest("PUT", ts.URL, infiniteReader{})
res, err := tr.RoundTrip(req)
if err != nil {
t.Fatal(err)
@@ -3701,10 +3763,10 @@ func BenchmarkDownloadFrameSize(b *testing.B) {
b.Run("512k Frame", func(b *testing.B) { benchLargeDownloadRoundTrip(b, 512*1024) })
}
func benchLargeDownloadRoundTrip(b *testing.B, frameSize uint32) {
- defer disableGoroutineTracking()()
+ disableGoroutineTracking(b)
const transferSize = 1024 * 1024 * 1024 // must be multiple of 1M
b.ReportAllocs()
- st := newServerTester(b,
+ ts := newTestServer(b,
func(w http.ResponseWriter, r *http.Request) {
// test 1GB transfer
w.Header().Set("Content-Length", strconv.Itoa(transferSize))
@@ -3715,12 +3777,11 @@ func benchLargeDownloadRoundTrip(b *testing.B, frameSize uint32) {
}
}, optQuiet,
)
- defer st.Close()
tr := &Transport{TLSClientConfig: tlsConfigInsecure, MaxReadFrameSize: frameSize}
defer tr.CloseIdleConnections()
- req, err := http.NewRequest("GET", st.ts.URL, nil)
+ req, err := http.NewRequest("GET", ts.URL, nil)
if err != nil {
b.Fatal(err)
}
@@ -3779,7 +3840,7 @@ func testClientConnClose(t *testing.T, closeMode closeMode) {
closeDone := make(chan struct{})
beforeHeader := func() {}
bodyWrite := func(w http.ResponseWriter) {}
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
defer close(handlerDone)
beforeHeader()
w.WriteHeader(http.StatusOK)
@@ -3796,13 +3857,12 @@ func testClientConnClose(t *testing.T, closeMode closeMode) {
t.Error("expected connection closed by client")
}
}
- }, optOnlyServer)
- defer st.Close()
+ })
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
ctx := context.Background()
- cc, err := tr.dialClientConn(ctx, st.ts.Listener.Addr().String(), false)
- req, err := http.NewRequest("GET", st.ts.URL, nil)
+ cc, err := tr.dialClientConn(ctx, ts.Listener.Addr().String(), false)
+ req, err := http.NewRequest("GET", ts.URL, nil)
if err != nil {
t.Fatal(err)
}
@@ -3902,7 +3962,7 @@ func testClientConnClose(t *testing.T, closeMode closeMode) {
case closeAtHeaders, closeAtBody:
if closeMode == closeAtBody {
go close(sendBody)
- if _, err := io.Copy(ioutil.Discard, res.Body); err == nil {
+ if _, err := io.Copy(io.Discard, res.Body); err == nil {
t.Error("expected a Copy error, got nil")
}
}
@@ -3953,7 +4013,7 @@ func TestClientConnShutdownCancel(t *testing.T) {
func TestTransportUsesGetBodyWhenPresent(t *testing.T) {
calls := 0
someBody := func() io.ReadCloser {
- return struct{ io.ReadCloser }{ioutil.NopCloser(bytes.NewReader(nil))}
+ return struct{ io.ReadCloser }{io.NopCloser(bytes.NewReader(nil))}
}
req := &http.Request{
Body: someBody(),
@@ -4080,7 +4140,7 @@ func TestTransportBodyEagerEndStream(t *testing.T) {
tc.roundTrip(req)
tc.wantFrameType(FrameHeaders)
- f := testClientConnReadFrame[*DataFrame](tc)
+ f := readFrame[*DataFrame](t, tc)
if !f.StreamEnded() {
t.Fatalf("data frame without END_STREAM %v", f)
}
@@ -4123,15 +4183,14 @@ func TestTransportBodyLargerThanSpecifiedContentLength_len2(t *testing.T) {
}
func testTransportBodyLargerThanSpecifiedContentLength(t *testing.T, body *chunkReader, contentLen int64) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
r.Body.Read(make([]byte, 6))
- }, optOnlyServer)
- defer st.Close()
+ })
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
- req, _ := http.NewRequest("POST", st.ts.URL, body)
+ req, _ := http.NewRequest("POST", ts.URL, body)
req.ContentLength = contentLen
_, err := tr.RoundTrip(req)
if err != errReqBodyTooLong {
@@ -4211,13 +4270,12 @@ func TestTransportRoundtripCloseOnWriteError(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}, optOnlyServer)
- defer st.Close()
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {})
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
ctx := context.Background()
- cc, err := tr.dialClientConn(ctx, st.ts.Listener.Addr().String(), false)
+ cc, err := tr.dialClientConn(ctx, ts.Listener.Addr().String(), false)
if err != nil {
t.Fatal(err)
}
@@ -4244,12 +4302,11 @@ func TestTransportRoundtripCloseOnWriteError(t *testing.T) {
// already. If the request body has started to be sent, one must wait until it
// is completed.
func TestTransportBodyRewindRace(t *testing.T) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Connection", "close")
w.WriteHeader(http.StatusOK)
return
- }, optOnlyServer)
- defer st.Close()
+ })
tr := &http.Transport{
TLSClientConfig: tlsConfigInsecure,
@@ -4268,7 +4325,7 @@ func TestTransportBodyRewindRace(t *testing.T) {
var wg sync.WaitGroup
wg.Add(clients)
for i := 0; i < clients; i++ {
- req, err := http.NewRequest("POST", st.ts.URL, bytes.NewBufferString("abcdef"))
+ req, err := http.NewRequest("POST", ts.URL, bytes.NewBufferString("abcdef"))
if err != nil {
t.Fatalf("unexpect new request error: %v", err)
}
@@ -4288,11 +4345,10 @@ func TestTransportBodyRewindRace(t *testing.T) {
// Issue 42498: A request with a body will never be sent if the stream is
// reset prior to sending any data.
func TestTransportServerResetStreamAtHeaders(t *testing.T) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusUnauthorized)
return
- }, optOnlyServer)
- defer st.Close()
+ })
tr := &http.Transport{
TLSClientConfig: tlsConfigInsecure,
@@ -4308,7 +4364,7 @@ func TestTransportServerResetStreamAtHeaders(t *testing.T) {
Transport: tr,
}
- req, err := http.NewRequest("POST", st.ts.URL, errorReader{io.EOF})
+ req, err := http.NewRequest("POST", ts.URL, errorReader{io.EOF})
if err != nil {
t.Fatalf("unexpect new request error: %v", err)
}
@@ -4336,15 +4392,14 @@ func (tr *trackingReader) WasRead() bool {
}
func TestTransportExpectContinue(t *testing.T) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case "/reject":
w.WriteHeader(403)
default:
io.Copy(io.Discard, r.Body)
}
- }, optOnlyServer)
- defer st.Close()
+ })
tr := &http.Transport{
TLSClientConfig: tlsConfigInsecure,
@@ -4387,7 +4442,7 @@ func TestTransportExpectContinue(t *testing.T) {
t.Run(tc.Name, func(t *testing.T) {
startTime := time.Now()
- req, err := http.NewRequest("POST", st.ts.URL+tc.Path, tc.Body)
+ req, err := http.NewRequest("POST", ts.URL+tc.Path, tc.Body)
if err != nil {
t.Fatal(err)
}
@@ -4499,11 +4554,11 @@ func (c *blockingWriteConn) Write(b []byte) (n int, err error) {
func TestTransportFrameBufferReuse(t *testing.T) {
filler := hex.EncodeToString([]byte(randString(2048)))
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
if got, want := r.Header.Get("Big"), filler; got != want {
t.Errorf(`r.Header.Get("Big") = %q, want %q`, got, want)
}
- b, err := ioutil.ReadAll(r.Body)
+ b, err := io.ReadAll(r.Body)
if err != nil {
t.Errorf("error reading request body: %v", err)
}
@@ -4513,8 +4568,7 @@ func TestTransportFrameBufferReuse(t *testing.T) {
if got, want := r.Trailer.Get("Big"), filler; got != want {
t.Errorf(`r.Trailer.Get("Big") = %q, want %q`, got, want)
}
- }, optOnlyServer)
- defer st.Close()
+ })
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
@@ -4525,7 +4579,7 @@ func TestTransportFrameBufferReuse(t *testing.T) {
wg.Add(1)
go func() {
defer wg.Done()
- req, err := http.NewRequest("POST", st.ts.URL, strings.NewReader(filler))
+ req, err := http.NewRequest("POST", ts.URL, strings.NewReader(filler))
if err != nil {
t.Error(err)
return
@@ -4591,7 +4645,7 @@ func TestTransportBlockingRequestWrite(t *testing.T) {
}} {
test := test
t.Run(test.name, func(t *testing.T) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
if v := r.Header.Get("Big"); v != "" && v != filler {
t.Errorf("request header mismatch")
}
@@ -4601,10 +4655,9 @@ func TestTransportBlockingRequestWrite(t *testing.T) {
if v := r.Trailer.Get("Big"); v != "" && v != filler {
t.Errorf("request trailer mismatch\ngot: %q\nwant: %q", string(v), filler)
}
- }, optOnlyServer, func(s *Server) {
+ }, func(s *Server) {
s.MaxConcurrentStreams = 1
})
- defer st.Close()
// This Transport creates connections that block on writes after 1024 bytes.
connc := make(chan *blockingWriteConn, 1)
@@ -4626,7 +4679,7 @@ func TestTransportBlockingRequestWrite(t *testing.T) {
// Request 1: A small request to ensure we read the server MaxConcurrentStreams.
{
- req, err := http.NewRequest("POST", st.ts.URL, nil)
+ req, err := http.NewRequest("POST", ts.URL, nil)
if err != nil {
t.Fatal(err)
}
@@ -4646,7 +4699,7 @@ func TestTransportBlockingRequestWrite(t *testing.T) {
reqc := make(chan struct{})
go func() {
defer close(reqc)
- req, err := test.req(st.ts.URL)
+ req, err := test.req(ts.URL)
if err != nil {
t.Error(err)
return
@@ -4662,7 +4715,7 @@ func TestTransportBlockingRequestWrite(t *testing.T) {
// Request 3: A small request that is sent on a new connection, since request 2
// is hogging the only available stream on the previous connection.
{
- req, err := http.NewRequest("POST", st.ts.URL, nil)
+ req, err := http.NewRequest("POST", ts.URL, nil)
if err != nil {
t.Fatal(err)
}
@@ -4697,15 +4750,14 @@ func TestTransportBlockingRequestWrite(t *testing.T) {
func TestTransportCloseRequestBody(t *testing.T) {
var statusCode int
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(statusCode)
- }, optOnlyServer)
- defer st.Close()
+ })
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
ctx := context.Background()
- cc, err := tr.dialClientConn(ctx, st.ts.Listener.Addr().String(), false)
+ cc, err := tr.dialClientConn(ctx, ts.Listener.Addr().String(), false)
if err != nil {
t.Fatal(err)
}
@@ -4809,33 +4861,36 @@ func TestTransportRetriesOnStreamProtocolError(t *testing.T) {
}
func TestClientConnReservations(t *testing.T) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- }, func(s *Server) {
- s.MaxConcurrentStreams = initialMaxConcurrentStreams
- })
- defer st.Close()
-
- tr := &Transport{TLSClientConfig: tlsConfigInsecure}
- defer tr.CloseIdleConnections()
+ tc := newTestClientConn(t)
+ tc.greet(
+ Setting{ID: SettingMaxConcurrentStreams, Val: initialMaxConcurrentStreams},
+ )
- cc, err := tr.newClientConn(st.cc, false, nil)
- if err != nil {
- t.Fatal(err)
+ doRoundTrip := func() {
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ rt := tc.roundTrip(req)
+ tc.wantFrameType(FrameHeaders)
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: rt.streamID(),
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", "200",
+ ),
+ })
+ rt.wantStatus(200)
}
- req, _ := http.NewRequest("GET", st.ts.URL, nil)
n := 0
- for n <= initialMaxConcurrentStreams && cc.ReserveNewRequest() {
+ for n <= initialMaxConcurrentStreams && tc.cc.ReserveNewRequest() {
n++
}
if n != initialMaxConcurrentStreams {
t.Errorf("did %v reservations; want %v", n, initialMaxConcurrentStreams)
}
- if _, err := cc.RoundTrip(req); err != nil {
- t.Fatalf("RoundTrip error = %v", err)
- }
+ doRoundTrip()
n2 := 0
- for n2 <= 5 && cc.ReserveNewRequest() {
+ for n2 <= 5 && tc.cc.ReserveNewRequest() {
n2++
}
if n2 != 1 {
@@ -4844,11 +4899,11 @@ func TestClientConnReservations(t *testing.T) {
// Use up all the reservations
for i := 0; i < n; i++ {
- cc.RoundTrip(req)
+ doRoundTrip()
}
n2 = 0
- for n2 <= initialMaxConcurrentStreams && cc.ReserveNewRequest() {
+ for n2 <= initialMaxConcurrentStreams && tc.cc.ReserveNewRequest() {
n2++
}
if n2 != n {
@@ -4882,10 +4937,9 @@ func TestTransportTimeoutServerHangs(t *testing.T) {
func TestTransportContentLengthWithoutBody(t *testing.T) {
contentLength := ""
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Length", contentLength)
- }, optOnlyServer)
- defer st.Close()
+ })
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
@@ -4912,7 +4966,7 @@ func TestTransportContentLengthWithoutBody(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
contentLength = test.contentLength
- req, _ := http.NewRequest("GET", st.ts.URL, nil)
+ req, _ := http.NewRequest("GET", ts.URL, nil)
res, err := tr.RoundTrip(req)
if err != nil {
t.Fatal(err)
@@ -4934,18 +4988,17 @@ func TestTransportContentLengthWithoutBody(t *testing.T) {
}
func TestTransportCloseResponseBodyWhileRequestBodyHangs(t *testing.T) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(200)
w.(http.Flusher).Flush()
io.Copy(io.Discard, r.Body)
- }, optOnlyServer)
- defer st.Close()
+ })
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
pr, pw := net.Pipe()
- req, err := http.NewRequest("GET", st.ts.URL, pr)
+ req, err := http.NewRequest("GET", ts.URL, pr)
if err != nil {
t.Fatal(err)
}
@@ -4961,19 +5014,18 @@ func TestTransportCloseResponseBodyWhileRequestBodyHangs(t *testing.T) {
func TestTransport300ResponseBody(t *testing.T) {
reqc := make(chan struct{})
body := []byte("response body")
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(300)
w.(http.Flusher).Flush()
<-reqc
w.Write(body)
- }, optOnlyServer)
- defer st.Close()
+ })
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
pr, pw := net.Pipe()
- req, err := http.NewRequest("GET", st.ts.URL, pr)
+ req, err := http.NewRequest("GET", ts.URL, pr)
if err != nil {
t.Fatal(err)
}
@@ -4994,11 +5046,9 @@ func TestTransport300ResponseBody(t *testing.T) {
}
func TestTransportWriteByteTimeout(t *testing.T) {
- st := newServerTester(t,
+ ts := newTestServer(t,
func(w http.ResponseWriter, r *http.Request) {},
- optOnlyServer,
)
- defer st.Close()
tr := &Transport{
TLSClientConfig: tlsConfigInsecure,
DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
@@ -5010,7 +5060,7 @@ func TestTransportWriteByteTimeout(t *testing.T) {
defer tr.CloseIdleConnections()
c := &http.Client{Transport: tr}
- _, err := c.Get(st.ts.URL)
+ _, err := c.Get(ts.URL)
if !errors.Is(err, os.ErrDeadlineExceeded) {
t.Fatalf("Get on unresponsive connection: got %q; want ErrDeadlineExceeded", err)
}
@@ -5038,11 +5088,9 @@ func (c *slowWriteConn) Write(b []byte) (n int, err error) {
}
func TestTransportSlowWrites(t *testing.T) {
- st := newServerTester(t,
+ ts := newTestServer(t,
func(w http.ResponseWriter, r *http.Request) {},
- optOnlyServer,
)
- defer st.Close()
tr := &Transport{
TLSClientConfig: tlsConfigInsecure,
DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
@@ -5056,7 +5104,7 @@ func TestTransportSlowWrites(t *testing.T) {
c := &http.Client{Transport: tr}
const bodySize = 1 << 20
- resp, err := c.Post(st.ts.URL, "text/foo", io.LimitReader(neverEnding('A'), bodySize))
+ resp, err := c.Post(ts.URL, "text/foo", io.LimitReader(neverEnding('A'), bodySize))
if err != nil {
t.Fatal(err)
}
@@ -5098,12 +5146,12 @@ func testTransportClosesConnAfterGoAway(t *testing.T, lastStream uint32) {
})
}
- tc.closeWrite(io.EOF)
+ tc.closeWrite()
err := rt.err()
if gotErr, wantErr := err != nil, lastStream == 0; gotErr != wantErr {
t.Errorf("RoundTrip got error %v (want error: %v)", err, wantErr)
}
- if !tc.netConnClosed {
+ if !tc.isClosed() {
t.Errorf("ClientConn did not close its net.Conn, expected it to")
}
}
@@ -5124,11 +5172,10 @@ func (r *slowCloser) Close() error {
}
func TestTransportSlowClose(t *testing.T) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- }, optOnlyServer)
- defer st.Close()
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
+ })
- client := st.ts.Client()
+ client := ts.Client()
body := &slowCloser{
closing: make(chan struct{}),
closed: make(chan struct{}),
@@ -5137,7 +5184,7 @@ func TestTransportSlowClose(t *testing.T) {
reqc := make(chan struct{})
go func() {
defer close(reqc)
- res, err := client.Post(st.ts.URL, "text/plain", body)
+ res, err := client.Post(ts.URL, "text/plain", body)
if err != nil {
t.Error(err)
}
@@ -5150,7 +5197,7 @@ func TestTransportSlowClose(t *testing.T) {
<-body.closing // wait for POST request to call body.Close
// This GET request should not be blocked by the in-progress POST.
- res, err := client.Get(st.ts.URL)
+ res, err := client.Get(ts.URL)
if err != nil {
t.Fatal(err)
}
@@ -5166,12 +5213,10 @@ func TestTransportDialTLSContext(t *testing.T) {
ClientAuth: tls.RequestClientCert,
}
}
- ts := newServerTester(t,
+ ts := newTestServer(t,
func(w http.ResponseWriter, r *http.Request) {},
- optOnlyServer,
serverTLSConfigFunc,
)
- defer ts.Close()
tr := &Transport{
TLSClientConfig: &tls.Config{
GetClientCertificate: func(cri *tls.CertificateRequestInfo) (*tls.Certificate, error) {
@@ -5185,7 +5230,7 @@ func TestTransportDialTLSContext(t *testing.T) {
},
}
defer tr.CloseIdleConnections()
- req, err := http.NewRequest(http.MethodGet, ts.ts.URL, nil)
+ req, err := http.NewRequest(http.MethodGet, ts.URL, nil)
if err != nil {
t.Fatal(err)
}
@@ -5230,12 +5275,10 @@ func TestDialRaceResumesDial(t *testing.T) {
ClientAuth: tls.RequestClientCert,
}
}
- ts := newServerTester(t,
+ ts := newTestServer(t,
func(w http.ResponseWriter, r *http.Request) {},
- optOnlyServer,
serverTLSConfigFunc,
)
- defer ts.Close()
tr := &Transport{
TLSClientConfig: &tls.Config{
GetClientCertificate: func(cri *tls.CertificateRequestInfo) (*tls.Certificate, error) {
@@ -5253,7 +5296,7 @@ func TestDialRaceResumesDial(t *testing.T) {
},
}
defer tr.CloseIdleConnections()
- req, err := http.NewRequest(http.MethodGet, ts.ts.URL, nil)
+ req, err := http.NewRequest(http.MethodGet, ts.URL, nil)
if err != nil {
t.Fatal(err)
}
@@ -5336,3 +5379,508 @@ func TestTransportDataAfter1xxHeader(t *testing.T) {
}
tc.wantFrameType(FrameRSTStream)
}
+
+func TestIssue66763Race(t *testing.T) {
+ tr := &Transport{
+ IdleConnTimeout: 1 * time.Nanosecond,
+ AllowHTTP: true, // issue 66763 only occurs when AllowHTTP is true
+ }
+ defer tr.CloseIdleConnections()
+
+ cli, srv := net.Pipe()
+ donec := make(chan struct{})
+ go func() {
+ // Creating the client conn may succeed or fail,
+ // depending on when the idle timeout happens.
+ // Either way, the idle timeout will close the net.Conn.
+ tr.NewClientConn(cli)
+ close(donec)
+ }()
+
+ // The client sends its preface and SETTINGS frame,
+ // and then closes its conn after the idle timeout.
+ io.ReadAll(srv)
+ srv.Close()
+
+ <-donec
+}
+
+// Issue 67671: Sending a Connection: close request on a Transport with AllowHTTP
+// set caused the transport to wedge.
+func TestIssue67671(t *testing.T) {
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {})
+ tr := &Transport{
+ TLSClientConfig: tlsConfigInsecure,
+ AllowHTTP: true,
+ }
+ defer tr.CloseIdleConnections()
+ req, _ := http.NewRequest("GET", ts.URL, nil)
+ req.Close = true
+ for i := 0; i < 2; i++ {
+ res, err := tr.RoundTrip(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ res.Body.Close()
+ }
+}
+
+func TestTransport1xxLimits(t *testing.T) {
+ for _, test := range []struct {
+ name string
+ opt any
+ ctxfn func(context.Context) context.Context
+ hcount int
+ limited bool
+ }{{
+ name: "default",
+ hcount: 10,
+ limited: false,
+ }, {
+ name: "MaxHeaderListSize",
+ opt: func(tr *Transport) {
+ tr.MaxHeaderListSize = 10000
+ },
+ hcount: 10,
+ limited: true,
+ }, {
+ name: "MaxResponseHeaderBytes",
+ opt: func(tr *http.Transport) {
+ tr.MaxResponseHeaderBytes = 10000
+ },
+ hcount: 10,
+ limited: true,
+ }, {
+ name: "limit by client trace",
+ ctxfn: func(ctx context.Context) context.Context {
+ count := 0
+ return httptrace.WithClientTrace(ctx, &httptrace.ClientTrace{
+ Got1xxResponse: func(code int, header textproto.MIMEHeader) error {
+ count++
+ if count >= 10 {
+ return errors.New("too many 1xx")
+ }
+ return nil
+ },
+ })
+ },
+ hcount: 10,
+ limited: true,
+ }, {
+ name: "limit disabled by client trace",
+ opt: func(tr *Transport) {
+ tr.MaxHeaderListSize = 10000
+ },
+ ctxfn: func(ctx context.Context) context.Context {
+ return httptrace.WithClientTrace(ctx, &httptrace.ClientTrace{
+ Got1xxResponse: func(code int, header textproto.MIMEHeader) error {
+ return nil
+ },
+ })
+ },
+ hcount: 20,
+ limited: false,
+ }} {
+ t.Run(test.name, func(t *testing.T) {
+ tc := newTestClientConn(t, test.opt)
+ tc.greet()
+
+ ctx := context.Background()
+ if test.ctxfn != nil {
+ ctx = test.ctxfn(ctx)
+ }
+ req, _ := http.NewRequestWithContext(ctx, "GET", "https://dummy.tld/", nil)
+ rt := tc.roundTrip(req)
+ tc.wantFrameType(FrameHeaders)
+
+ for i := 0; i < test.hcount; i++ {
+ if fr, err := tc.fr.ReadFrame(); err != os.ErrDeadlineExceeded {
+ t.Fatalf("after writing %v 1xx headers: read %v, %v; want idle", i, fr, err)
+ }
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: rt.streamID(),
+ EndHeaders: true,
+ EndStream: false,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", "103",
+ "x-field", strings.Repeat("a", 1000),
+ ),
+ })
+ }
+ if test.limited {
+ tc.wantFrameType(FrameRSTStream)
+ } else {
+ tc.wantIdle()
+ }
+ })
+ }
+}
+
+func TestTransportSendPingWithReset(t *testing.T) {
+ tc := newTestClientConn(t, func(tr *Transport) {
+ tr.StrictMaxConcurrentStreams = true
+ })
+
+ const maxConcurrent = 3
+ tc.greet(Setting{SettingMaxConcurrentStreams, maxConcurrent})
+
+ // Start several requests.
+ var rts []*testRoundTrip
+ for i := 0; i < maxConcurrent+1; i++ {
+ req := must(http.NewRequest("GET", "https://dummy.tld/", nil))
+ rt := tc.roundTrip(req)
+ if i >= maxConcurrent {
+ tc.wantIdle()
+ continue
+ }
+ tc.wantFrameType(FrameHeaders)
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: rt.streamID(),
+ EndHeaders: true,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", "200",
+ ),
+ })
+ rt.wantStatus(200)
+ rts = append(rts, rt)
+ }
+
+ // Cancel one request. We send a PING frame along with the RST_STREAM.
+ rts[0].response().Body.Close()
+ tc.wantRSTStream(rts[0].streamID(), ErrCodeCancel)
+ pf := readFrame[*PingFrame](t, tc)
+ tc.wantIdle()
+
+ // Cancel another request. No PING frame, since one is in flight.
+ rts[1].response().Body.Close()
+ tc.wantRSTStream(rts[1].streamID(), ErrCodeCancel)
+ tc.wantIdle()
+
+ // Respond to the PING.
+ // This finalizes the previous resets, and allows the pending request to be sent.
+ tc.writePing(true, pf.Data)
+ tc.wantFrameType(FrameHeaders)
+ tc.wantIdle()
+
+ // Receive a byte of data for the remaining stream, which resets our ability
+ // to send pings (see comment on ClientConn.rstStreamPingsBlocked).
+ tc.writeData(rts[2].streamID(), false, []byte{0})
+
+ // Cancel the last request. We send another PING, since none are in flight.
+ rts[2].response().Body.Close()
+ tc.wantRSTStream(rts[2].streamID(), ErrCodeCancel)
+ tc.wantFrameType(FramePing)
+ tc.wantIdle()
+}
+
+// Issue #70505: gRPC gets upset if we send more than 2 pings per HEADERS/DATA frame
+// sent by the server.
+func TestTransportSendNoMoreThanOnePingWithReset(t *testing.T) {
+ tc := newTestClientConn(t)
+ tc.greet()
+
+ makeAndResetRequest := func() {
+ t.Helper()
+ ctx, cancel := context.WithCancel(context.Background())
+ req := must(http.NewRequestWithContext(ctx, "GET", "https://dummy.tld/", nil))
+ rt := tc.roundTrip(req)
+ tc.wantFrameType(FrameHeaders)
+ cancel()
+ tc.wantRSTStream(rt.streamID(), ErrCodeCancel) // client sends RST_STREAM
+ }
+
+ // Create a request and cancel it.
+ // The client sends a PING frame along with the reset.
+ makeAndResetRequest()
+ pf1 := readFrame[*PingFrame](t, tc) // client sends PING
+
+ // Create another request and cancel it.
+ // We do not send a PING frame along with the reset,
+ // because we haven't received a HEADERS or DATA frame from the server
+ // since the last PING we sent.
+ makeAndResetRequest()
+
+ // Server belatedly responds to request 1.
+ // The server has not responded to our first PING yet.
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", "200",
+ ),
+ })
+
+ // Create yet another request and cancel it.
+ // We still do not send a PING frame along with the reset.
+ // We've received a HEADERS frame, but it came before the response to the PING.
+ makeAndResetRequest()
+
+ // The server responds to our PING.
+ tc.writePing(true, pf1.Data)
+
+ // Create yet another request and cancel it.
+ // Still no PING frame; we got a response to the previous one,
+ // but no HEADERS or DATA.
+ makeAndResetRequest()
+
+ // Server belatedly responds to the second request.
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: 3,
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", "200",
+ ),
+ })
+
+ // One more request.
+ // This time we send a PING frame.
+ makeAndResetRequest()
+ tc.wantFrameType(FramePing)
+}
+
+func TestTransportConnBecomesUnresponsive(t *testing.T) {
+ // We send a number of requests in series to an unresponsive connection.
+ // Each request is canceled or times out without a response.
+ // Eventually, we open a new connection rather than trying to use the old one.
+ tt := newTestTransport(t)
+
+ const maxConcurrent = 3
+
+ t.Logf("first request opens a new connection and succeeds")
+ req1 := must(http.NewRequest("GET", "https://dummy.tld/", nil))
+ rt1 := tt.roundTrip(req1)
+ tc1 := tt.getConn()
+ tc1.wantFrameType(FrameSettings)
+ tc1.wantFrameType(FrameWindowUpdate)
+ hf1 := readFrame[*HeadersFrame](t, tc1)
+ tc1.writeSettings(Setting{SettingMaxConcurrentStreams, maxConcurrent})
+ tc1.wantFrameType(FrameSettings) // ack
+ tc1.writeHeaders(HeadersFrameParam{
+ StreamID: hf1.StreamID,
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: tc1.makeHeaderBlockFragment(
+ ":status", "200",
+ ),
+ })
+ rt1.wantStatus(200)
+ rt1.response().Body.Close()
+
+ // Send more requests.
+ // None receive a response.
+ // Each is canceled.
+ for i := 0; i < maxConcurrent; i++ {
+ t.Logf("request %v receives no response and is canceled", i)
+ ctx, cancel := context.WithCancel(context.Background())
+ req := must(http.NewRequestWithContext(ctx, "GET", "https://dummy.tld/", nil))
+ tt.roundTrip(req)
+ if tt.hasConn() {
+ t.Fatalf("new connection created; expect existing conn to be reused")
+ }
+ tc1.wantFrameType(FrameHeaders)
+ cancel()
+ tc1.wantFrameType(FrameRSTStream)
+ if i == 0 {
+ tc1.wantFrameType(FramePing)
+ }
+ tc1.wantIdle()
+ }
+
+ // The conn has hit its concurrency limit.
+ // The next request is sent on a new conn.
+ req2 := must(http.NewRequest("GET", "https://dummy.tld/", nil))
+ rt2 := tt.roundTrip(req2)
+ tc2 := tt.getConn()
+ tc2.wantFrameType(FrameSettings)
+ tc2.wantFrameType(FrameWindowUpdate)
+ hf := readFrame[*HeadersFrame](t, tc2)
+ tc2.writeSettings(Setting{SettingMaxConcurrentStreams, maxConcurrent})
+ tc2.wantFrameType(FrameSettings) // ack
+ tc2.writeHeaders(HeadersFrameParam{
+ StreamID: hf.StreamID,
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: tc2.makeHeaderBlockFragment(
+ ":status", "200",
+ ),
+ })
+ rt2.wantStatus(200)
+ rt2.response().Body.Close()
+}
+
+// Test that the Transport can use a conn provided to it by a TLSNextProto hook.
+func TestTransportTLSNextProtoConnOK(t *testing.T) {
+ t1 := &http.Transport{}
+ t2, _ := ConfigureTransports(t1)
+ tt := newTestTransport(t, t2)
+
+ // Create a new, fake connection and pass it to the Transport via the TLSNextProto hook.
+ cli, _ := synctestNetPipe(tt.group)
+ cliTLS := tls.Client(cli, tlsConfigInsecure)
+ go func() {
+ tt.group.Join()
+ t1.TLSNextProto["h2"]("dummy.tld", cliTLS)
+ }()
+ tt.sync()
+ tc := tt.getConn()
+ tc.greet()
+
+ // Send a request on the Transport.
+ // It uses the conn we provided.
+ req := must(http.NewRequest("GET", "https://dummy.tld/", nil))
+ rt := tt.roundTrip(req)
+ tc.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: true,
+ header: http.Header{
+ ":authority": []string{"dummy.tld"},
+ ":method": []string{"GET"},
+ ":path": []string{"/"},
+ },
+ })
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", "200",
+ ),
+ })
+ rt.wantStatus(200)
+ rt.wantBody(nil)
+}
+
+// Test the case where a conn provided via a TLSNextProto hook immediately encounters an error.
+func TestTransportTLSNextProtoConnImmediateFailureUsed(t *testing.T) {
+ t1 := &http.Transport{}
+ t2, _ := ConfigureTransports(t1)
+ tt := newTestTransport(t, t2)
+
+ // Create a new, fake connection and pass it to the Transport via the TLSNextProto hook.
+ cli, _ := synctestNetPipe(tt.group)
+ cliTLS := tls.Client(cli, tlsConfigInsecure)
+ go func() {
+ tt.group.Join()
+ t1.TLSNextProto["h2"]("dummy.tld", cliTLS)
+ }()
+ tt.sync()
+ tc := tt.getConn()
+
+ // The connection encounters an error before we send a request that uses it.
+ tc.closeWrite()
+
+ // Send a request on the Transport.
+ //
+ // It should fail, because we have no usable connections, but not with ErrNoCachedConn.
+ req := must(http.NewRequest("GET", "https://dummy.tld/", nil))
+ rt := tt.roundTrip(req)
+ if err := rt.err(); err == nil || errors.Is(err, ErrNoCachedConn) {
+ t.Fatalf("RoundTrip with broken conn: got %v, want an error other than ErrNoCachedConn", err)
+ }
+
+ // Send the request again.
+ // This time it should fail with ErrNoCachedConn,
+ // because the dead conn has been removed from the pool.
+ rt = tt.roundTrip(req)
+ if err := rt.err(); !errors.Is(err, ErrNoCachedConn) {
+ t.Fatalf("RoundTrip after broken conn is used: got %v, want ErrNoCachedConn", err)
+ }
+}
+
+// Test the case where a conn provided via a TLSNextProto hook immediately encounters an error,
+// but no requests are sent which would use the bad connection.
+func TestTransportTLSNextProtoConnImmediateFailureUnused(t *testing.T) {
+ t1 := &http.Transport{}
+ t2, _ := ConfigureTransports(t1)
+ tt := newTestTransport(t, t2)
+
+ // Create a new, fake connection and pass it to the Transport via the TLSNextProto hook.
+ cli, _ := synctestNetPipe(tt.group)
+ cliTLS := tls.Client(cli, tlsConfigInsecure)
+ go func() {
+ tt.group.Join()
+ t1.TLSNextProto["h2"]("dummy.tld", cliTLS)
+ }()
+ tt.sync()
+ tc := tt.getConn()
+
+ // The connection encounters an error before we send a request that uses it.
+ tc.closeWrite()
+
+ // Some time passes.
+ // The dead connection is removed from the pool.
+ tc.advance(10 * time.Second)
+
+ // Send a request on the Transport.
+ //
+ // It should fail with ErrNoCachedConn, because the pool contains no conns.
+ req := must(http.NewRequest("GET", "https://dummy.tld/", nil))
+ rt := tt.roundTrip(req)
+ if err := rt.err(); !errors.Is(err, ErrNoCachedConn) {
+ t.Fatalf("RoundTrip after broken conn expires: got %v, want ErrNoCachedConn", err)
+ }
+}
+
+func TestExtendedConnectClientWithServerSupport(t *testing.T) {
+ disableExtendedConnectProtocol = false
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
+ if r.Header.Get(":protocol") != "extended-connect" {
+ t.Fatalf("unexpected :protocol header received")
+ }
+ t.Log(io.Copy(w, r.Body))
+ })
+ tr := &Transport{
+ TLSClientConfig: tlsConfigInsecure,
+ AllowHTTP: true,
+ }
+ defer tr.CloseIdleConnections()
+ pr, pw := io.Pipe()
+ pwDone := make(chan struct{})
+ req, _ := http.NewRequest("CONNECT", ts.URL, pr)
+ req.Header.Set(":protocol", "extended-connect")
+ go func() {
+ pw.Write([]byte("hello, extended connect"))
+ pw.Close()
+ close(pwDone)
+ }()
+
+ res, err := tr.RoundTrip(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ body, err := io.ReadAll(res.Body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(body, []byte("hello, extended connect")) {
+ t.Fatal("unexpected body received")
+ }
+}
+
+func TestExtendedConnectClientWithoutServerSupport(t *testing.T) {
+ disableExtendedConnectProtocol = true
+ ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {
+ io.Copy(w, r.Body)
+ })
+ tr := &Transport{
+ TLSClientConfig: tlsConfigInsecure,
+ AllowHTTP: true,
+ }
+ defer tr.CloseIdleConnections()
+ pr, pw := io.Pipe()
+ pwDone := make(chan struct{})
+ req, _ := http.NewRequest("CONNECT", ts.URL, pr)
+ req.Header.Set(":protocol", "extended-connect")
+ go func() {
+ pw.Write([]byte("hello, extended connect"))
+ pw.Close()
+ close(pwDone)
+ }()
+
+ _, err := tr.RoundTrip(req)
+ if !errors.Is(err, errExtendedConnectNotSupported) {
+ t.Fatalf("expected error errExtendedConnectNotSupported, got: %v", err)
+ }
+}
diff --git a/http2/unencrypted.go b/http2/unencrypted.go
new file mode 100644
index 0000000000..b2de211613
--- /dev/null
+++ b/http2/unencrypted.go
@@ -0,0 +1,32 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "crypto/tls"
+ "errors"
+ "net"
+)
+
+const nextProtoUnencryptedHTTP2 = "unencrypted_http2"
+
+// unencryptedNetConnFromTLSConn retrieves a net.Conn wrapped in a *tls.Conn.
+//
+// TLSNextProto functions accept a *tls.Conn.
+//
+// When passing an unencrypted HTTP/2 connection to a TLSNextProto function,
+// we pass a *tls.Conn with an underlying net.Conn containing the unencrypted connection.
+// To be extra careful about mistakes (accidentally dropping TLS encryption in a place
+// where we want it), the tls.Conn contains a net.Conn with an UnencryptedNetConn method
+// that returns the actual connection we want to use.
+func unencryptedNetConnFromTLSConn(tc *tls.Conn) (net.Conn, error) {
+ conner, ok := tc.NetConn().(interface {
+ UnencryptedNetConn() net.Conn
+ })
+ if !ok {
+ return nil, errors.New("http2: TLS conn unexpectedly found in unencrypted handoff")
+ }
+ return conner.UnencryptedNetConn(), nil
+}
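A minimal sketch (not part of the patch) of a net.Conn wrapper that would satisfy the interface checked by unencryptedNetConnFromTLSConn; the type name is an assumption used only for illustration:

type exampleUnencryptedConn struct {
	net.Conn // the plaintext connection to hand back to the caller
}

// UnencryptedNetConn returns the underlying plaintext connection,
// which is the method unencryptedNetConnFromTLSConn looks for.
func (c exampleUnencryptedConn) UnencryptedNetConn() net.Conn { return c.Conn }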
diff --git a/http2/write.go b/http2/write.go
index 33f61398a1..6ff6bee7e9 100644
--- a/http2/write.go
+++ b/http2/write.go
@@ -131,6 +131,16 @@ func (se StreamError) writeFrame(ctx writeContext) error {
func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
+type writePing struct {
+ data [8]byte
+}
+
+func (w writePing) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WritePing(false, w.data)
+}
+
+func (w writePing) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.data) <= max }
+
type writePingAck struct{ pf *PingFrame }
func (w writePingAck) writeFrame(ctx writeContext) error {
diff --git a/http2/writesched_priority.go b/http2/writesched_priority.go
index 0a242c669e..f6783339d1 100644
--- a/http2/writesched_priority.go
+++ b/http2/writesched_priority.go
@@ -443,8 +443,8 @@ func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, max
}
func (ws *priorityWriteScheduler) removeNode(n *priorityNode) {
- for k := n.kids; k != nil; k = k.next {
- k.setParent(n.parent)
+ for n.kids != nil {
+ n.kids.setParent(n.parent)
}
n.setParent(nil)
delete(ws.nodes, n.id)
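The rewritten loop above re-reads n.kids on each pass because setParent unlinks the child from its current sibling list before relinking it under the new parent; advancing via k.next after reparenting (as the old loop did) can skip children and leave them parented to the node being removed. A sketch of the safe pattern, under that assumption about setParent:

for n.kids != nil {
	// Reparenting the head removes it from n.kids, so the loop
	// terminates once every child has been moved to n.parent.
	n.kids.setParent(n.parent)
}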
diff --git a/http2/writesched_priority_test.go b/http2/writesched_priority_test.go
index b579ef9879..5aad057bea 100644
--- a/http2/writesched_priority_test.go
+++ b/http2/writesched_priority_test.go
@@ -562,3 +562,37 @@ func TestPriorityRstStreamOnNonOpenStreams(t *testing.T) {
t.Error(err)
}
}
+
+// https://go.dev/issue/66514
+func TestPriorityIssue66514(t *testing.T) {
+ addDep := func(ws *priorityWriteScheduler, child uint32, parent uint32) {
+ ws.AdjustStream(child, PriorityParam{
+ StreamDep: parent,
+ Exclusive: false,
+ Weight: 16,
+ })
+ }
+
+ validateDepTree := func(ws *priorityWriteScheduler, id uint32, t *testing.T) {
+ for n := ws.nodes[id]; n != nil; n = n.parent {
+ if n.parent == nil {
+ if n.id != uint32(0) {
+ t.Errorf("detected nodes not parented to 0")
+ }
+ }
+ }
+ }
+
+ ws := NewPriorityWriteScheduler(nil).(*priorityWriteScheduler)
+
+ // Root entry
+ addDep(ws, uint32(1), uint32(0))
+ addDep(ws, uint32(3), uint32(1))
+ addDep(ws, uint32(5), uint32(1))
+
+ for id := uint32(7); id < uint32(100); id += uint32(4) {
+ addDep(ws, id, id-uint32(4))
+ addDep(ws, id+uint32(2), id-uint32(4))
+ validateDepTree(ws, id, t)
+ }
+}
diff --git a/http2/z_spec_test.go b/http2/z_spec_test.go
deleted file mode 100644
index 610b2cdbc2..0000000000
--- a/http2/z_spec_test.go
+++ /dev/null
@@ -1,356 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http2
-
-import (
- "bytes"
- "encoding/xml"
- "flag"
- "fmt"
- "io"
- "os"
- "reflect"
- "regexp"
- "sort"
- "strconv"
- "strings"
- "sync"
- "testing"
-)
-
-var coverSpec = flag.Bool("coverspec", false, "Run spec coverage tests")
-
-// The global map of sentence coverage for the http2 spec.
-var defaultSpecCoverage specCoverage
-
-var loadSpecOnce sync.Once
-
-func loadSpec() {
- if f, err := os.Open("testdata/draft-ietf-httpbis-http2.xml"); err != nil {
- panic(err)
- } else {
- defaultSpecCoverage = readSpecCov(f)
- f.Close()
- }
-}
-
-// covers marks all sentences for section sec in defaultSpecCoverage. Sentences not
-// "covered" will be included in report outputted by TestSpecCoverage.
-func covers(sec, sentences string) {
- loadSpecOnce.Do(loadSpec)
- defaultSpecCoverage.cover(sec, sentences)
-}
-
-type specPart struct {
- section string
- sentence string
-}
-
-func (ss specPart) Less(oo specPart) bool {
- atoi := func(s string) int {
- n, err := strconv.Atoi(s)
- if err != nil {
- panic(err)
- }
- return n
- }
- a := strings.Split(ss.section, ".")
- b := strings.Split(oo.section, ".")
- for len(a) > 0 {
- if len(b) == 0 {
- return false
- }
- x, y := atoi(a[0]), atoi(b[0])
- if x == y {
- a, b = a[1:], b[1:]
- continue
- }
- return x < y
- }
- if len(b) > 0 {
- return true
- }
- return false
-}
-
-type bySpecSection []specPart
-
-func (a bySpecSection) Len() int { return len(a) }
-func (a bySpecSection) Less(i, j int) bool { return a[i].Less(a[j]) }
-func (a bySpecSection) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-type specCoverage struct {
- coverage map[specPart]bool
- d *xml.Decoder
-}
-
-func joinSection(sec []int) string {
- s := fmt.Sprintf("%d", sec[0])
- for _, n := range sec[1:] {
- s = fmt.Sprintf("%s.%d", s, n)
- }
- return s
-}
-
-func (sc specCoverage) readSection(sec []int) {
- var (
- buf = new(bytes.Buffer)
- sub = 0
- )
- for {
- tk, err := sc.d.Token()
- if err != nil {
- if err == io.EOF {
- return
- }
- panic(err)
- }
- switch v := tk.(type) {
- case xml.StartElement:
- if skipElement(v) {
- if err := sc.d.Skip(); err != nil {
- panic(err)
- }
- if v.Name.Local == "section" {
- sub++
- }
- break
- }
- switch v.Name.Local {
- case "section":
- sub++
- sc.readSection(append(sec, sub))
- case "xref":
- buf.Write(sc.readXRef(v))
- }
- case xml.CharData:
- if len(sec) == 0 {
- break
- }
- buf.Write(v)
- case xml.EndElement:
- if v.Name.Local == "section" {
- sc.addSentences(joinSection(sec), buf.String())
- return
- }
- }
- }
-}
-
-func (sc specCoverage) readXRef(se xml.StartElement) []byte {
- var b []byte
- for {
- tk, err := sc.d.Token()
- if err != nil {
- panic(err)
- }
- switch v := tk.(type) {
- case xml.CharData:
- if b != nil {
- panic("unexpected CharData")
- }
- b = []byte(string(v))
- case xml.EndElement:
- if v.Name.Local != "xref" {
-				panic("expected </xref>")
- }
- if b != nil {
- return b
- }
- sig := attrSig(se)
- switch sig {
- case "target":
- return []byte(fmt.Sprintf("[%s]", attrValue(se, "target")))
- case "fmt-of,rel,target", "fmt-,,rel,target":
- return []byte(fmt.Sprintf("[%s, %s]", attrValue(se, "target"), attrValue(se, "rel")))
- case "fmt-of,sec,target", "fmt-,,sec,target":
- return []byte(fmt.Sprintf("[section %s of %s]", attrValue(se, "sec"), attrValue(se, "target")))
- case "fmt-of,rel,sec,target":
- return []byte(fmt.Sprintf("[section %s of %s, %s]", attrValue(se, "sec"), attrValue(se, "target"), attrValue(se, "rel")))
- default:
- panic(fmt.Sprintf("unknown attribute signature %q in %#v", sig, fmt.Sprintf("%#v", se)))
- }
- default:
- panic(fmt.Sprintf("unexpected tag %q", v))
- }
- }
-}
-
-var skipAnchor = map[string]bool{
- "intro": true,
- "Overview": true,
-}
-
-var skipTitle = map[string]bool{
- "Acknowledgements": true,
- "Change Log": true,
- "Document Organization": true,
- "Conventions and Terminology": true,
-}
-
-func skipElement(s xml.StartElement) bool {
- switch s.Name.Local {
- case "artwork":
- return true
- case "section":
- for _, attr := range s.Attr {
- switch attr.Name.Local {
- case "anchor":
- if skipAnchor[attr.Value] || strings.HasPrefix(attr.Value, "changes.since.") {
- return true
- }
- case "title":
- if skipTitle[attr.Value] {
- return true
- }
- }
- }
- }
- return false
-}
-
-func readSpecCov(r io.Reader) specCoverage {
- sc := specCoverage{
- coverage: map[specPart]bool{},
- d: xml.NewDecoder(r)}
- sc.readSection(nil)
- return sc
-}
-
-func (sc specCoverage) addSentences(sec string, sentence string) {
- for _, s := range parseSentences(sentence) {
- sc.coverage[specPart{sec, s}] = false
- }
-}
-
-func (sc specCoverage) cover(sec string, sentence string) {
- for _, s := range parseSentences(sentence) {
- p := specPart{sec, s}
- if _, ok := sc.coverage[p]; !ok {
- panic(fmt.Sprintf("Not found in spec: %q, %q", sec, s))
- }
- sc.coverage[specPart{sec, s}] = true
- }
-
-}
-
-var whitespaceRx = regexp.MustCompile(`\s+`)
-
-func parseSentences(sens string) []string {
- sens = strings.TrimSpace(sens)
- if sens == "" {
- return nil
- }
- ss := strings.Split(whitespaceRx.ReplaceAllString(sens, " "), ". ")
- for i, s := range ss {
- s = strings.TrimSpace(s)
- if !strings.HasSuffix(s, ".") {
- s += "."
- }
- ss[i] = s
- }
- return ss
-}
-
-func TestSpecParseSentences(t *testing.T) {
- tests := []struct {
- ss string
- want []string
- }{
- {"Sentence 1. Sentence 2.",
- []string{
- "Sentence 1.",
- "Sentence 2.",
- }},
- {"Sentence 1. \nSentence 2.\tSentence 3.",
- []string{
- "Sentence 1.",
- "Sentence 2.",
- "Sentence 3.",
- }},
- }
-
- for i, tt := range tests {
- got := parseSentences(tt.ss)
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("%d: got = %q, want %q", i, got, tt.want)
- }
- }
-}
-
-func TestSpecCoverage(t *testing.T) {
- if !*coverSpec {
- t.Skip()
- }
-
- loadSpecOnce.Do(loadSpec)
-
- var (
- list []specPart
- cv = defaultSpecCoverage.coverage
- total = len(cv)
- complete = 0
- )
-
- for sp, touched := range defaultSpecCoverage.coverage {
- if touched {
- complete++
- } else {
- list = append(list, sp)
- }
- }
- sort.Stable(bySpecSection(list))
-
- if testing.Short() && len(list) > 5 {
- list = list[:5]
- }
-
- for _, p := range list {
- t.Errorf("\tSECTION %s: %s", p.section, p.sentence)
- }
-
- t.Logf("%d/%d (%d%%) sentences covered", complete, total, (complete/total)*100)
-}
-
-func attrSig(se xml.StartElement) string {
- var names []string
- for _, attr := range se.Attr {
- if attr.Name.Local == "fmt" {
- names = append(names, "fmt-"+attr.Value)
- } else {
- names = append(names, attr.Name.Local)
- }
- }
- sort.Strings(names)
- return strings.Join(names, ",")
-}
-
-func attrValue(se xml.StartElement, attr string) string {
- for _, a := range se.Attr {
- if a.Name.Local == attr {
- return a.Value
- }
- }
- panic("unknown attribute " + attr)
-}
-
-func TestSpecPartLess(t *testing.T) {
- tests := []struct {
- sec1, sec2 string
- want bool
- }{
- {"6.2.1", "6.2", false},
- {"6.2", "6.2.1", true},
- {"6.10", "6.10.1", true},
- {"6.10", "6.1.1", false}, // 10, not 1
- {"6.1", "6.1", false}, // equal, so not less
- }
- for _, tt := range tests {
- got := (specPart{tt.sec1, "foo"}).Less(specPart{tt.sec2, "foo"})
- if got != tt.want {
- t.Errorf("Less(%q, %q) = %v; want %v", tt.sec1, tt.sec2, got, tt.want)
- }
- }
-}
diff --git a/internal/iana/gen.go b/internal/iana/gen.go
index 0fe65d8998..b4470baa75 100644
--- a/internal/iana/gen.go
+++ b/internal/iana/gen.go
@@ -16,7 +16,6 @@ import (
"fmt"
"go/format"
"io"
- "io/ioutil"
"net/http"
"os"
"strconv"
@@ -69,7 +68,7 @@ func main() {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
- if err := ioutil.WriteFile("const.go", b, 0644); err != nil {
+ if err := os.WriteFile("const.go", b, 0644); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
diff --git a/internal/socket/socket_test.go b/internal/socket/socket_test.go
index faba106063..44c196b014 100644
--- a/internal/socket/socket_test.go
+++ b/internal/socket/socket_test.go
@@ -9,7 +9,6 @@ package socket_test
import (
"bytes"
"fmt"
- "io/ioutil"
"net"
"os"
"os/exec"
@@ -446,7 +445,7 @@ func main() {
if runtime.Compiler == "gccgo" {
t.Skip("skipping race test when built with gccgo")
}
- dir, err := ioutil.TempDir("", "testrace")
+ dir, err := os.MkdirTemp("", "testrace")
if err != nil {
t.Fatalf("failed to create temp directory: %v", err)
}
@@ -463,7 +462,7 @@ func main() {
for i, test := range tests {
t.Run(fmt.Sprintf("test %d", i), func(t *testing.T) {
src := filepath.Join(dir, fmt.Sprintf("test%d.go", i))
- if err := ioutil.WriteFile(src, []byte(test), 0644); err != nil {
+ if err := os.WriteFile(src, []byte(test), 0644); err != nil {
t.Fatalf("failed to write file: %v", err)
}
t.Logf("%s run -race %s", goBinary, src)
diff --git a/internal/socket/zsys_openbsd_ppc64.go b/internal/socket/zsys_openbsd_ppc64.go
index cebde7634f..3c9576e2d8 100644
--- a/internal/socket/zsys_openbsd_ppc64.go
+++ b/internal/socket/zsys_openbsd_ppc64.go
@@ -4,27 +4,27 @@
package socket
type iovec struct {
- Base *byte
- Len uint64
+ Base *byte
+ Len uint64
}
type msghdr struct {
- Name *byte
- Namelen uint32
- Iov *iovec
- Iovlen uint32
- Control *byte
- Controllen uint32
- Flags int32
+ Name *byte
+ Namelen uint32
+ Iov *iovec
+ Iovlen uint32
+ Control *byte
+ Controllen uint32
+ Flags int32
}
type cmsghdr struct {
- Len uint32
- Level int32
- Type int32
+ Len uint32
+ Level int32
+ Type int32
}
const (
- sizeofIovec = 0x10
- sizeofMsghdr = 0x30
+ sizeofIovec = 0x10
+ sizeofMsghdr = 0x30
)
diff --git a/internal/socket/zsys_openbsd_riscv64.go b/internal/socket/zsys_openbsd_riscv64.go
index cebde7634f..3c9576e2d8 100644
--- a/internal/socket/zsys_openbsd_riscv64.go
+++ b/internal/socket/zsys_openbsd_riscv64.go
@@ -4,27 +4,27 @@
package socket
type iovec struct {
- Base *byte
- Len uint64
+ Base *byte
+ Len uint64
}
type msghdr struct {
- Name *byte
- Namelen uint32
- Iov *iovec
- Iovlen uint32
- Control *byte
- Controllen uint32
- Flags int32
+ Name *byte
+ Namelen uint32
+ Iov *iovec
+ Iovlen uint32
+ Control *byte
+ Controllen uint32
+ Flags int32
}
type cmsghdr struct {
- Len uint32
- Level int32
- Type int32
+ Len uint32
+ Level int32
+ Type int32
}
const (
- sizeofIovec = 0x10
- sizeofMsghdr = 0x30
+ sizeofIovec = 0x10
+ sizeofMsghdr = 0x30
)
diff --git a/ipv4/gen.go b/ipv4/gen.go
index 121c7643e9..f0182be2da 100644
--- a/ipv4/gen.go
+++ b/ipv4/gen.go
@@ -17,7 +17,6 @@ import (
"fmt"
"go/format"
"io"
- "io/ioutil"
"net/http"
"os"
"os/exec"
@@ -61,7 +60,7 @@ func genzsys() error {
case "freebsd", "linux":
zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go"
}
- if err := ioutil.WriteFile(zsys, b, 0644); err != nil {
+ if err := os.WriteFile(zsys, b, 0644); err != nil {
return err
}
return nil
@@ -100,7 +99,7 @@ func geniana() error {
if err != nil {
return err
}
- if err := ioutil.WriteFile("iana.go", b, 0644); err != nil {
+ if err := os.WriteFile("iana.go", b, 0644); err != nil {
return err
}
return nil
diff --git a/ipv6/gen.go b/ipv6/gen.go
index 2973dff5ce..590568a113 100644
--- a/ipv6/gen.go
+++ b/ipv6/gen.go
@@ -17,7 +17,6 @@ import (
"fmt"
"go/format"
"io"
- "io/ioutil"
"net/http"
"os"
"os/exec"
@@ -61,7 +60,7 @@ func genzsys() error {
case "freebsd", "linux":
zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go"
}
- if err := ioutil.WriteFile(zsys, b, 0644); err != nil {
+ if err := os.WriteFile(zsys, b, 0644); err != nil {
return err
}
return nil
@@ -100,7 +99,7 @@ func geniana() error {
if err != nil {
return err
}
- if err := ioutil.WriteFile("iana.go", b, 0644); err != nil {
+ if err := os.WriteFile("iana.go", b, 0644); err != nil {
return err
}
return nil
diff --git a/nettest/conntest.go b/nettest/conntest.go
index 615f4980c5..4297d408c0 100644
--- a/nettest/conntest.go
+++ b/nettest/conntest.go
@@ -8,7 +8,6 @@ import (
"bytes"
"encoding/binary"
"io"
- "io/ioutil"
"math/rand"
"net"
"runtime"
@@ -173,7 +172,7 @@ func testRacyRead(t *testing.T, c1, c2 net.Conn) {
// testRacyWrite tests that it is safe to mutate the input Write buffer
// immediately after cancelation has occurred.
func testRacyWrite(t *testing.T, c1, c2 net.Conn) {
- go chunkedCopy(ioutil.Discard, c2)
+ go chunkedCopy(io.Discard, c2)
var wg sync.WaitGroup
defer wg.Wait()
@@ -200,7 +199,7 @@ func testRacyWrite(t *testing.T, c1, c2 net.Conn) {
// testReadTimeout tests that Read timeouts do not affect Write.
func testReadTimeout(t *testing.T, c1, c2 net.Conn) {
- go chunkedCopy(ioutil.Discard, c2)
+ go chunkedCopy(io.Discard, c2)
c1.SetReadDeadline(aLongTimeAgo)
_, err := c1.Read(make([]byte, 1024))
diff --git a/nettest/nettest.go b/nettest/nettest.go
index 3656c3c54b..37e6dcb1b4 100644
--- a/nettest/nettest.go
+++ b/nettest/nettest.go
@@ -8,7 +8,6 @@ package nettest
import (
"errors"
"fmt"
- "io/ioutil"
"net"
"os"
"os/exec"
@@ -226,7 +225,7 @@ func LocalPath() (string, error) {
if runtime.GOOS == "darwin" {
dir = "/tmp"
}
- f, err := ioutil.TempFile(dir, "go-nettest")
+ f, err := os.CreateTemp(dir, "go-nettest")
if err != nil {
return "", err
}
diff --git a/proxy/per_host.go b/proxy/per_host.go
index 573fe79e86..d7d4b8b6e3 100644
--- a/proxy/per_host.go
+++ b/proxy/per_host.go
@@ -137,9 +137,7 @@ func (p *PerHost) AddNetwork(net *net.IPNet) {
// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
// "example.com" matches "example.com" and all of its subdomains.
func (p *PerHost) AddZone(zone string) {
- if strings.HasSuffix(zone, ".") {
- zone = zone[:len(zone)-1]
- }
+ zone = strings.TrimSuffix(zone, ".")
if !strings.HasPrefix(zone, ".") {
zone = "." + zone
}
@@ -148,8 +146,6 @@ func (p *PerHost) AddZone(zone string) {
// AddHost specifies a host name that will use the bypass proxy.
func (p *PerHost) AddHost(host string) {
- if strings.HasSuffix(host, ".") {
- host = host[:len(host)-1]
- }
+ host = strings.TrimSuffix(host, ".")
p.bypassHosts = append(p.bypassHosts, host)
}
diff --git a/publicsuffix/gen.go b/publicsuffix/gen.go
index 21c191415f..7f7d08dbc2 100644
--- a/publicsuffix/gen.go
+++ b/publicsuffix/gen.go
@@ -26,7 +26,6 @@ import (
"fmt"
"go/format"
"io"
- "io/ioutil"
"net/http"
"os"
"regexp"
@@ -298,7 +297,7 @@ func generate(p func(io.Writer, *node) error, root *node, filename string) error
if err != nil {
return err
}
- return ioutil.WriteFile(filename, b, 0644)
+ return os.WriteFile(filename, b, 0644)
}
func gitCommit() (sha, date string, retErr error) {
@@ -310,7 +309,7 @@ func gitCommit() (sha, date string, retErr error) {
return "", "", fmt.Errorf("bad GET status for %s: %s", gitCommitURL, res.Status)
}
defer res.Body.Close()
- b, err := ioutil.ReadAll(res.Body)
+ b, err := io.ReadAll(res.Body)
if err != nil {
return "", "", err
}
diff --git a/quic/conn.go b/quic/conn.go
index 38e8fe8f4e..bf54409bfe 100644
--- a/quic/conn.go
+++ b/quic/conn.go
@@ -176,6 +176,16 @@ func (c *Conn) String() string {
return fmt.Sprintf("quic.Conn(%v,->%v)", c.side, c.peerAddr)
}
+// LocalAddr returns the local network address, if known.
+func (c *Conn) LocalAddr() netip.AddrPort {
+ return c.localAddr
+}
+
+// RemoteAddr returns the remote network address, if known.
+func (c *Conn) RemoteAddr() netip.AddrPort {
+ return c.peerAddr
+}
+
// confirmHandshake is called when the handshake is confirmed.
// https://www.rfc-editor.org/rfc/rfc9001#section-4.1.2
func (c *Conn) confirmHandshake(now time.Time) {
@@ -206,6 +216,9 @@ func (c *Conn) confirmHandshake(now time.Time) {
// discardKeys discards unused packet protection keys.
// https://www.rfc-editor.org/rfc/rfc9001#section-4.9
func (c *Conn) discardKeys(now time.Time, space numberSpace) {
+ if err := c.crypto[space].discardKeys(); err != nil {
+ c.abort(now, err)
+ }
switch space {
case initialSpace:
c.keysInitial.discard()
diff --git a/quic/conn_recv.go b/quic/conn_recv.go
index b1354cd3a1..dbfe34a343 100644
--- a/quic/conn_recv.go
+++ b/quic/conn_recv.go
@@ -285,6 +285,7 @@ func (c *Conn) handleFrames(now time.Time, dgram *datagram, ptype packetType, sp
__01 = packetType0RTT | packetType1RTT
___1 = packetType1RTT
)
+ hasCrypto := false
for len(payload) > 0 {
switch payload[0] {
case frameTypePadding, frameTypeAck, frameTypeAckECN,
@@ -322,6 +323,7 @@ func (c *Conn) handleFrames(now time.Time, dgram *datagram, ptype packetType, sp
if !frameOK(c, ptype, IH_1) {
return
}
+ hasCrypto = true
n = c.handleCryptoFrame(now, space, payload)
case frameTypeNewToken:
if !frameOK(c, ptype, ___1) {
@@ -406,6 +408,15 @@ func (c *Conn) handleFrames(now time.Time, dgram *datagram, ptype packetType, sp
}
payload = payload[n:]
}
+ if hasCrypto {
+ // Process TLS events after handling all frames in a packet.
+ // TLS events can cause us to drop state for a number space,
+ // so do that last, to avoid handling frames differently
+ // depending on whether they come before or after a CRYPTO frame.
+ if err := c.handleTLSEvents(now); err != nil {
+ c.abort(now, err)
+ }
+ }
return ackEliciting
}
diff --git a/quic/conn_recv_test.go b/quic/conn_recv_test.go
new file mode 100644
index 0000000000..0e94731bf7
--- /dev/null
+++ b/quic/conn_recv_test.go
@@ -0,0 +1,60 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "crypto/tls"
+ "testing"
+)
+
+func TestConnReceiveAckForUnsentPacket(t *testing.T) {
+ tc := newTestConn(t, serverSide, permissiveTransportParameters)
+ tc.handshake()
+ tc.writeFrames(packetType1RTT,
+ debugFrameAck{
+ ackDelay: 0,
+ ranges: []i64range[packetNumber]{{0, 10}},
+ })
+ tc.wantFrame("ACK for unsent packet causes CONNECTION_CLOSE",
+ packetType1RTT, debugFrameConnectionCloseTransport{
+ code: errProtocolViolation,
+ })
+}
+
+// Issue #70703: If a packet contains both a CRYPTO frame which causes us to
+// drop state for a number space, and also contains a valid ACK frame for that space,
+// we shouldn't complain about the ACK.
+func TestConnReceiveAckForDroppedSpace(t *testing.T) {
+ tc := newTestConn(t, serverSide, permissiveTransportParameters)
+ tc.ignoreFrame(frameTypeAck)
+ tc.ignoreFrame(frameTypeNewConnectionID)
+
+ tc.writeFrames(packetTypeInitial,
+ debugFrameCrypto{
+ data: tc.cryptoDataIn[tls.QUICEncryptionLevelInitial],
+ })
+ tc.wantFrame("send Initial crypto",
+ packetTypeInitial, debugFrameCrypto{
+ data: tc.cryptoDataOut[tls.QUICEncryptionLevelInitial],
+ })
+ tc.wantFrame("send Handshake crypto",
+ packetTypeHandshake, debugFrameCrypto{
+ data: tc.cryptoDataOut[tls.QUICEncryptionLevelHandshake],
+ })
+
+ tc.writeFrames(packetTypeHandshake,
+ debugFrameCrypto{
+ data: tc.cryptoDataIn[tls.QUICEncryptionLevelHandshake],
+ },
+ debugFrameAck{
+ ackDelay: 0,
+ ranges: []i64range[packetNumber]{{0, tc.lastPacket.num + 1}},
+ })
+ tc.wantFrame("handshake finishes",
+ packetType1RTT, debugFrameHandshakeDone{})
+ tc.wantIdle("connection is idle")
+}
diff --git a/quic/crypto_stream.go b/quic/crypto_stream.go
index a4dcb32eb7..806c963943 100644
--- a/quic/crypto_stream.go
+++ b/quic/crypto_stream.go
@@ -139,3 +139,21 @@ func (s *cryptoStream) sendData(off int64, b []byte) {
s.out.copy(off, b)
s.outunsent.sub(off, off+int64(len(b)))
}
+
+// discardKeys is called when the packet protection keys for the stream are dropped.
+func (s *cryptoStream) discardKeys() error {
+ if s.in.end-s.in.start != 0 {
+ // The peer sent some unprocessed CRYPTO data that we're about to discard.
+		// Close the connection with a TLS unexpected_message alert.
+ // https://www.rfc-editor.org/rfc/rfc5246#section-7.2.2
+ const unexpectedMessage = 10
+ return localTransportError{
+ code: errTLSBase + unexpectedMessage,
+ reason: "excess crypto data",
+ }
+ }
+ // Discard any unacked (but presumably received) data in our output buffer.
+ s.out.discardBefore(s.out.end)
+ *s = cryptoStream{}
+ return nil
+}
diff --git a/quic/endpoint_test.go b/quic/endpoint_test.go
index d5f436e6d7..dc1c510971 100644
--- a/quic/endpoint_test.go
+++ b/quic/endpoint_test.go
@@ -13,6 +13,7 @@ import (
"io"
"log/slog"
"net/netip"
+ "runtime"
"testing"
"time"
@@ -23,6 +24,12 @@ func TestConnect(t *testing.T) {
newLocalConnPair(t, &Config{}, &Config{})
}
+func TestConnectDefaultTLSConfig(t *testing.T) {
+ serverConfig := newTestTLSConfigWithMoreDefaults(serverSide)
+ clientConfig := newTestTLSConfigWithMoreDefaults(clientSide)
+ newLocalConnPair(t, &Config{TLSConfig: serverConfig}, &Config{TLSConfig: clientConfig})
+}
+
func TestStreamTransfer(t *testing.T) {
ctx := context.Background()
cli, srv := newLocalConnPair(t, &Config{}, &Config{})
@@ -63,6 +70,10 @@ func TestStreamTransfer(t *testing.T) {
}
func newLocalConnPair(t testing.TB, conf1, conf2 *Config) (clientConn, serverConn *Conn) {
+ switch runtime.GOOS {
+ case "plan9":
+ t.Skipf("ReadMsgUDP not supported on %s", runtime.GOOS)
+ }
t.Helper()
ctx := context.Background()
e1 := newLocalEndpoint(t, serverSide, conf1)
diff --git a/quic/main_test.go b/quic/main_test.go
index ecd0b1e9f6..25e0096e43 100644
--- a/quic/main_test.go
+++ b/quic/main_test.go
@@ -16,6 +16,20 @@ import (
)
func TestMain(m *testing.M) {
+ // Add all goroutines running at the start of the test to the set
+ // of not-leaked goroutines. This includes TestMain, and anything else
+ // that might have been started by test infrastructure.
+ skip := [][]byte{
+ []byte("created by os/signal.Notify"),
+ []byte("gotraceback_test.go"),
+ }
+ buf := make([]byte, 2<<20)
+ buf = buf[:runtime.Stack(buf, true)]
+ for _, g := range bytes.Split(buf, []byte("\n\n")) {
+ id, _, _ := bytes.Cut(g, []byte("["))
+ skip = append(skip, id)
+ }
+
defer os.Exit(m.Run())
// Look for leaked goroutines.
@@ -34,12 +48,13 @@ func TestMain(m *testing.M) {
buf = buf[:runtime.Stack(buf, true)]
leaked := false
for _, g := range bytes.Split(buf, []byte("\n\n")) {
- if bytes.Contains(g, []byte("quic.TestMain")) ||
- bytes.Contains(g, []byte("created by os/signal.Notify")) ||
- bytes.Contains(g, []byte("gotraceback_test.go")) {
- continue
- }
leaked = true
+ for _, s := range skip {
+ if bytes.Contains(g, s) {
+ leaked = false
+ break
+ }
+ }
}
if !leaked {
break
diff --git a/quic/packet_protection.go b/quic/packet_protection.go
index 1f939f491d..fe48c14c5d 100644
--- a/quic/packet_protection.go
+++ b/quic/packet_protection.go
@@ -351,7 +351,13 @@ func (k *updatingKeyPair) init() {
// We perform the first key update early in the connection so a peer
// which does not support key updates will fail rapidly,
// rather than after the connection has been long established.
- k.updateAfter = 1000
+ //
+ // The QUIC interop runner "keyupdate" test requires that the client
+ // initiate a key rotation early in the connection. Increasing this
+ // value may cause interop test failures; if we do want to increase it,
+ // we should either skip the keyupdate test or provide a way to override
+ // the setting in interop tests.
+ k.updateAfter = 100
}
func (k *updatingKeyPair) canRead() bool {
diff --git a/quic/tls.go b/quic/tls.go
index e2f2e5bde1..89b31842cd 100644
--- a/quic/tls.go
+++ b/quic/tls.go
@@ -119,11 +119,7 @@ func (c *Conn) handleCrypto(now time.Time, space numberSpace, off int64, data []
default:
return errors.New("quic: internal error: received CRYPTO frame in unexpected number space")
}
- err := c.crypto[space].handleCrypto(off, data, func(b []byte) error {
+ return c.crypto[space].handleCrypto(off, data, func(b []byte) error {
return c.tls.HandleData(level, b)
})
- if err != nil {
- return err
- }
- return c.handleTLSEvents(now)
}
diff --git a/quic/tls_test.go b/quic/tls_test.go
index 9c1dd364ec..f4abdda582 100644
--- a/quic/tls_test.go
+++ b/quic/tls_test.go
@@ -615,3 +615,32 @@ func TestConnAEADLimitReached(t *testing.T) {
tc.advance(1 * time.Second)
tc.wantIdle("auth failures at limit: conn does not process additional packets")
}
+
+func TestConnKeysDiscardedWithExcessCryptoData(t *testing.T) {
+ tc := newTestConn(t, serverSide, permissiveTransportParameters)
+ tc.ignoreFrame(frameTypeAck)
+ tc.ignoreFrame(frameTypeNewConnectionID)
+ tc.ignoreFrame(frameTypeCrypto)
+
+ // One byte of excess CRYPTO data, separated from the valid data by a one-byte gap.
+ tc.writeFrames(packetTypeInitial,
+ debugFrameCrypto{
+ off: int64(len(tc.cryptoDataIn[tls.QUICEncryptionLevelInitial]) + 1),
+ data: []byte{0},
+ })
+ tc.writeFrames(packetTypeInitial,
+ debugFrameCrypto{
+ data: tc.cryptoDataIn[tls.QUICEncryptionLevelInitial],
+ })
+
+ // We don't drop the Initial keys and discover the excess data until the client
+ // sends a Handshake packet.
+ tc.writeFrames(packetTypeHandshake,
+ debugFrameCrypto{
+ data: tc.cryptoDataIn[tls.QUICEncryptionLevelHandshake],
+ })
+ tc.wantFrame("connection closed due to excess Initial CRYPTO data",
+ packetType1RTT, debugFrameConnectionCloseTransport{
+ code: errTLSBase + 10,
+ })
+}
diff --git a/quic/tlsconfig_test.go b/quic/tlsconfig_test.go
index 47bfb05983..5ed9818d57 100644
--- a/quic/tlsconfig_test.go
+++ b/quic/tlsconfig_test.go
@@ -20,6 +20,13 @@ func newTestTLSConfig(side connSide) *tls.Config {
tls.TLS_CHACHA20_POLY1305_SHA256,
},
MinVersion: tls.VersionTLS13,
+ // Default key exchange mechanisms as of Go 1.23 minus X25519Kyber768Draft00,
+ // which bloats the client hello enough to spill into a second datagram.
+	// Tests were written with the assumption each flight in the handshake
+ // fits in one datagram, and it's simpler to keep that property.
+ CurvePreferences: []tls.CurveID{
+ tls.X25519, tls.CurveP256, tls.CurveP384, tls.CurveP521,
+ },
}
if side == serverSide {
config.Certificates = []tls.Certificate{testCert}
@@ -27,6 +34,18 @@ func newTestTLSConfig(side connSide) *tls.Config {
return config
}
+// newTestTLSConfigWithMoreDefaults returns a *tls.Config for testing
+// which behaves more like a default, empty config.
+//
+// In particular, it uses the default curve preferences, which can increase
+// the size of the handshake.
+func newTestTLSConfigWithMoreDefaults(side connSide) *tls.Config {
+ config := newTestTLSConfig(side)
+ config.CipherSuites = nil
+ config.CurvePreferences = nil
+ return config
+}
+
var testCert = func() tls.Certificate {
cert, err := tls.X509KeyPair(localhostCert, localhostKey)
if err != nil {
diff --git a/quic/udp_test.go b/quic/udp_test.go
index d3732c140e..5c4ba10fcc 100644
--- a/quic/udp_test.go
+++ b/quic/udp_test.go
@@ -129,6 +129,8 @@ func runUDPTest(t *testing.T, f func(t *testing.T, u udpTest)) {
if test.srcNet == "udp6" && test.dstNet == "udp" {
t.Skipf("%v: no support for mapping IPv4 address to IPv6", runtime.GOOS)
}
+ case "plan9":
+ t.Skipf("ReadMsgUDP not supported on %s", runtime.GOOS)
}
if runtime.GOARCH == "wasm" && test.srcNet == "udp6" {
t.Skipf("%v: IPv6 tests fail when using wasm fake net", runtime.GOARCH)
diff --git a/route/address.go b/route/address.go
index 5443d67223..b649f43141 100644
--- a/route/address.go
+++ b/route/address.go
@@ -170,20 +170,37 @@ func (a *Inet6Addr) marshal(b []byte) (int, error) {
// parseInetAddr parses b as an internet address for IPv4 or IPv6.
func parseInetAddr(af int, b []byte) (Addr, error) {
+ const (
+ off4 = 4 // offset of in_addr
+ off6 = 8 // offset of in6_addr
+ )
switch af {
case syscall.AF_INET:
- if len(b) < sizeofSockaddrInet {
+ if len(b) < (off4+1) || len(b) < int(b[0]) || b[0] == 0 {
return nil, errInvalidAddr
}
+ sockAddrLen := int(b[0])
a := &Inet4Addr{}
- copy(a.IP[:], b[4:8])
+ n := off4 + 4
+ if sockAddrLen < n {
+ n = sockAddrLen
+ }
+ copy(a.IP[:], b[off4:n])
return a, nil
case syscall.AF_INET6:
- if len(b) < sizeofSockaddrInet6 {
+ if len(b) < (off6+1) || len(b) < int(b[0]) || b[0] == 0 {
return nil, errInvalidAddr
}
- a := &Inet6Addr{ZoneID: int(nativeEndian.Uint32(b[24:28]))}
- copy(a.IP[:], b[8:24])
+ sockAddrLen := int(b[0])
+ n := off6 + 16
+ if sockAddrLen < n {
+ n = sockAddrLen
+ }
+ a := &Inet6Addr{}
+ if sockAddrLen == sizeofSockaddrInet6 {
+ a.ZoneID = int(nativeEndian.Uint32(b[24:28]))
+ }
+ copy(a.IP[:], b[off6:n])
if a.IP[0] == 0xfe && a.IP[1]&0xc0 == 0x80 || a.IP[0] == 0xff && (a.IP[1]&0x0f == 0x01 || a.IP[1]&0x0f == 0x02) {
// KAME based IPv6 protocol stack usually
// embeds the interface index in the
@@ -387,12 +404,16 @@ func parseAddrs(attrs uint, fn func(int, []byte) (int, Addr, error), b []byte) (
}
b = b[l:]
case syscall.AF_INET, syscall.AF_INET6:
- af = int(b[1])
- a, err := parseInetAddr(af, b)
- if err != nil {
- return nil, err
+ // #70528: if the sockaddrlen is 0, no address to parse inside,
+ // skip over the record.
+ if b[0] > 0 {
+ af = int(b[1])
+ a, err := parseInetAddr(af, b)
+ if err != nil {
+ return nil, err
+ }
+ as[i] = a
}
- as[i] = a
l := roundup(int(b[0]))
if len(b) < l {
return nil, errMessageTooShort
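A rough sketch (hypothetical helper, not part of the patch) of the IPv4 branch of the length-aware parsing above, assuming b[0] is the BSD sa_len byte and the address starts at offset 4; it adds an extra lower-bound clamp for records too short to carry any address bytes:

func parseTruncatedInet4(b []byte) (ip [4]byte, ok bool) {
	const off4 = 4 // offset of in_addr within sockaddr_in
	if len(b) < off4+1 || len(b) < int(b[0]) || b[0] == 0 {
		return ip, false // record too short or empty, as in the patch
	}
	end := int(b[0])
	if end > off4+4 {
		end = off4 + 4 // never copy more than the 4 address bytes
	}
	if end < off4 {
		end = off4 // nothing to copy; leave ip as zeroes
	}
	copy(ip[:], b[off4:end])
	return ip, true
}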
diff --git a/route/address_darwin_test.go b/route/address_darwin_test.go
index b819183a78..add72e37ec 100644
--- a/route/address_darwin_test.go
+++ b/route/address_darwin_test.go
@@ -42,6 +42,112 @@ var parseAddrsOnDarwinLittleEndianTests = []parseAddrsOnDarwinTest{
nil,
},
},
+ {
+ syscall.RTA_DST | syscall.RTA_GATEWAY | syscall.RTA_NETMASK,
+ parseKernelInetAddr,
+ []byte{
+ 0x10, 0x02, 0x00, 0x00, 0x64, 0x71, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0x14, 0x12, 0x21, 0x00, 0x01, 0x08, 0x00, 0x00,
+ 0x75, 0x74, 0x75, 0x6e, 0x34, 0x33, 0x31, 0x39,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x06, 0x02, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00,
+ },
+ []Addr{
+ &Inet4Addr{IP: [4]byte{100, 113, 0, 0}},
+ &LinkAddr{Index: 33, Name: "utun4319"},
+ &Inet4Addr{IP: [4]byte{255, 255, 0, 0}},
+ nil,
+ nil,
+ nil,
+ nil,
+ nil,
+ },
+ },
+ // route -n add -inet6 fd84:1b4e:6281:: -prefixlen 48 fe80::f22f:4bff:fe09:3bff%utun4319
+ // gw fe80:0000:0000:0000:f22f:4bff:fe09:3bff
+ {
+ syscall.RTA_DST | syscall.RTA_GATEWAY | syscall.RTA_NETMASK,
+ parseKernelInetAddr,
+ []byte{
+ 0x1c, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xfd, 0x84, 0x1b, 0x4e, 0x62, 0x81, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x1c, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xfe, 0x80, 0x00, 0x21, 0x00, 0x00, 0x00, 0x00,
+ 0xf2, 0x2f, 0x4b, 0xff, 0xfe, 0x09, 0x3b, 0xff,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x0e, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00,
+ },
+ []Addr{
+ &Inet6Addr{IP: [16]byte{0xfd, 0x84, 0x1b, 0x4e, 0x62, 0x81}},
+ &Inet6Addr{IP: [16]byte{0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf2, 0x2f, 0x4b, 0xff, 0xfe, 0x09, 0x3b, 0xff}, ZoneID: 33},
+ &Inet6Addr{IP: [16]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
+ nil,
+ nil,
+ nil,
+ nil,
+ nil,
+ },
+ },
+ // golang/go#70528, the kernel can produce addresses of length 0
+ {
+ syscall.RTA_DST | syscall.RTA_GATEWAY | syscall.RTA_NETMASK,
+ parseKernelInetAddr,
+ []byte{
+ 0x00, 0x1e, 0x00, 0x00,
+
+ 0x1c, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xfe, 0x80, 0x00, 0x21, 0x00, 0x00, 0x00, 0x00,
+ 0xf2, 0x2f, 0x4b, 0xff, 0xfe, 0x09, 0x3b, 0xff,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x0e, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00,
+ },
+ []Addr{
+ nil,
+ &Inet6Addr{IP: [16]byte{0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf2, 0x2f, 0x4b, 0xff, 0xfe, 0x09, 0x3b, 0xff}, ZoneID: 33},
+ &Inet6Addr{IP: [16]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
+ nil,
+ nil,
+ nil,
+ nil,
+ nil,
+ },
+ },
+ // Additional case: golang/go/issues/70528#issuecomment-2498692877
+ {
+ syscall.RTA_DST | syscall.RTA_GATEWAY | syscall.RTA_NETMASK,
+ parseKernelInetAddr,
+ []byte{
+ 0x84, 0x00, 0x05, 0x04, 0x01, 0x00, 0x00, 0x00, 0x03, 0x08, 0x00, 0x01, 0x15, 0x00, 0x00, 0x00,
+ 0x1B, 0x01, 0x00, 0x00, 0xF5, 0x5A, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x02, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+ 0x14, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ },
+ []Addr{
+ &Inet4Addr{IP: [4]byte{0x0, 0x0, 0x0, 0x0}},
+ nil,
+ nil,
+ nil,
+ nil,
+ nil,
+ nil,
+ nil,
+ },
+ },
}
func TestParseAddrsOnDarwin(t *testing.T) {
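
The ZoneID: 33 expected for the IPv6 gateway above comes from the KAME convention noted in the parser: for link-local (fe80::/10) and interface- or link-local multicast addresses, the kernel embeds the interface index in bytes 2-3, and the parser moves it into the zone ID and zeroes those bytes. A small sketch of that extraction using the test case's gateway bytes (the helper name is made up for illustration):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // splitKAMEZone is a hypothetical helper mirroring the parser's handling of
    // KAME-style addresses: the embedded interface index in bytes 2-3 is moved
    // out into a zone ID and the bytes are cleared.
    func splitKAMEZone(ip [16]byte) ([16]byte, int) {
        if ip[0] == 0xfe && ip[1]&0xc0 == 0x80 || ip[0] == 0xff && (ip[1]&0x0f == 0x01 || ip[1]&0x0f == 0x02) {
            zone := int(binary.BigEndian.Uint16(ip[2:4]))
            ip[2], ip[3] = 0, 0
            return ip, zone
        }
        return ip, 0
    }

    func main() {
        // Gateway bytes from the second test case: fe80:0021::f22f:4bff:fe09:3bff.
        gw := [16]byte{0xfe, 0x80, 0x00, 0x21, 0, 0, 0, 0, 0xf2, 0x2f, 0x4b, 0xff, 0xfe, 0x09, 0x3b, 0xff}
        ip, zone := splitKAMEZone(gw)
        fmt.Printf("ip=%x zone=%d\n", ip, zone) // zone 33 matches the expected Inet6Addr.ZoneID
    }
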
diff --git a/route/defs_darwin.go b/route/defs_darwin.go
index ec56ca02e1..46a4ed6694 100644
--- a/route/defs_darwin.go
+++ b/route/defs_darwin.go
@@ -24,14 +24,10 @@ const (
sizeofIfmaMsghdrDarwin15 = C.sizeof_struct_ifma_msghdr
sizeofIfMsghdr2Darwin15 = C.sizeof_struct_if_msghdr2
sizeofIfmaMsghdr2Darwin15 = C.sizeof_struct_ifma_msghdr2
- sizeofIfDataDarwin15 = C.sizeof_struct_if_data
- sizeofIfData64Darwin15 = C.sizeof_struct_if_data64
sizeofRtMsghdrDarwin15 = C.sizeof_struct_rt_msghdr
sizeofRtMsghdr2Darwin15 = C.sizeof_struct_rt_msghdr2
- sizeofRtMetricsDarwin15 = C.sizeof_struct_rt_metrics
- sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
- sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
- sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+ sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
+ sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
)
diff --git a/route/defs_dragonfly.go b/route/defs_dragonfly.go
index 9bf202dda4..52aa700a6d 100644
--- a/route/defs_dragonfly.go
+++ b/route/defs_dragonfly.go
@@ -47,10 +47,8 @@ const (
sizeofIfaMsghdrDragonFlyBSD58 = C.sizeof_struct_ifa_msghdr_dfly58
- sizeofRtMsghdrDragonFlyBSD4 = C.sizeof_struct_rt_msghdr
- sizeofRtMetricsDragonFlyBSD4 = C.sizeof_struct_rt_metrics
+ sizeofRtMsghdrDragonFlyBSD4 = C.sizeof_struct_rt_msghdr
- sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
- sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
- sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+ sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
+ sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
)
diff --git a/route/defs_freebsd.go b/route/defs_freebsd.go
index abb2dc0957..68778f2d16 100644
--- a/route/defs_freebsd.go
+++ b/route/defs_freebsd.go
@@ -220,7 +220,6 @@ import "C"
const (
sizeofIfMsghdrlFreeBSD10 = C.sizeof_struct_if_msghdrl
sizeofIfaMsghdrFreeBSD10 = C.sizeof_struct_ifa_msghdr
- sizeofIfaMsghdrlFreeBSD10 = C.sizeof_struct_ifa_msghdrl
sizeofIfmaMsghdrFreeBSD10 = C.sizeof_struct_ifma_msghdr
sizeofIfAnnouncemsghdrFreeBSD10 = C.sizeof_struct_if_announcemsghdr
@@ -233,15 +232,7 @@ const (
sizeofIfMsghdrFreeBSD10 = C.sizeof_struct_if_msghdr_freebsd10
sizeofIfMsghdrFreeBSD11 = C.sizeof_struct_if_msghdr_freebsd11
- sizeofIfDataFreeBSD7 = C.sizeof_struct_if_data_freebsd7
- sizeofIfDataFreeBSD8 = C.sizeof_struct_if_data_freebsd8
- sizeofIfDataFreeBSD9 = C.sizeof_struct_if_data_freebsd9
- sizeofIfDataFreeBSD10 = C.sizeof_struct_if_data_freebsd10
- sizeofIfDataFreeBSD11 = C.sizeof_struct_if_data_freebsd11
-
- sizeofIfMsghdrlFreeBSD10Emu = C.sizeof_struct_if_msghdrl
sizeofIfaMsghdrFreeBSD10Emu = C.sizeof_struct_ifa_msghdr
- sizeofIfaMsghdrlFreeBSD10Emu = C.sizeof_struct_ifa_msghdrl
sizeofIfmaMsghdrFreeBSD10Emu = C.sizeof_struct_ifma_msghdr
sizeofIfAnnouncemsghdrFreeBSD10Emu = C.sizeof_struct_if_announcemsghdr
@@ -254,13 +245,6 @@ const (
sizeofIfMsghdrFreeBSD10Emu = C.sizeof_struct_if_msghdr_freebsd10
sizeofIfMsghdrFreeBSD11Emu = C.sizeof_struct_if_msghdr_freebsd11
- sizeofIfDataFreeBSD7Emu = C.sizeof_struct_if_data_freebsd7
- sizeofIfDataFreeBSD8Emu = C.sizeof_struct_if_data_freebsd8
- sizeofIfDataFreeBSD9Emu = C.sizeof_struct_if_data_freebsd9
- sizeofIfDataFreeBSD10Emu = C.sizeof_struct_if_data_freebsd10
- sizeofIfDataFreeBSD11Emu = C.sizeof_struct_if_data_freebsd11
-
- sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
- sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
- sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+ sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
+ sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
)
diff --git a/route/defs_netbsd.go b/route/defs_netbsd.go
index 8e89934c5a..fb60f43c83 100644
--- a/route/defs_netbsd.go
+++ b/route/defs_netbsd.go
@@ -23,10 +23,8 @@ const (
sizeofIfaMsghdrNetBSD7 = C.sizeof_struct_ifa_msghdr
sizeofIfAnnouncemsghdrNetBSD7 = C.sizeof_struct_if_announcemsghdr
- sizeofRtMsghdrNetBSD7 = C.sizeof_struct_rt_msghdr
- sizeofRtMetricsNetBSD7 = C.sizeof_struct_rt_metrics
+ sizeofRtMsghdrNetBSD7 = C.sizeof_struct_rt_msghdr
- sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
- sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
- sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+ sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
+ sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
)
diff --git a/route/defs_openbsd.go b/route/defs_openbsd.go
index 8f3218bc63..471558d9ef 100644
--- a/route/defs_openbsd.go
+++ b/route/defs_openbsd.go
@@ -21,7 +21,6 @@ import "C"
const (
sizeofRtMsghdr = C.sizeof_struct_rt_msghdr
- sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
- sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
- sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+ sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
+ sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
)
diff --git a/route/zsys_darwin.go b/route/zsys_darwin.go
index 56a0c66f44..adaa460026 100644
--- a/route/zsys_darwin.go
+++ b/route/zsys_darwin.go
@@ -9,14 +9,10 @@ const (
sizeofIfmaMsghdrDarwin15 = 0x10
sizeofIfMsghdr2Darwin15 = 0xa0
sizeofIfmaMsghdr2Darwin15 = 0x14
- sizeofIfDataDarwin15 = 0x60
- sizeofIfData64Darwin15 = 0x80
sizeofRtMsghdrDarwin15 = 0x5c
sizeofRtMsghdr2Darwin15 = 0x5c
- sizeofRtMetricsDarwin15 = 0x38
- sizeofSockaddrStorage = 0x80
- sizeofSockaddrInet = 0x10
- sizeofSockaddrInet6 = 0x1c
+ sizeofSockaddrInet = 0x10
+ sizeofSockaddrInet6 = 0x1c
)
diff --git a/route/zsys_dragonfly.go b/route/zsys_dragonfly.go
index f7c7a60cd6..209cb20af8 100644
--- a/route/zsys_dragonfly.go
+++ b/route/zsys_dragonfly.go
@@ -11,10 +11,8 @@ const (
sizeofIfaMsghdrDragonFlyBSD58 = 0x18
- sizeofRtMsghdrDragonFlyBSD4 = 0x98
- sizeofRtMetricsDragonFlyBSD4 = 0x70
+ sizeofRtMsghdrDragonFlyBSD4 = 0x98
- sizeofSockaddrStorage = 0x80
- sizeofSockaddrInet = 0x10
- sizeofSockaddrInet6 = 0x1c
+ sizeofSockaddrInet = 0x10
+ sizeofSockaddrInet6 = 0x1c
)
diff --git a/route/zsys_freebsd_386.go b/route/zsys_freebsd_386.go
index 3f985c7ee9..ec617772b2 100644
--- a/route/zsys_freebsd_386.go
+++ b/route/zsys_freebsd_386.go
@@ -6,7 +6,6 @@ package route
const (
sizeofIfMsghdrlFreeBSD10 = 0x68
sizeofIfaMsghdrFreeBSD10 = 0x14
- sizeofIfaMsghdrlFreeBSD10 = 0x6c
sizeofIfmaMsghdrFreeBSD10 = 0x10
sizeofIfAnnouncemsghdrFreeBSD10 = 0x18
@@ -19,18 +18,10 @@ const (
sizeofIfMsghdrFreeBSD10 = 0x64
sizeofIfMsghdrFreeBSD11 = 0xa8
- sizeofIfDataFreeBSD7 = 0x50
- sizeofIfDataFreeBSD8 = 0x50
- sizeofIfDataFreeBSD9 = 0x50
- sizeofIfDataFreeBSD10 = 0x54
- sizeofIfDataFreeBSD11 = 0x98
-
// MODIFIED BY HAND FOR 386 EMULATION ON AMD64
// 386 EMULATION USES THE UNDERLYING RAW DATA LAYOUT
- sizeofIfMsghdrlFreeBSD10Emu = 0xb0
sizeofIfaMsghdrFreeBSD10Emu = 0x14
- sizeofIfaMsghdrlFreeBSD10Emu = 0xb0
sizeofIfmaMsghdrFreeBSD10Emu = 0x10
sizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18
@@ -43,13 +34,6 @@ const (
sizeofIfMsghdrFreeBSD10Emu = 0xa8
sizeofIfMsghdrFreeBSD11Emu = 0xa8
- sizeofIfDataFreeBSD7Emu = 0x98
- sizeofIfDataFreeBSD8Emu = 0x98
- sizeofIfDataFreeBSD9Emu = 0x98
- sizeofIfDataFreeBSD10Emu = 0x98
- sizeofIfDataFreeBSD11Emu = 0x98
-
- sizeofSockaddrStorage = 0x80
- sizeofSockaddrInet = 0x10
- sizeofSockaddrInet6 = 0x1c
+ sizeofSockaddrInet = 0x10
+ sizeofSockaddrInet6 = 0x1c
)
diff --git a/route/zsys_freebsd_amd64.go b/route/zsys_freebsd_amd64.go
index 9293393698..3d7f31d13e 100644
--- a/route/zsys_freebsd_amd64.go
+++ b/route/zsys_freebsd_amd64.go
@@ -6,7 +6,6 @@ package route
const (
sizeofIfMsghdrlFreeBSD10 = 0xb0
sizeofIfaMsghdrFreeBSD10 = 0x14
- sizeofIfaMsghdrlFreeBSD10 = 0xb0
sizeofIfmaMsghdrFreeBSD10 = 0x10
sizeofIfAnnouncemsghdrFreeBSD10 = 0x18
@@ -19,15 +18,7 @@ const (
sizeofIfMsghdrFreeBSD10 = 0xa8
sizeofIfMsghdrFreeBSD11 = 0xa8
- sizeofIfDataFreeBSD7 = 0x98
- sizeofIfDataFreeBSD8 = 0x98
- sizeofIfDataFreeBSD9 = 0x98
- sizeofIfDataFreeBSD10 = 0x98
- sizeofIfDataFreeBSD11 = 0x98
-
- sizeofIfMsghdrlFreeBSD10Emu = 0xb0
sizeofIfaMsghdrFreeBSD10Emu = 0x14
- sizeofIfaMsghdrlFreeBSD10Emu = 0xb0
sizeofIfmaMsghdrFreeBSD10Emu = 0x10
sizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18
@@ -40,13 +31,6 @@ const (
sizeofIfMsghdrFreeBSD10Emu = 0xa8
sizeofIfMsghdrFreeBSD11Emu = 0xa8
- sizeofIfDataFreeBSD7Emu = 0x98
- sizeofIfDataFreeBSD8Emu = 0x98
- sizeofIfDataFreeBSD9Emu = 0x98
- sizeofIfDataFreeBSD10Emu = 0x98
- sizeofIfDataFreeBSD11Emu = 0x98
-
- sizeofSockaddrStorage = 0x80
- sizeofSockaddrInet = 0x10
- sizeofSockaddrInet6 = 0x1c
+ sizeofSockaddrInet = 0x10
+ sizeofSockaddrInet6 = 0x1c
)
diff --git a/route/zsys_freebsd_arm.go b/route/zsys_freebsd_arm.go
index a2bdb4ad3b..931afa3931 100644
--- a/route/zsys_freebsd_arm.go
+++ b/route/zsys_freebsd_arm.go
@@ -6,7 +6,6 @@ package route
const (
sizeofIfMsghdrlFreeBSD10 = 0x68
sizeofIfaMsghdrFreeBSD10 = 0x14
- sizeofIfaMsghdrlFreeBSD10 = 0x6c
sizeofIfmaMsghdrFreeBSD10 = 0x10
sizeofIfAnnouncemsghdrFreeBSD10 = 0x18
@@ -19,15 +18,7 @@ const (
sizeofIfMsghdrFreeBSD10 = 0x70
sizeofIfMsghdrFreeBSD11 = 0xa8
- sizeofIfDataFreeBSD7 = 0x60
- sizeofIfDataFreeBSD8 = 0x60
- sizeofIfDataFreeBSD9 = 0x60
- sizeofIfDataFreeBSD10 = 0x60
- sizeofIfDataFreeBSD11 = 0x98
-
- sizeofIfMsghdrlFreeBSD10Emu = 0x68
sizeofIfaMsghdrFreeBSD10Emu = 0x14
- sizeofIfaMsghdrlFreeBSD10Emu = 0x6c
sizeofIfmaMsghdrFreeBSD10Emu = 0x10
sizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18
@@ -40,13 +31,6 @@ const (
sizeofIfMsghdrFreeBSD10Emu = 0x70
sizeofIfMsghdrFreeBSD11Emu = 0xa8
- sizeofIfDataFreeBSD7Emu = 0x60
- sizeofIfDataFreeBSD8Emu = 0x60
- sizeofIfDataFreeBSD9Emu = 0x60
- sizeofIfDataFreeBSD10Emu = 0x60
- sizeofIfDataFreeBSD11Emu = 0x98
-
- sizeofSockaddrStorage = 0x80
- sizeofSockaddrInet = 0x10
- sizeofSockaddrInet6 = 0x1c
+ sizeofSockaddrInet = 0x10
+ sizeofSockaddrInet6 = 0x1c
)
diff --git a/route/zsys_freebsd_arm64.go b/route/zsys_freebsd_arm64.go
index 9293393698..3d7f31d13e 100644
--- a/route/zsys_freebsd_arm64.go
+++ b/route/zsys_freebsd_arm64.go
@@ -6,7 +6,6 @@ package route
const (
sizeofIfMsghdrlFreeBSD10 = 0xb0
sizeofIfaMsghdrFreeBSD10 = 0x14
- sizeofIfaMsghdrlFreeBSD10 = 0xb0
sizeofIfmaMsghdrFreeBSD10 = 0x10
sizeofIfAnnouncemsghdrFreeBSD10 = 0x18
@@ -19,15 +18,7 @@ const (
sizeofIfMsghdrFreeBSD10 = 0xa8
sizeofIfMsghdrFreeBSD11 = 0xa8
- sizeofIfDataFreeBSD7 = 0x98
- sizeofIfDataFreeBSD8 = 0x98
- sizeofIfDataFreeBSD9 = 0x98
- sizeofIfDataFreeBSD10 = 0x98
- sizeofIfDataFreeBSD11 = 0x98
-
- sizeofIfMsghdrlFreeBSD10Emu = 0xb0
sizeofIfaMsghdrFreeBSD10Emu = 0x14
- sizeofIfaMsghdrlFreeBSD10Emu = 0xb0
sizeofIfmaMsghdrFreeBSD10Emu = 0x10
sizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18
@@ -40,13 +31,6 @@ const (
sizeofIfMsghdrFreeBSD10Emu = 0xa8
sizeofIfMsghdrFreeBSD11Emu = 0xa8
- sizeofIfDataFreeBSD7Emu = 0x98
- sizeofIfDataFreeBSD8Emu = 0x98
- sizeofIfDataFreeBSD9Emu = 0x98
- sizeofIfDataFreeBSD10Emu = 0x98
- sizeofIfDataFreeBSD11Emu = 0x98
-
- sizeofSockaddrStorage = 0x80
- sizeofSockaddrInet = 0x10
- sizeofSockaddrInet6 = 0x1c
+ sizeofSockaddrInet = 0x10
+ sizeofSockaddrInet6 = 0x1c
)
diff --git a/route/zsys_freebsd_riscv64.go b/route/zsys_freebsd_riscv64.go
index 9293393698..3d7f31d13e 100644
--- a/route/zsys_freebsd_riscv64.go
+++ b/route/zsys_freebsd_riscv64.go
@@ -6,7 +6,6 @@ package route
const (
sizeofIfMsghdrlFreeBSD10 = 0xb0
sizeofIfaMsghdrFreeBSD10 = 0x14
- sizeofIfaMsghdrlFreeBSD10 = 0xb0
sizeofIfmaMsghdrFreeBSD10 = 0x10
sizeofIfAnnouncemsghdrFreeBSD10 = 0x18
@@ -19,15 +18,7 @@ const (
sizeofIfMsghdrFreeBSD10 = 0xa8
sizeofIfMsghdrFreeBSD11 = 0xa8
- sizeofIfDataFreeBSD7 = 0x98
- sizeofIfDataFreeBSD8 = 0x98
- sizeofIfDataFreeBSD9 = 0x98
- sizeofIfDataFreeBSD10 = 0x98
- sizeofIfDataFreeBSD11 = 0x98
-
- sizeofIfMsghdrlFreeBSD10Emu = 0xb0
sizeofIfaMsghdrFreeBSD10Emu = 0x14
- sizeofIfaMsghdrlFreeBSD10Emu = 0xb0
sizeofIfmaMsghdrFreeBSD10Emu = 0x10
sizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18
@@ -40,13 +31,6 @@ const (
sizeofIfMsghdrFreeBSD10Emu = 0xa8
sizeofIfMsghdrFreeBSD11Emu = 0xa8
- sizeofIfDataFreeBSD7Emu = 0x98
- sizeofIfDataFreeBSD8Emu = 0x98
- sizeofIfDataFreeBSD9Emu = 0x98
- sizeofIfDataFreeBSD10Emu = 0x98
- sizeofIfDataFreeBSD11Emu = 0x98
-
- sizeofSockaddrStorage = 0x80
- sizeofSockaddrInet = 0x10
- sizeofSockaddrInet6 = 0x1c
+ sizeofSockaddrInet = 0x10
+ sizeofSockaddrInet6 = 0x1c
)
diff --git a/route/zsys_netbsd.go b/route/zsys_netbsd.go
index eaffe8c408..90ce707d47 100644
--- a/route/zsys_netbsd.go
+++ b/route/zsys_netbsd.go
@@ -8,10 +8,8 @@ const (
sizeofIfaMsghdrNetBSD7 = 0x18
sizeofIfAnnouncemsghdrNetBSD7 = 0x18
- sizeofRtMsghdrNetBSD7 = 0x78
- sizeofRtMetricsNetBSD7 = 0x50
+ sizeofRtMsghdrNetBSD7 = 0x78
- sizeofSockaddrStorage = 0x80
- sizeofSockaddrInet = 0x10
- sizeofSockaddrInet6 = 0x1c
+ sizeofSockaddrInet = 0x10
+ sizeofSockaddrInet6 = 0x1c
)
diff --git a/route/zsys_openbsd.go b/route/zsys_openbsd.go
index b11b812680..64fbdd98fb 100644
--- a/route/zsys_openbsd.go
+++ b/route/zsys_openbsd.go
@@ -6,7 +6,6 @@ package route
const (
sizeofRtMsghdr = 0x60
- sizeofSockaddrStorage = 0x100
- sizeofSockaddrInet = 0x10
- sizeofSockaddrInet6 = 0x1c
+ sizeofSockaddrInet = 0x10
+ sizeofSockaddrInet6 = 0x1c
)
diff --git a/webdav/file_test.go b/webdav/file_test.go
index e875c136ca..3af53fde31 100644
--- a/webdav/file_test.go
+++ b/webdav/file_test.go
@@ -9,7 +9,6 @@ import (
"encoding/xml"
"fmt"
"io"
- "io/ioutil"
"os"
"path"
"path/filepath"
@@ -518,7 +517,7 @@ func TestDir(t *testing.T) {
t.Skip("see golang.org/issue/11453")
}
- td, err := ioutil.TempDir("", "webdav-test")
+ td, err := os.MkdirTemp("", "webdav-test")
if err != nil {
t.Fatal(err)
}
@@ -758,7 +757,7 @@ func TestMemFile(t *testing.T) {
if err != nil {
t.Fatalf("test case #%d %q: OpenFile: %v", i, tc, err)
}
- gotBytes, err := ioutil.ReadAll(g)
+ gotBytes, err := io.ReadAll(g)
if err != nil {
t.Fatalf("test case #%d %q: ReadAll: %v", i, tc, err)
}
diff --git a/webdav/webdav.go b/webdav/webdav.go
index add2bcd67c..8ff3d100f9 100644
--- a/webdav/webdav.go
+++ b/webdav/webdav.go
@@ -267,6 +267,9 @@ func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int,
f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
+ if os.IsNotExist(err) {
+ return http.StatusConflict, err
+ }
return http.StatusNotFound, err
}
_, copyErr := io.Copy(f, r.Body)
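
Per RFC 4918, section 9.7.1, a PUT whose intermediate collections do not exist must fail with 409 Conflict, which is the case the put_no_parent test below exercises. A minimal client-side sketch of that behavior against a MemFS-backed Handler:

    package main

    import (
        "fmt"
        "net/http"
        "net/http/httptest"
        "strings"

        "golang.org/x/net/webdav"
    )

    func main() {
        h := &webdav.Handler{FileSystem: webdav.NewMemFS(), LockSystem: webdav.NewMemLS()}
        srv := httptest.NewServer(h)
        defer srv.Close()

        // PUT into a collection that was never created: OpenFile fails with a
        // "not exist" error, which now maps to 409 Conflict rather than 404.
        req, err := http.NewRequest("PUT", srv.URL+"/409me/noparent.txt", strings.NewReader("ABC\n"))
        if err != nil {
            panic(err)
        }
        res, err := http.DefaultClient.Do(req)
        if err != nil {
            panic(err)
        }
        defer res.Body.Close()
        fmt.Println(res.StatusCode) // 409 with this change applied; 404 before it
    }
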
diff --git a/webdav/webdav_test.go b/webdav/webdav_test.go
index 2baebe3c97..deb60fb885 100644
--- a/webdav/webdav_test.go
+++ b/webdav/webdav_test.go
@@ -9,7 +9,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
@@ -256,7 +255,7 @@ func TestFilenameEscape(t *testing.T) {
}
defer res.Body.Close()
- b, err := ioutil.ReadAll(res.Body)
+ b, err := io.ReadAll(res.Body)
if err != nil {
return "", "", err
}
@@ -347,3 +346,63 @@ func TestFilenameEscape(t *testing.T) {
}
}
}
+
+func TestPutRequest(t *testing.T) {
+ h := &Handler{
+ FileSystem: NewMemFS(),
+ LockSystem: NewMemLS(),
+ }
+ srv := httptest.NewServer(h)
+ defer srv.Close()
+
+ do := func(method, urlStr string, body string) (*http.Response, error) {
+ bodyReader := strings.NewReader(body)
+ req, err := http.NewRequest(method, urlStr, bodyReader)
+ if err != nil {
+ return nil, err
+ }
+ res, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ return res, nil
+ }
+
+ testCases := []struct {
+ name string
+ urlPrefix string
+ want int
+ }{{
+ name: "put",
+ urlPrefix: "/res",
+ want: http.StatusCreated,
+ }, {
+ name: "put_utf8_segment",
+ urlPrefix: "/res-%e2%82%ac",
+ want: http.StatusCreated,
+ }, {
+ name: "put_empty_segment",
+ urlPrefix: "",
+ want: http.StatusNotFound,
+ }, {
+ name: "put_root_segment",
+ urlPrefix: "/",
+ want: http.StatusNotFound,
+ }, {
+ name: "put_no_parent [RFC4918:S9.7.1]",
+ urlPrefix: "/409me/noparent.txt",
+ want: http.StatusConflict,
+ }}
+
+ for _, tc := range testCases {
+ urlStr := srv.URL + tc.urlPrefix
+ res, err := do("PUT", urlStr, "ABC\n")
+ if err != nil {
+ t.Errorf("name=%q: PUT: %v", tc.name, err)
+ continue
+ }
+ if res.StatusCode != tc.want {
+ t.Errorf("name=%q: got status code %d, want %d", tc.name, res.StatusCode, tc.want)
+ }
+ }
+}
diff --git a/websocket/hybi.go b/websocket/hybi.go
index 48a069e190..dda7434666 100644
--- a/websocket/hybi.go
+++ b/websocket/hybi.go
@@ -16,7 +16,6 @@ import (
"encoding/binary"
"fmt"
"io"
- "io/ioutil"
"net/http"
"net/url"
"strings"
@@ -279,7 +278,7 @@ func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, er
}
}
if header := frame.HeaderReader(); header != nil {
- io.Copy(ioutil.Discard, header)
+ io.Copy(io.Discard, header)
}
switch frame.PayloadType() {
case ContinuationFrame:
@@ -294,7 +293,7 @@ func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, er
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
return nil, err
}
- io.Copy(ioutil.Discard, frame)
+ io.Copy(io.Discard, frame)
if frame.PayloadType() == PingFrame {
if _, err := handler.WritePong(b[:n]); err != nil {
return nil, err
diff --git a/websocket/hybi_test.go b/websocket/hybi_test.go
index 9504aa2d30..f0715d3f6f 100644
--- a/websocket/hybi_test.go
+++ b/websocket/hybi_test.go
@@ -163,7 +163,7 @@ Sec-WebSocket-Protocol: chat
}
for k, v := range expectedHeader {
if req.Header.Get(k) != v {
- t.Errorf(fmt.Sprintf("%s expected %q but got %q", k, v, req.Header.Get(k)))
+ t.Errorf("%s expected %q but got %q", k, v, req.Header.Get(k))
}
}
}
diff --git a/websocket/websocket.go b/websocket/websocket.go
index 90a2257cd5..ac76165ceb 100644
--- a/websocket/websocket.go
+++ b/websocket/websocket.go
@@ -8,7 +8,7 @@
// This package currently lacks some features found in an alternative
// and more actively maintained WebSocket package:
//
-// https://pkg.go.dev/nhooyr.io/websocket
+// https://pkg.go.dev/github.com/coder/websocket
package websocket // import "golang.org/x/net/websocket"
import (
@@ -17,7 +17,6 @@ import (
"encoding/json"
"errors"
"io"
- "io/ioutil"
"net"
"net/http"
"net/url"
@@ -208,7 +207,7 @@ again:
n, err = ws.frameReader.Read(msg)
if err == io.EOF {
if trailer := ws.frameReader.TrailerReader(); trailer != nil {
- io.Copy(ioutil.Discard, trailer)
+ io.Copy(io.Discard, trailer)
}
ws.frameReader = nil
goto again
@@ -330,7 +329,7 @@ func (cd Codec) Receive(ws *Conn, v interface{}) (err error) {
ws.rio.Lock()
defer ws.rio.Unlock()
if ws.frameReader != nil {
- _, err = io.Copy(ioutil.Discard, ws.frameReader)
+ _, err = io.Copy(io.Discard, ws.frameReader)
if err != nil {
return err
}
@@ -362,7 +361,7 @@ again:
return ErrFrameTooLarge
}
payloadType := frame.PayloadType()
- data, err := ioutil.ReadAll(frame)
+ data, err := io.ReadAll(frame)
if err != nil {
return err
}
diff --git a/xsrftoken/xsrf.go b/xsrftoken/xsrf.go
index 3ca5d5b9f5..e808e6dd80 100644
--- a/xsrftoken/xsrf.go
+++ b/xsrftoken/xsrf.go
@@ -45,10 +45,9 @@ func generateTokenAtTime(key, userID, actionID string, now time.Time) string {
h := hmac.New(sha1.New, []byte(key))
fmt.Fprintf(h, "%s:%s:%d", clean(userID), clean(actionID), milliTime)
- // Get the padded base64 string then removing the padding.
+ // Get the unpadded base64 string.
tok := string(h.Sum(nil))
- tok = base64.URLEncoding.EncodeToString([]byte(tok))
- tok = strings.TrimRight(tok, "=")
+ tok = base64.RawURLEncoding.EncodeToString([]byte(tok))
return fmt.Sprintf("%s:%d", tok, milliTime)
}
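
The encoding swap does not change any token bytes: base64.RawURLEncoding uses the same URL-safe alphabet as base64.URLEncoding, only without the trailing '=' padding, so it yields exactly the string the old TrimRight approach produced. A quick standalone check (inputs here are arbitrary, not taken from the package's tests):

    package main

    import (
        "crypto/hmac"
        "crypto/sha1"
        "encoding/base64"
        "fmt"
        "strings"
    )

    func main() {
        // Any HMAC-SHA1 digest works here; only the encoding step matters.
        h := hmac.New(sha1.New, []byte("key"))
        fmt.Fprintf(h, "%s:%s:%d", "userID", "actionID", int64(1257894000000))
        sum := h.Sum(nil)

        oldTok := strings.TrimRight(base64.URLEncoding.EncodeToString(sum), "=")
        newTok := base64.RawURLEncoding.EncodeToString(sum)
        fmt.Println(oldTok == newTok) // true: same alphabet, padding omitted
    }
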